Example #1
 def test_languages(self):
     # All languages are described.
     for lang in LANGUAGES:
         assert lang in LANGUAGE_NAMES
         assert lang in LANGUAGE_TO_SOURCE_EXT_MAP
         # This isn't true, as not all languages need headers.
         # assert lang in LANGUAGE_TO_HEADER_EXT_MAP
     # All default languages are languages.
     for lang in DEFAULT_LANGUAGES:
         assert lang in LANGUAGES
     # All keys are languages.
     for lang in LANGUAGE_TO_SOURCE_EXT_MAP.iterkeys():
         assert lang in LANGUAGES
     for lang in LANGUAGE_TO_HEADER_EXT_MAP.iterkeys():
         assert lang in LANGUAGES
     # All values are languages.
     for lang in SOURCE_EXT_TO_LANGUAGE_MAP.itervalues():
         assert lang in LANGUAGES
     # Extensions are sane.
     for lang in LANGUAGES:
         assert LANGUAGE_TO_SOURCE_EXT_MAP[lang][0] == "."
         assert lang == \
             SOURCE_EXT_TO_LANGUAGE_MAP[LANGUAGE_TO_SOURCE_EXT_MAP[lang]]
     for ext in SOURCE_EXT_TO_LANGUAGE_MAP:
         assert ext[0] == "."
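
A minimal, hypothetical sketch of constants that would satisfy the invariants exercised by this test; the real definitions live in the CMS codebase and may differ:

LANGUAGES = ["c", "cpp", "pas"]
DEFAULT_LANGUAGES = ["c", "cpp", "pas"]
LANGUAGE_NAMES = {"c": "C", "cpp": "C++", "pas": "Pascal"}
LANGUAGE_TO_SOURCE_EXT_MAP = {"c": ".c", "cpp": ".cpp", "pas": ".pas"}
# Not every language needs a header extension.
LANGUAGE_TO_HEADER_EXT_MAP = {"c": ".h", "cpp": ".h", "pas": "lib.pas"}
SOURCE_EXT_TO_LANGUAGE_MAP = dict(
    (ext, lang) for lang, ext in LANGUAGE_TO_SOURCE_EXT_MAP.items())
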
Example #2
    def get_compilation_commands(self, submission_format):
        """See TaskType.get_compilation_commands."""
        res = dict()
        for language in LANGUAGES:
            source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]
            header_ext = LANGUAGE_TO_HEADER_EXT_MAP.get(language)
            source_filenames = []
            # Manager
            manager_source_filename = "manager%s" % source_ext
            source_filenames.append(manager_source_filename)
            # Manager's header.
            if header_ext is not None:
                manager_header_filename = "manager%s" % header_ext
                source_filenames.append(manager_header_filename)

            for filename in submission_format:
                source_filename = filename.replace(".%l", source_ext)
                source_filenames.append(source_filename)
                # Headers
                if header_ext is not None:
                    header_filename = filename.replace(".%l", header_ext)
                    source_filenames.append(header_filename)

            # Get compilation command and compile.
            executable_filename = "manager"
            commands = get_compilation_commands(language, source_filenames,
                                                executable_filename)
            res[language] = commands
        return res
Example #3
    def get_compilation_commands(self, submission_format):
        """See TaskType.get_compilation_commands."""
        res = dict()
        for language in LANGUAGES:
            source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]
            header_ext = LANGUAGE_TO_HEADER_EXT_MAP.get(language)
            source_filenames = []
            # Manager
            manager_source_filename = "manager%s" % source_ext
            source_filenames.append(manager_source_filename)
            # Manager's header.
            if header_ext is not None:
                manager_header_filename = "manager%s" % header_ext
                source_filenames.append(manager_header_filename)

            for filename in submission_format:
                source_filename = filename.replace(".%l", source_ext)
                source_filenames.append(source_filename)
                # Headers
                if header_ext is not None:
                    header_filename = filename.replace(".%l", header_ext)
                    source_filenames.append(header_filename)

            # Get compilation command and compile.
            executable_filename = "manager"
            commands = get_compilation_commands(language,
                                                source_filenames,
                                                executable_filename)
            res[language] = commands
        return res
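
A hypothetical caller of the get_compilation_commands() methods shown in the two examples above; the task_type object and the "solution.%l" format element are made-up names used only for illustration:

commands_per_language = task_type.get_compilation_commands(["solution.%l"])
for language, commands in commands_per_language.items():
    # Each value is a list of argument vectors, one per compiler invocation.
    for command in commands:
        print("%s: %s" % (language, " ".join(command)))
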
Example #4
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

        # Create the sandbox
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        source_filenames = []
        # Stub.
        stub_filename = "stub%s" % source_ext
        source_filenames.append(stub_filename)
        files_to_get[stub_filename] = job.managers[stub_filename].digest
        # User's submission.
        for filename, fileinfo in job.files.iteritems():
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = fileinfo.digest

        # Also copy all managers that might be useful during compilation.
        for filename in job.managers.iterkeys():
            if any(filename.endswith(header)
                   for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(source)
                     for source in LANGUAGE_TO_SOURCE_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(obj)
                     for obj in LANGUAGE_TO_OBJ_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = \
            "_".join(pattern.replace(".%l", "")
                     for pattern in job.files.keys())
        commands = get_compilation_commands(language,
                                            source_filenames,
                                            executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
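
A quick illustration of how the ".%l" placeholder from the submission format is resolved in the compile() method above; the filenames are invented:

source_ext = ".cpp"
patterns = ["encoder.%l", "decoder.%l"]
source_filenames = [p.replace(".%l", source_ext) for p in patterns]
# -> ["encoder.cpp", "decoder.cpp"]
executable_filename = "_".join(p.replace(".%l", "") for p in patterns)
# -> "encoder_decoder"
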
Example #5
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files),
                         extra={"operation": job.info})
            return True

        # Create the sandbox
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = job.files.keys()[0]
        source_filenames = []
        # Stub.
        source_filenames.append("stub%s" % source_ext)
        files_to_get[source_filenames[-1]] = \
            job.managers["stub%s" % source_ext].digest
        # User's submission.
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[-1]] = \
            job.files[format_filename].digest

        # Also copy all managers that might be useful during compilation.
        for filename in job.managers.iterkeys():
            if any(
                    filename.endswith(header)
                    for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(
                    filename.endswith(source)
                    for source in LANGUAGE_TO_SOURCE_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(
                    filename.endswith(obj)
                    for obj in LANGUAGE_TO_OBJ_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = get_compilation_commands(language, source_filenames,
                                            executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
Example #6
    def get_task(self):
        """See docstring in class Loader.

        """
        name = os.path.split(self.path)[1]

        if (not os.path.exists(os.path.join(self.path, "task.yaml"))) and \
           (not os.path.exists(os.path.join(self.path, "..", name + ".yaml"))):
            logger.critical("File missing: \"task.yaml\"")
            return None

        # We first look for the yaml file inside the task folder,
        # and fall back, if necessary, to a yaml file in its parent folder.
        try:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, "task.yaml"),
                        "rt", encoding="utf-8"))
        except IOError:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, "..", name + ".yaml"),
                        "rt", encoding="utf-8"))

        logger.info("Loading parameters for task %s.", name)

        # Here we update the time of the last import
        touch(os.path.join(self.path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(self.path, ".import_error"))

        args = {}

        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["title", "nome"])

        assert name == args["name"]

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        primary_language = load(conf, None, "primary_language")
        if primary_language is None:
            primary_language = 'it'
        paths = [os.path.join(self.path, "statement", "statement.pdf"),
                 os.path.join(self.path, "testo", "testo.pdf")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Statement for task %s (lang: %s)" % (name,
                                                          primary_language))
                break
        else:
            logger.critical("Couldn't find any task statement, aborting...")
            sys.exit(1)
        args["statements"] = [Statement(primary_language, digest)]

        args["primary_statements"] = '["%s"]' % (primary_language)

        args["attachments"] = []  # FIXME Use auxiliary

        args["submission_format"] = [
            SubmissionFormatElement("%s.%%l" % name)]

        if conf.get("score_mode", None) == SCORE_MODE_MAX:
            args["score_mode"] = SCORE_MODE_MAX
        elif conf.get("score_mode", None) == SCORE_MODE_MAX_TOKENED_LAST:
            args["score_mode"] = SCORE_MODE_MAX_TOKENED_LAST

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "%s.yaml uses a deprecated format for token settings which "
                "will soon stop being supported, you're advised to update it.",
                name)
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = "disabled"
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = "infinite"
            else:
                args["token_mode"] = "finite"
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_time", "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        # Attachments
        args["attachments"] = []
        if os.path.exists(os.path.join(self.path, "att")):
            for filename in os.listdir(os.path.join(self.path, "att")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(self.path, "att", filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"] += [Attachment(filename, digest)]

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, ["time_limit", "timeout"], conv=float)
        load(conf, args, ["memory_limit", "memlimit"])

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(os.path.join(
                    self.path, "sol", "grader.%s" % lang)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                grader_filename = os.path.join(
                    self.path, "sol", "grader.%s" % lang)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file_from_path(
                        grader_filename,
                        "Grader for task %s and language %s" % (name, lang))
                    args["managers"] += [
                        Manager("grader.%s" % lang, digest)]
                else:
                    logger.warning("Grader for language %s not found ", lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(self.path, "sol")):
                if any(other_filename.endswith(header)
                       for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(self.path, "sol", other_filename),
                        "Manager %s for task %s" % (other_filename, name))
                    args["managers"] += [
                        Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is check/checker (or equivalent), then, presuming
        # that the task type is Batch or OutputOnly, we retrieve the
        # comparator
        paths = [os.path.join(self.path, "check", "checker"),
                 os.path.join(self.path, "cor", "correttore")]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path,
                    "Manager for task %s" % name)
                args["managers"] += [
                    Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(self.path, 'gen', 'GEN')
        try:
            with io.open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # This line represents a testcase, otherwise
                        # it's just a blank
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase = testcase.strip()
                        comment = comment.strip()
                        testcase_detected = testcase != ''
                        copy_testcase_detected = comment.startswith("COPY:")
                        subtask_detected = comment.startswith('ST:')

                        flags = [testcase_detected,
                                 copy_testcase_detected,
                                 subtask_detected]
                        if len([x for x in flags if x]) > 1:
                            raise Exception("No testcase and command in"
                                            " the same line allowed")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected or copy_testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert(testcases == 0)
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close last subtask (if no subtasks were defined, just
                # fallback to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    n_input = testcases
                    if n_input != 0:
                        input_value = total_value / n_input
                    args["score_type_parameters"] = "%s" % input_value
                else:
                    subtasks.append([points, testcases])
                    assert(100 == sum([int(st[0]) for st in subtasks]))
                    n_input = sum([int(st[1]) for st in subtasks])
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = "%s" % subtasks

                if "n_input" in conf:
                    assert int(conf['n_input']) == n_input

        # If gen/GEN doesn't exist, just fallback to Sum
        except IOError:
            args["score_type"] = "Sum"
            total_value = float(conf.get("total_value", 100.0))
            input_value = 0.0
            n_input = int(conf['n_input'])
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = "%s" % input_value

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = '["%s"]' % evaluation_param
            task.submission_format = [
                SubmissionFormatElement("output_%03d.txt" % i)
                for i in xrange(n_input)]

        # If there is check/manager (or equivalent), then the task
        # type is Communication
        else:
            paths = [os.path.join(self.path, "check", "manager"),
                     os.path.join(self.path, "cor", "manager")]
            for path in paths:
                if os.path.exists(path):
                    args["task_type"] = "Communication"
                    args["task_type_parameters"] = '[]'
                    digest = self.file_cacher.put_file_from_path(
                        path,
                        "Manager for task %s" % name)
                    args["managers"] += [
                        Manager("manager", digest)]
                    for lang in LANGUAGES:
                        stub_name = os.path.join(
                            self.path, "sol", "stub.%s" % lang)
                        if os.path.exists(stub_name):
                            digest = self.file_cacher.put_file_from_path(
                                stub_name,
                                "Stub for task %s and language %s" % (name,
                                                                      lang))
                            args["managers"] += [
                                Manager("stub.%s" % lang, digest)]
                        else:
                            logger.warning("Stub for language %s not "
                                           "found.", lang)
                    for other_filename in os.listdir(os.path.join(self.path,
                                                                  "sol")):
                        if any(other_filename.endswith(header) for header in
                               LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                            digest = self.file_cacher.put_file_from_path(
                                os.path.join(self.path, "sol", other_filename),
                                "Stub %s for task %s" % (other_filename, name))
                            args["managers"] += [
                                Manager(other_filename, digest)]
                    break

            # Otherwise, the task type is Batch
            else:
                args["task_type"] = "Batch"
                args["task_type_parameters"] = \
                    '["%s", ["%s", "%s"], "%s"]' % \
                    (compilation_param, infile_param, outfile_param,
                     evaluation_param)

        args["testcases"] = []
        for i in xrange(n_input):
            input_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "input", "input%d.txt" % i),
                "Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file_from_path(
                os.path.join(self.path, "output", "output%d.txt" % i),
                "Output %d for task %s" % (i, name))
            args["testcases"] += [
                Testcase("%03d" % i, False, input_digest, output_digest)]
            if args["task_type"] == "OutputOnly":
                task.attachments += [
                    Attachment("input_%03d.txt" % i, input_digest)]
        public_testcases = load(conf, None, ["public_testcases", "risultati"],
                                conv=lambda x: "" if x is None else x)
        if public_testcases != "":
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True

        dataset = Dataset(**args)
        task.active_dataset = dataset

        # Import was successful
        os.remove(os.path.join(self.path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
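
For context, a hypothetical gen/GEN file that the subtask parser in get_task() above would read as two subtasks worth 20 and 80 points, with two testcases each; the generator arguments are invented:

GEN_EXAMPLE = """\
# ST: 20
1 100
2 100
# ST: 80
3 100000
4 100000
"""
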
Example #7
    def task_has_changed(self):
        """See docstring in class TaskLoader

        """
        name = os.path.split(self.path)[1]

        if (not os.path.exists(os.path.join(self.path, "task.yaml"))) and \
           (not os.path.exists(os.path.join(self.path, "..", name + ".yaml"))):
            logger.critical("File missing: \"task.yaml\"")
            return None

        # We first look for the yaml file inside the task folder,
        # and fall back, if necessary, to a yaml file in its parent folder.
        try:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, "task.yaml"),
                        "rt", encoding="utf-8"))
        except IOError:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, "..", name + ".yaml"),
                        "rt", encoding="utf-8"))

        # If there is no .itime file, we assume that the task has changed
        if not os.path.exists(os.path.join(self.path, ".itime")):
            return True

        getmtime = lambda fname: os.stat(fname).st_mtime

        itime = getmtime(os.path.join(self.path, ".itime"))

        # Generate a task's list of files
        # Testcases
        files = []
        for filename in os.listdir(os.path.join(self.path, "input")):
            files.append(os.path.join(self.path, "input", filename))

        for filename in os.listdir(os.path.join(self.path, "output")):
            files.append(os.path.join(self.path, "output", filename))

        # Attachments
        if os.path.exists(os.path.join(self.path, "att")):
            for filename in os.listdir(os.path.join(self.path, "att")):
                files.append(os.path.join(self.path, "att", filename))

        # Score file
        files.append(os.path.join(self.path, "gen", "GEN"))

        # Statement
        files.append(os.path.join(self.path, "statement", "statement.pdf"))
        files.append(os.path.join(self.path, "testo", "testo.pdf"))

        # Managers
        files.append(os.path.join(self.path, "check", "checker"))
        files.append(os.path.join(self.path, "cor", "correttore"))
        files.append(os.path.join(self.path, "check", "manager"))
        files.append(os.path.join(self.path, "cor", "manager"))
        if not conf.get('output_only', False) and \
                os.path.isdir(os.path.join(self.path, "sol")):
            for lang in LANGUAGES:
                files.append(
                    os.path.join(self.path, "sol", "grader.%s" % lang))
            for other_filename in os.listdir(os.path.join(self.path, "sol")):
                if any(other_filename.endswith(header)
                       for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                    files.append(
                        os.path.join(self.path, "sol", other_filename))

        # Yaml
        files.append(os.path.join(self.path, "task.yaml"))
        files.append(os.path.join(self.path, "..", name + ".yaml"))

        # Check if any of the files have changed
        for fname in files:
            if os.path.exists(fname):
                if getmtime(fname) > itime:
                    return True

        if os.path.exists(os.path.join(self.path, ".import_error")):
            logger.warning("Last attempt to import task %s failed,"
                           " I'm not trying again.", name)
        return False
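
The change detection above boils down to comparing file mtimes against the .itime marker written at import time; a standalone sketch of that idea:

import os


def modified_after_marker(paths, marker_path):
    """Return True if any existing path was modified after the marker."""
    marker_time = os.stat(marker_path).st_mtime
    return any(os.path.exists(path) and os.stat(path).st_mtime > marker_time
               for path in paths)
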
Example #8
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1", len(job.files), extra={"operation": job.info})
            return True

        # Create the sandbox
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = job.files.keys()[0]
        source_filenames = []
        # Stub.
        source_filenames.append("stub%s" % source_ext)
        files_to_get[source_filenames[-1]] = job.managers["stub%s" % source_ext].digest
        # User's submission.
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[-1]] = job.files[format_filename].digest

        # Also copy all managers that might be useful during compilation.
        # We likely want to compile with .cpp or .o files, so add them to our
        # command line
        for filename in job.managers.iterkeys():
            if any(filename.endswith(header) for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                files_to_get[filename] = job.managers[filename].digest
            elif any(filename.endswith(source) for source in LANGUAGE_TO_SOURCE_EXT_MAP.itervalues()):
                files_to_get[filename] = job.managers[filename].digest
                if filename not in source_filenames:
                    source_filenames.insert(1, filename)
            elif any(filename.endswith(obj) for obj in LANGUAGE_TO_OBJ_EXT_MAP.itervalues()):
                files_to_get[filename] = job.managers[filename].digest
                if filename not in source_filenames:
                    source_filenames.insert(1, filename)

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = get_compilation_commands(language, source_filenames, executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename, "Executable %s for %s" % (executable_filename, job.info)
            )
            job.executables[executable_filename] = Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
Example #9
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files), extra={"operation": job.info})
            return True

        # Create the sandbox
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = job.files.keys()[0]
        source_filenames = []
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[0]] = \
            job.files[format_filename].digest
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager. The grader must
        # be the first file in source_filenames.
        if self.parameters[0] == "grader":
            source_filenames.insert(0, "grader%s" % source_ext)
            files_to_get["grader%s" % source_ext] = \
                job.managers["grader%s" % source_ext].digest

        # Also copy all *.h and *lib.pas graders
        for filename in job.managers.iterkeys():
            if any(filename.endswith(header)
                   for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = get_compilation_commands(language,
                                            source_filenames,
                                            executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
Example #10
    def get_task(self, name):
        """See docstring in class Loader.

        """
        try:
            num = self.tasks_order[name]

        # Here we expose an undocumented behavior, so that cmsMake can
        # import a task even without the whole contest; this is not to
        # be relied upon in general
        except AttributeError:
            num = 1

        task_path = os.path.join(self.path, name)

        # We first look for the yaml file inside the task folder,
        # and fall back, if necessary, to a yaml file in its parent folder.
        try:
            conf = yaml.safe_load(
                io.open(os.path.join(task_path, "task.yaml"),
                        "rt",
                        encoding="utf-8"))
        except IOError:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, name + ".yaml"),
                        "rt",
                        encoding="utf-8"))

        logger.info("Loading parameters for task %s.", name)

        # Here we update the time of the last import
        touch(os.path.join(task_path, ".itime"))
        # If this file is not deleted, then the import failed
        touch(os.path.join(task_path, ".import_error"))

        args = {}

        args["num"] = num
        load(conf, args, ["name", "nome_breve"])
        load(conf, args, ["title", "nome"])

        assert name == args["name"]

        if args["name"] == args["title"]:
            logger.warning("Short name equals long name (title). "
                           "Please check.")

        primary_language = load(conf, None, "primary_language")
        if primary_language is None:
            primary_language = 'it'
        paths = [
            os.path.join(task_path, "statement", "statement.pdf"),
            os.path.join(task_path, "testo", "testo.pdf")
        ]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path, "Statement for task %s (lang: %s)" %
                    (name, primary_language))
                break
        else:
            logger.critical("Couldn't find any task statement, aborting...")
            sys.exit(1)
        args["statements"] = [Statement(primary_language, digest)]

        args["primary_statements"] = '["%s"]' % (primary_language)

        args["attachments"] = []  # FIXME Use auxiliary

        args["submission_format"] = [SubmissionFormatElement("%s.%%l" % name)]

        if conf.get("score_mode", None) == SCORE_MODE_MAX:
            args["score_mode"] = SCORE_MODE_MAX
        elif conf.get("score_mode", None) == SCORE_MODE_MAX_TOKENED_LAST:
            args["score_mode"] = SCORE_MODE_MAX_TOKENED_LAST

        # Use the new token settings format if detected.
        if "token_mode" in conf:
            load(conf, args, "token_mode")
            load(conf, args, "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf, args, "token_gen_interval", conv=make_timedelta)
            load(conf, args, "token_gen_max")
        # Otherwise fall back on the old one.
        else:
            logger.warning(
                "%s.yaml uses a deprecated format for token settings which "
                "will soon stop being supported, you're advised to update it.",
                name)
            # Determine the mode.
            if conf.get("token_initial", None) is None:
                args["token_mode"] = "disabled"
            elif conf.get("token_gen_number", 0) > 0 and \
                    conf.get("token_gen_time", 0) == 0:
                args["token_mode"] = "infinite"
            else:
                args["token_mode"] = "finite"
            # Set the old default values.
            args["token_gen_initial"] = 0
            args["token_gen_number"] = 0
            args["token_gen_interval"] = timedelta()
            # Copy the parameters to their new names.
            load(conf, args, "token_total", "token_max_number")
            load(conf, args, "token_min_interval", conv=make_timedelta)
            load(conf, args, "token_initial", "token_gen_initial")
            load(conf, args, "token_gen_number")
            load(conf,
                 args,
                 "token_gen_time",
                 "token_gen_interval",
                 conv=make_timedelta)
            load(conf, args, "token_max", "token_gen_max")
            # Remove some corner cases.
            if args["token_gen_initial"] is None:
                args["token_gen_initial"] = 0
            if args["token_gen_interval"].total_seconds() == 0:
                args["token_gen_interval"] = timedelta(minutes=1)

        load(conf, args, "max_submission_number")
        load(conf, args, "max_user_test_number")
        load(conf, args, "min_submission_interval", conv=make_timedelta)
        load(conf, args, "min_user_test_interval", conv=make_timedelta)

        # Attachments
        args["attachments"] = []
        if os.path.exists(os.path.join(task_path, "att")):
            for filename in os.listdir(os.path.join(task_path, "att")):
                digest = self.file_cacher.put_file_from_path(
                    os.path.join(task_path, "att", filename),
                    "Attachment %s for task %s" % (filename, name))
                args["attachments"] += [Attachment(filename, digest)]

        task = Task(**args)

        args = {}
        args["task"] = task
        args["description"] = conf.get("version", "Default")
        args["autojudge"] = False

        load(conf, args, ["time_limit", "timeout"], conv=float)
        load(conf, args, ["memory_limit", "memlimit"])

        # Builds the parameters that depend on the task type
        args["managers"] = []
        infile_param = conf.get("infile", "input.txt")
        outfile_param = conf.get("outfile", "output.txt")

        # If there is sol/grader.%l for some language %l, then,
        # presuming that the task type is Batch, we retrieve graders
        # in the form sol/grader.%l
        graders = False
        for lang in LANGUAGES:
            if os.path.exists(
                    os.path.join(task_path, "sol", "grader.%s" % lang)):
                graders = True
                break
        if graders:
            # Read grader for each language
            for lang in LANGUAGES:
                grader_filename = os.path.join(task_path, "sol",
                                               "grader.%s" % lang)
                if os.path.exists(grader_filename):
                    digest = self.file_cacher.put_file_from_path(
                        grader_filename,
                        "Grader for task %s and language %s" % (name, lang))
                    args["managers"] += [Manager("grader.%s" % lang, digest)]
                else:
                    logger.warning("Grader for language %s not found ", lang)
            # Read managers with other known file extensions
            for other_filename in os.listdir(os.path.join(task_path, "sol")):
                if any(
                        other_filename.endswith(header)
                        for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                    digest = self.file_cacher.put_file_from_path(
                        os.path.join(task_path, "sol", other_filename),
                        "Manager %s for task %s" % (other_filename, name))
                    args["managers"] += [Manager(other_filename, digest)]
            compilation_param = "grader"
        else:
            compilation_param = "alone"

        # If there is check/checker (or equivalent), then, presuming
        # that the task type is Batch or OutputOnly, we retrieve the
        # comparator
        paths = [
            os.path.join(task_path, "check", "checker"),
            os.path.join(task_path, "cor", "correttore")
        ]
        for path in paths:
            if os.path.exists(path):
                digest = self.file_cacher.put_file_from_path(
                    path, "Manager for task %s" % name)
                args["managers"] += [Manager("checker", digest)]
                evaluation_param = "comparator"
                break
        else:
            evaluation_param = "diff"

        # Detect subtasks by checking GEN
        gen_filename = os.path.join(task_path, 'gen', 'GEN')
        try:
            with io.open(gen_filename, "rt", encoding="utf-8") as gen_file:
                subtasks = []
                testcases = 0
                points = None
                for line in gen_file:
                    line = line.strip()
                    splitted = line.split('#', 1)

                    if len(splitted) == 1:
                        # This line represents a testcase, otherwise
                        # it's just a blank
                        if splitted[0] != '':
                            testcases += 1

                    else:
                        testcase, comment = splitted
                        testcase = testcase.strip()
                        comment = comment.strip()
                        testcase_detected = testcase != ''
                        copy_testcase_detected = comment.startswith("COPY:")
                        subtask_detected = comment.startswith('ST:')

                        flags = [
                            testcase_detected, copy_testcase_detected,
                            subtask_detected
                        ]
                        if len([x for x in flags if x]) > 1:
                            raise Exception("No testcase and command in"
                                            " the same line allowed")

                        # This line represents a testcase and contains a
                        # comment, but the comment doesn't start a new
                        # subtask
                        if testcase_detected or copy_testcase_detected:
                            testcases += 1

                        # This line starts a new subtask
                        if subtask_detected:
                            # Close the previous subtask
                            if points is None:
                                assert (testcases == 0)
                            else:
                                subtasks.append([points, testcases])
                            # Open the new one
                            testcases = 0
                            points = int(comment[3:].strip())

                # Close last subtask (if no subtasks were defined, just
                # fallback to Sum)
                if points is None:
                    args["score_type"] = "Sum"
                    total_value = float(conf.get("total_value", 100.0))
                    input_value = 0.0
                    n_input = testcases
                    if n_input != 0:
                        input_value = total_value / n_input
                    args["score_type_parameters"] = "%s" % input_value
                else:
                    subtasks.append([points, testcases])
                    assert (100 == sum([int(st[0]) for st in subtasks]))
                    n_input = sum([int(st[1]) for st in subtasks])
                    args["score_type"] = "GroupMin"
                    args["score_type_parameters"] = "%s" % subtasks

                if "n_input" in conf:
                    assert int(conf['n_input']) == n_input

        # If gen/GEN doesn't exist, just fallback to Sum
        except IOError:
            args["score_type"] = "Sum"
            total_value = float(conf.get("total_value", 100.0))
            input_value = 0.0
            n_input = int(conf['n_input'])
            if n_input != 0:
                input_value = total_value / n_input
            args["score_type_parameters"] = "%s" % input_value

        # If output_only is set, then the task type is OutputOnly
        if conf.get('output_only', False):
            args["task_type"] = "OutputOnly"
            args["time_limit"] = None
            args["memory_limit"] = None
            args["task_type_parameters"] = '["%s"]' % evaluation_param
            task.submission_format = [
                SubmissionFormatElement("output_%03d.txt" % i)
                for i in xrange(n_input)
            ]

        # If there is check/manager (or equivalent), then the task
        # type is Communication
        else:
            paths = [
                os.path.join(task_path, "check", "manager"),
                os.path.join(task_path, "cor", "manager")
            ]
            for path in paths:
                if os.path.exists(path):
                    args["task_type"] = "Communication"
                    args["task_type_parameters"] = '[]'
                    digest = self.file_cacher.put_file_from_path(
                        path, "Manager for task %s" % name)
                    args["managers"] += [Manager("manager", digest)]
                    for lang in LANGUAGES:
                        stub_name = os.path.join(task_path, "sol",
                                                 "stub.%s" % lang)
                        if os.path.exists(stub_name):
                            digest = self.file_cacher.put_file_from_path(
                                stub_name, "Stub for task %s and language %s" %
                                (name, lang))
                            args["managers"] += [
                                Manager("stub.%s" % lang, digest)
                            ]
                        else:
                            logger.warning(
                                "Stub for language %s not "
                                "found.", lang)
                    for other_filename in os.listdir(
                            os.path.join(task_path, "sol")):
                        if any(
                                other_filename.endswith(header) for header in
                                LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                            digest = self.file_cacher.put_file_from_path(
                                os.path.join(task_path, "sol", other_filename),
                                "Stub %s for task %s" % (other_filename, name))
                            args["managers"] += [
                                Manager(other_filename, digest)
                            ]
                    break

            # Otherwise, the task type is Batch
            else:
                args["task_type"] = "Batch"
                args["task_type_parameters"] = \
                    '["%s", ["%s", "%s"], "%s"]' % \
                    (compilation_param, infile_param, outfile_param,
                     evaluation_param)

        args["testcases"] = []
        for i in xrange(n_input):
            input_digest = self.file_cacher.put_file_from_path(
                os.path.join(task_path, "input", "input%d.txt" % i),
                "Input %d for task %s" % (i, name))
            output_digest = self.file_cacher.put_file_from_path(
                os.path.join(task_path, "output", "output%d.txt" % i),
                "Output %d for task %s" % (i, name))
            args["testcases"] += [
                Testcase("%03d" % i, False, input_digest, output_digest)
            ]
            if args["task_type"] == "OutputOnly":
                task.attachments += [
                    Attachment("input_%03d.txt" % i, input_digest)
                ]
        public_testcases = load(conf,
                                None, ["public_testcases", "risultati"],
                                conv=lambda x: "" if x is None else x)
        if public_testcases != "":
            for x in public_testcases.split(","):
                args["testcases"][int(x.strip())].public = True

        dataset = Dataset(**args)
        task.active_dataset = dataset

        # Import was successful
        os.remove(os.path.join(task_path, ".import_error"))

        logger.info("Task parameters loaded.")

        return task
Example #11
    def has_changed(self, name):
        """See docstring in class Loader

        """
        path = os.path.realpath(os.path.join(self.path, name))

        try:
            conf = yaml.safe_load(
                io.open(os.path.join(path, "task.yaml"),
                        "rt",
                        encoding="utf-8"))
        except IOError:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, name + ".yaml"),
                        "rt",
                        encoding="utf-8"))

        # If there is no .itime file, we assume that the task has changed
        if not os.path.exists(os.path.join(path, ".itime")):
            return True

        getmtime = lambda fname: os.stat(fname).st_mtime

        itime = getmtime(os.path.join(path, ".itime"))

        # Generate a task's list of files
        # Testcases
        files = []
        for filename in os.listdir(os.path.join(path, "input")):
            files.append(os.path.join(path, "input", filename))

        for filename in os.listdir(os.path.join(path, "output")):
            files.append(os.path.join(path, "output", filename))

        # Attachments
        if os.path.exists(os.path.join(path, "att")):
            for filename in os.listdir(os.path.join(path, "att")):
                files.append(os.path.join(path, "att", filename))

        # Score file
        files.append(os.path.join(path, "gen", "GEN"))

        # Statement
        files.append(os.path.join(path, "statement", "statement.pdf"))
        files.append(os.path.join(path, "testo", "testo.pdf"))

        # Managers
        files.append(os.path.join(path, "check", "checker"))
        files.append(os.path.join(path, "cor", "correttore"))
        files.append(os.path.join(path, "check", "manager"))
        files.append(os.path.join(path, "cor", "manager"))
        if not conf.get('output_only', False) and \
                os.path.isdir(os.path.join(path, "sol")):
            for lang in LANGUAGES:
                files.append(os.path.join(path, "sol", "grader.%s" % lang))
            for other_filename in os.listdir(os.path.join(path, "sol")):
                if any(
                        other_filename.endswith(header)
                        for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                    files.append(os.path.join(path, "sol", other_filename))

        # Yaml
        files.append(os.path.join(path, "task.yaml"))
        files.append(os.path.join(self.path, name + ".yaml"))

        # Check if any of the files have changed
        for fname in files:
            if os.path.exists(fname):
                if getmtime(fname) > itime:
                    return True

        if os.path.exists(os.path.join(path, ".import_error")):
            logger.warning(
                "Last attempt to import task %s failed,"
                " I'm not trying again.", name)
        return False
Example #12
    def task_has_changed(self):
        """See docstring in class TaskLoader."""
        name = os.path.split(self.path)[1]

        if (not os.path.exists(os.path.join(self.path, "task.yaml"))) and \
           (not os.path.exists(os.path.join(self.path, "..", name + ".yaml"))):
            logger.critical("File missing: \"task.yaml\"")
            sys.exit(1)

        # We first look for the yaml file inside the task folder,
        # and fall back, if necessary, to a yaml file in its parent folder.
        try:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, "task.yaml"),
                        "rt",
                        encoding="utf-8"))
        except IOError:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, "..", name + ".yaml"),
                        "rt",
                        encoding="utf-8"))

        # If there is no .itime file, we assume that the task has changed
        if not os.path.exists(os.path.join(self.path, ".itime")):
            return True

        getmtime = lambda fname: os.stat(fname).st_mtime

        itime = getmtime(os.path.join(self.path, ".itime"))

        # Generate a task's list of files
        # Testcases
        files = []
        for filename in os.listdir(os.path.join(self.path, "input")):
            files.append(os.path.join(self.path, "input", filename))

        for filename in os.listdir(os.path.join(self.path, "output")):
            files.append(os.path.join(self.path, "output", filename))

        # Attachments
        if os.path.exists(os.path.join(self.path, "att")):
            for filename in os.listdir(os.path.join(self.path, "att")):
                files.append(os.path.join(self.path, "att", filename))

        # Score file
        files.append(os.path.join(self.path, "gen", "GEN"))

        # Statement
        files.append(os.path.join(self.path, "statement", "statement.pdf"))
        files.append(os.path.join(self.path, "testo", "testo.pdf"))

        # Managers
        files.append(os.path.join(self.path, "check", "checker"))
        files.append(os.path.join(self.path, "cor", "correttore"))
        files.append(os.path.join(self.path, "check", "manager"))
        files.append(os.path.join(self.path, "cor", "manager"))
        if not conf.get('output_only', False) and \
                os.path.isdir(os.path.join(self.path, "sol")):
            for lang in LANGUAGES:
                files.append(os.path.join(self.path, "sol",
                                          "grader.%s" % lang))
            for other_filename in os.listdir(os.path.join(self.path, "sol")):
                if any(
                        other_filename.endswith(header)
                        for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                    files.append(os.path.join(self.path, "sol",
                                              other_filename))

        # Yaml
        files.append(os.path.join(self.path, "task.yaml"))
        files.append(os.path.join(self.path, "..", name + ".yaml"))

        # Check if any of the files have changed
        for fname in files:
            if os.path.exists(fname):
                if getmtime(fname) > itime:
                    return True

        if os.path.exists(os.path.join(self.path, ".import_error")):
            logger.warning(
                "Last attempt to import task %s failed, I'm not "
                "trying again. After fixing the error, delete the "
                "file .import_error", name)
            sys.exit(1)

        return False
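
The configuration lookup at the top of this method (task.yaml inside the task folder, otherwise <name>.yaml in the parent folder) can be isolated in a small helper. A sketch under the same assumptions as the loader, with a hypothetical name:

import io
import os

import yaml


def load_task_conf(task_path):
    """Load the task configuration, preferring task.yaml in the task folder."""
    name = os.path.split(task_path)[1]
    candidates = [
        os.path.join(task_path, "task.yaml"),
        os.path.join(task_path, "..", name + ".yaml"),
    ]
    for candidate in candidates:
        if os.path.exists(candidate):
            with io.open(candidate, "rt", encoding="utf-8") as conf_file:
                return yaml.safe_load(conf_file)
    raise IOError("no YAML configuration found for task %s" % name)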
Exemple #13
0
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

        # Create the sandbox
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        source_filenames = []
        # Stub.
        stub_filename = "stub%s" % source_ext
        source_filenames.append(stub_filename)
        files_to_get[stub_filename] = job.managers[stub_filename].digest
        # User's submission.
        for filename, fileinfo in job.files.iteritems():
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = fileinfo.digest

        # Also copy all managers that might be useful during compilation.
        for filename in job.managers.iterkeys():
            if any(
                    filename.endswith(header)
                    for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(
                    filename.endswith(source)
                    for source in LANGUAGE_TO_SOURCE_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(
                    filename.endswith(obj)
                    for obj in LANGUAGE_TO_OBJ_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = \
            "_".join(pattern.replace(".%l", "")
                     for pattern in job.files.keys())
        commands = get_compilation_commands(language, source_filenames,
                                            executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
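
The three any(...) branches above run the same test against three extension maps; since str.endswith also accepts a tuple of suffixes, the filter can be written once. A sketch assuming the same LANGUAGE_TO_*_EXT_MAP dicts used by the task type and a managers dict mapping filenames to objects with a digest attribute, as job.managers does above (the helper name is hypothetical):

def select_compilation_managers(managers):
    """Map manager filename -> digest for files useful at compile time."""
    interesting_exts = tuple(
        list(LANGUAGE_TO_HEADER_EXT_MAP.values()) +
        list(LANGUAGE_TO_SOURCE_EXT_MAP.values()) +
        list(LANGUAGE_TO_OBJ_EXT_MAP.values()))
    return dict((filename, manager.digest)
                for filename, manager in managers.items()
                if filename.endswith(interesting_exts))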
Exemple #14
0
    def has_changed(self, name):
        """See docstring in class Loader

        """
        path = os.path.realpath(os.path.join(self.path, name))

        try:
            conf = yaml.safe_load(
                io.open(os.path.join(path, "task.yaml"),
                        "rt", encoding="utf-8"))
        except IOError:
            conf = yaml.safe_load(
                io.open(os.path.join(self.path, name + ".yaml"),
                        "rt", encoding="utf-8"))

        # If there is a .skip_import file, we pretend the task didn't change
        if os.path.exists(os.path.join(path, ".skip_import")):
            return False

        # If there is no .itime file, we assume that the task has changed
        if not os.path.exists(os.path.join(path, ".itime")):
            return True

        getmtime = lambda fname: os.stat(fname).st_mtime

        itime = getmtime(os.path.join(path, ".itime"))

        # Generate a task's list of files
        # Testcases
        files = []
        for filename in os.listdir(os.path.join(path, "input")):
            files.append(os.path.join(path, "input", filename))

        for filename in os.listdir(os.path.join(path, "output")):
            files.append(os.path.join(path, "output", filename))

        # Attachments
        if os.path.exists(os.path.join(path, "att")):
            for filename in os.listdir(os.path.join(path, "att")):
                files.append(os.path.join(path, "att", filename))

        # Score file
        files.append(os.path.join(path, "gen", "GEN"))

        # Statement
        files.append(os.path.join(path, "statement", "statement.pdf"))
        files.append(os.path.join(path, "testo", "testo.pdf"))

        # Managers
        files.append(os.path.join(path, "check", "checker"))
        files.append(os.path.join(path, "cor", "correttore"))
        files.append(os.path.join(path, "check", "manager"))
        files.append(os.path.join(path, "cor", "manager"))
        if not conf.get('output_only', False) and \
                os.path.isdir(os.path.join(path, "sol")):
            for lang in LANGUAGES:
                files.append(os.path.join(path, "sol", "grader.%s" % lang))
            for other_filename in os.listdir(os.path.join(path, "sol")):
                if any(other_filename.endswith(header)
                       for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                    files.append(os.path.join(path, "sol", other_filename))

        # Yaml
        files.append(os.path.join(path, "task.yaml"))
        files.append(os.path.join(self.path, name + ".yaml"))

        # Check if any of the files have changed
        for fname in files:
            if os.path.exists(fname):
                if getmtime(fname) > itime:
                    return True

        if os.path.exists(os.path.join(path, ".import_error")):
            logger.warning("Last attempt to import task %s failed,"
                           " I'm not trying again.", name)
        return False
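
The .itime, .skip_import and .import_error files consulted above act as simple on-disk markers. A hedged sketch of how an importer might maintain two of them after an attempt; this is an assumption for illustration, not the loader's actual bookkeeping code:

import io
import os


def record_import_result(task_path, succeeded):
    """Touch .itime on success, or leave an .import_error marker on failure."""
    itime_path = os.path.join(task_path, ".itime")
    error_path = os.path.join(task_path, ".import_error")
    if succeeded:
        # Refresh the reference timestamp and clear any stale error marker.
        with io.open(itime_path, "wt", encoding="utf-8") as marker:
            marker.write(u"")
        if os.path.exists(error_path):
            os.remove(error_path)
    else:
        with io.open(error_path, "wt", encoding="utf-8") as marker:
            marker.write(u"")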
Exemple #15
0
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]
        header_ext = LANGUAGE_TO_HEADER_EXT_MAP.get(language)

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that the task's task type and submission format agree.
        if len(job.files) != 2:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 2",
                         len(job.files),
                         extra={"operation": job.info})
            return True

        # First and only compilation.
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)
        files_to_get = {}

        source_filenames = []

        # Manager.
        manager_filename = "manager%s" % source_ext
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            job.managers[manager_filename].digest
        # Manager's header.
        if header_ext is not None:
            manager_filename = "manager%s" % header_ext
            source_filenames.append(manager_filename)
            files_to_get[manager_filename] = \
                job.managers[manager_filename].digest

        # User's submissions and headers.
        for filename, file_ in job.files.iteritems():
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = file_.digest
            # Matching header, provided among the managers.
            if header_ext is not None:
                header_filename = filename.replace(".%l", header_ext)
                source_filenames.append(header_filename)
                files_to_get[header_filename] = \
                    job.managers[header_filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Get compilation command and compile.
        executable_filename = "manager"
        commands = get_compilation_commands(language, source_filenames,
                                            executable_filename)
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
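
Both this example and the next assemble the same lists: the manager source (plus its header, when the language has one), the user's sources, and their matching headers shipped among the managers. A sketch of that bookkeeping factored out, with hypothetical names, assuming files maps ".%l" patterns to objects with a digest attribute as job.files does above:

def plan_manager_compilation(source_ext, header_ext, files, managers):
    """Return (source_filenames, files_to_get) for a manager-based compile."""
    source_filenames = []
    files_to_get = {}

    # Manager source, and its header for languages that have one.
    exts = [source_ext] + ([header_ext] if header_ext is not None else [])
    for ext in exts:
        manager_filename = "manager%s" % ext
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = managers[manager_filename].digest

    # User sources, plus the matching headers provided among the managers.
    for pattern, file_ in files.items():
        source_filename = pattern.replace(".%l", source_ext)
        source_filenames.append(source_filename)
        files_to_get[source_filename] = file_.digest
        if header_ext is not None:
            header_filename = pattern.replace(".%l", header_ext)
            source_filenames.append(header_filename)
            files_to_get[header_filename] = managers[header_filename].digest

    return source_filenames, files_to_get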
Exemple #16
0
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]
        header_ext = LANGUAGE_TO_HEADER_EXT_MAP.get(language)

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that the task's task type and submission format agree.
        if len(job.files) != 2:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 2",
                         len(job.files), extra={"operation": job.info})
            return True

        # First and only compilation.
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)
        files_to_get = {}

        source_filenames = []

        # Manager.
        manager_filename = "manager%s" % source_ext
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            job.managers[manager_filename].digest
        # Manager's header.
        if header_ext is not None:
            manager_filename = "manager%s" % header_ext
            source_filenames.append(manager_filename)
            files_to_get[manager_filename] = \
                job.managers[manager_filename].digest

        # User's submissions and headers.
        for filename, file_ in job.files.iteritems():
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = file_.digest
            # Matching header, provided among the managers.
            if header_ext is not None:
                header_filename = filename.replace(".%l", header_ext)
                source_filenames.append(header_filename)
                files_to_get[header_filename] = \
                    job.managers[header_filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Get compilation command and compile.
        executable_filename = "manager"
        commands = get_compilation_commands(language,
                                            source_filenames,
                                            executable_filename)
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
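
The TODO at the top of these last two examples suggests checking the submitted file count against the task definition rather than inside compile(). A minimal sketch of that idea; the function and its call site are hypothetical:

def submission_format_matches(expected_file_count, submission_format):
    """Return True if the task asks for exactly the expected number of files."""
    return len(submission_format) == expected_file_count

# Hypothetical use while validating a task definition:
# if not submission_format_matches(2, task.submission_format):
#     raise ValueError("task type expects exactly 2 submitted files")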