Example #1
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension

        # Prepare the files to copy in the sandbox and to add to the
        # compilation command.
        files_to_get = {}
        source_filenames = []
        # The stub, which must have been provided (copy and add to compilation).
        stub_filename = "stub%s" % source_ext
        if not check_manager_present(job, stub_filename):
            return
        source_filenames.append(stub_filename)
        files_to_get[stub_filename] = job.managers[stub_filename].digest
        # User's submitted file(s) (copy and add to compilation).
        for codename, file_ in iteritems(job.files):
            source_filename = codename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = file_.digest
        # Any other useful manager (just copy).
        for filename, manager in iteritems(job.managers):
            if is_manager_for_compilation(filename, language):
                files_to_get[filename] = manager.digest

        # Prepare the compilation command
        executable_filename = self._executable_filename(iterkeys(job.files))
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.path)

        # Copy all required files in the sandbox.
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables.
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup.
        delete_sandbox(sandbox, job.success)
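For reference, language.get_compilation_commands(...) here returns a list of command lines (each itself an argv list) that compilation_step executes in order inside the sandbox. A minimal sketch of what such a value could look like for a C++ submission; the compiler path and flags are illustrative assumptions, not taken from the snippet above:

    # Hypothetical shape of the return value of get_compilation_commands
    # for source_filenames=["stub.cpp", "sol.cpp"] and executable "sol".
    def get_compilation_commands(source_filenames, executable_filename):
        return [["/usr/bin/g++", "-O2", "-o", executable_filename]
                + source_filenames]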
Example #2
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension

        if not check_files_number(job, 1):
            return

        user_file_format = next(iterkeys(job.files))
        user_source_filename = user_file_format.replace(".%l", source_ext)
        executable_filename = user_file_format.replace(".%l", "")

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line, and
        # we check that it exists.
        source_filenames = [user_source_filename]
        if self._uses_grader():
            grader_source_filename = Batch.GRADER_BASENAME + source_ext
            if not check_manager_present(job, grader_source_filename):
                return
            source_filenames.insert(0, grader_source_filename)

        # Prepare the compilation command.
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.path)

        # Copy required files in the sandbox (includes the grader if present).
        sandbox.create_file_from_storage(user_source_filename,
                                         job.files[user_file_format].digest)
        for filename, manager in iteritems(job.managers):
            if is_manager_for_compilation(filename, language):
                sandbox.create_file_from_storage(filename, manager.digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables.
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup.
        delete_sandbox(sandbox, job.success)
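When self._uses_grader() is true, the grader source is compiled together with, and listed before, the contestant's file: the grader owns the program's entry point and calls into the contestant's code. A minimal sketch of that split for a Python task; the module and function names (solution, solve) are made up for illustration:

    # grader.py (hypothetical): owns main(), performs all I/O, and
    # delegates the actual computation to the contestant's solution.
    from solution import solve  # the contestant implements solve()

    def main():
        n = int(input())
        print(solve(n))

    if __name__ == "__main__":
        main()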
Example #3
    def compile(self):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = self.job.language

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(self.job.files) != 1:
            self.job.success = True
            self.job.compilation_success = False
            self.job.text = "Invalid files in submission"
            logger.error("Submission contains %d files, expecting 1" % len(self.job.files))
            return True

        # Create the sandbox
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = self.job.files.keys()[0]
        source_filenames = []
        # Stub.
        source_filenames.append("stub.%s" % language)
        files_to_get[source_filenames[-1]] = self.job.managers["stub.%s" % language].digest
        # User's submission.
        source_filenames.append(format_filename.replace("%l", language))
        files_to_get[source_filenames[-1]] = self.job.files[format_filename].digest
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        command = get_compilation_command(language, source_filenames, executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = compilation_step(sandbox, command)

        # Retrieve the compiled executables
        self.job.success = operation_success
        self.job.compilation_success = compilation_success
        self.job.plus = plus
        self.job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename, "Executable %s for %s" % (executable_filename, self.job.info)
            )
            self.job.executables[executable_filename] = Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
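In this older variant of the API the language is a plain string (such as "cpp") and filenames in the submission format carry a "%l" placeholder, so both the source filename and the executable name fall out of simple string replacement:

    >>> "solution.%l".replace("%l", "cpp")   # source filename
    'solution.cpp'
    >>> "solution.%l".replace(".%l", "")     # executable filename
    'solution'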
Example #4
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        sandbox = create_sandbox(
            file_cacher,
            multithreaded=job.multithreaded_sandbox,
            name="evaluate")
        job.sandboxes.append(sandbox.path)

        # Immediately prepare the skeleton to return.
        job.plus = {}

        outcome = None
        text = []

        user_output_filename = self._get_user_output_filename(job)

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if user_output_filename not in job.files:
            job.success = True
            job.outcome = "0.0"
            job.text = [N_("File not submitted")]
            return

        # First and only step: diffing (manual or with manager).

        # Put user output and reference solution into the sandbox.
        sandbox.create_file_from_storage(
            OutputOnly.OUTPUT_FILENAME, job.files[user_output_filename].digest)
        sandbox.create_file_from_storage(
            OutputOnly.CORRECT_OUTPUT_FILENAME, job.output)

        if self._uses_checker():
            # Checker also requires the input file.
            sandbox.create_file_from_storage(
                OutputOnly.INPUT_FILENAME, job.input)
            success, outcome, text = OutputOnly._run_checker(sandbox, job)
        else:
            success = True
            outcome, text = white_diff_step(
                sandbox,
                OutputOnly.OUTPUT_FILENAME,
                OutputOnly.CORRECT_OUTPUT_FILENAME)

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
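white_diff_step compares the contestant's output against the reference while tolerating whitespace differences, yielding outcome 1.0 on a match and 0.0 otherwise (its result feeds job.outcome directly). A minimal sketch of the comparison it performs; CMS's actual implementation also caps file sizes and produces translatable messages:

    def white_diff(path_a, path_b):
        # Compare token streams: the amount and kind of whitespace
        # between tokens, and trailing blank lines, are ignored.
        with open(path_a) as a, open(path_b) as b:
            return a.read().split() == b.read().split()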
Example #5
    def compile(self):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = self.submission.language

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(self.submission.files) != 1:
            return self.finish_compilation(
                True, False, "Invalid files in submission",
                to_log="Submission contains %d files, expecting 1" %
                len(self.submission.files))

        # First and only compilation.
        sandbox = create_sandbox(self)
        files_to_get = {}
        format_filename = self.submission.files.keys()[0]
        source_filenames = [format_filename.replace("%l", language)]
        files_to_get[source_filenames[0]] = \
            self.submission.files[format_filename].digest
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager.
        if self.parameters[0] == "grader":
            source_filenames.append("grader.%s" % language)
            files_to_get[source_filenames[1]] = \
                self.submission.task.managers["grader.%s" % language].digest
        executable_filename = format_filename.replace(".%l", "")
        command = get_compilation_command(language,
                                          source_filenames,
                                          executable_filename)
        operation_success, compilation_success, text, _ = \
            self.compilation_step(
                sandbox,
                command,
                files_to_get,
                {executable_filename: "Executable %s for submission %s" %
                 (executable_filename, self.submission.id)})
        delete_sandbox(sandbox)

        # We had only one compilation, hence we pipe directly its
        # result to the finalization.
        return self.finish_compilation(operation_success, compilation_success,
                                       text)
Example #6
File: OutputOnly.py Project: volpino/cms
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        sandbox = create_sandbox(self)

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%03d.txt" % test_number not in self.submission.files:
            return self.finish_evaluation_testcase(
                test_number,
                True, 0.0, "File not submitted.")
        # First and only step: diffing (manual or with manager).
        output_digest = self.submission.files["output_%03d.txt" %
                                              test_number].digest

        # TODO: this should check self.parameters, not managers.
        if len(self.submission.task.managers) == 0:
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success, outcome, text = self.white_diff_step(
                sandbox,
                "output.txt", "res.txt",
                {"res.txt":
                 self.submission.task.testcases[test_number].output,
                 "output.txt": output_digest})
        else:
            # Manager present: wonderful, it'll do all the work.
            manager_filename = self.submission.task.managers.keys()[0]
            success, outcome, text = self.evaluation_step(
                sandbox,
                ["./%s" % manager_filename,
                 "input.txt", "res.txt", "output.txt"],
                {manager_filename:
                 self.submission.task.managers[manager_filename].digest},
                {"output.txt": output_digest,
                 "res.txt": self.submission.task.testcases[test_number].output,
                 "input.txt":
                 self.submission.task.testcases[test_number].input},
                allow_path=["input.txt", "output.txt", "res.txt"],
                final=True)

        # Whatever happened, we conclude.
        delete_sandbox(sandbox)
        return self.finish_evaluation_testcase(test_number,
                                               success, outcome, text)
Example #7
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""

        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher, job.multithreaded_sandbox)
        sandbox_user = [
            create_sandbox(file_cacher, job.multithreaded_sandbox)
            for i in indices
        ]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename: job.managers[manager_filename].digest
        }
        manager_files_to_get = {"input.txt": job.input}
        manager_allow_dirs = fifo_dir
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename,
                                                 digest,
                                                 executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            num_processes * job.time_limit,
            0,
            allow_dirs=manager_allow_dirs,
            writable_files=["output.txt"],
            stdin_redirect="input.txt",
            stdout_redirect="output.txt",
        )

        # Second step: we start the user submission compiled with the
        # stub.
        language = get_language(job.language)
        executable_filename = job.executables.keys()[0]
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(executable_filename,
                                                        main="grader",
                                                        args=args)
            user_allow_dirs = [fifo_dir[i]]
            for filename, digest in executables_to_get.iteritems():
                sandbox_user[i].create_file_from_storage(filename,
                                                         digest,
                                                         executable=True)
            # Assumes that the actual execution of the user solution
            # is the last command in commands, and that the previous
            # ones are "setup" commands that don't need tight control.
            if len(commands) > 1:
                evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        # The sandbox may report a normal exit even though the user
        # process consumed its whole time budget; normalize that case
        # to a timeout.
        if plus_user["exit_status"] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user["exit_status"] = Sandbox.EXIT_TIMEOUT

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
            if job.get_output:
                job.user_output = None
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome = None
            text = None

            input_filename = "input.txt"
            output_filename = "output.txt"
            # Check that the output file was created
            if not sandbox_mgr.file_exists(output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"), output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox_mgr.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=1024 * 1024 * 10)

                # If just asked to execute, fill text and set dummy
                # outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Put the reference solution into the sandbox
                    sandbox_mgr.create_file_from_storage("res.txt", job.output)

                    # Check the solution with white_diff
                    if self.parameters[1] == "diff":
                        outcome, text = white_diff_step(
                            sandbox_mgr, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[1] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named 'checker')",
                                extra={"operation": job.info})
                            success = False

                        else:
                            sandbox_mgr.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            # Rewrite input file. The untrusted
                            # contestant program should not be able to
                            # modify it; however, the grader may
                            # destroy the input file to prevent the
                            # contestant's program from directly
                            # accessing it. Since we cannot create
                            # files already existing in the sandbox,
                            # we try removing the file first.
                            try:
                                sandbox_mgr.remove_file(input_filename)
                            except OSError:
                                # Let us be extra sure that the file
                                # was actually removed and we did not
                                # mess up with permissions.
                                assert not sandbox_mgr.file_exists(
                                    input_filename)
                            sandbox_mgr.create_file_from_storage(
                                input_filename, job.input)

                            # Allow using any number of processes (because
                            # e.g. one may want to write a bash checker that
                            # calls other processes). Set to a high but
                            # finite number to avoid fork-bombing the worker.
                            sandbox_mgr.max_processes = 1000

                            success, _ = evaluation_step(
                                sandbox_mgr, [[
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox_mgr)
                            except ValueError as e:
                                logger.error(
                                    "Invalid output from "
                                    "comparator: %s",
                                    e.message,
                                    extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized second parameter"
                                         " `%s' for Communication tasktype." %
                                         self.parameters[1])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
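The FIFOs are the only channel between the manager and each user process: the manager gets fifo_in[i]/fifo_out[i] on its command line, and process i gets the same pair in reversed roles. A self-contained sketch of that handshake with ordinary processes, stripped of the sandboxing:

    import os
    import subprocess
    import tempfile

    fifo_dir = tempfile.mkdtemp()
    fifo_in = os.path.join(fifo_dir, "in0")
    fifo_out = os.path.join(fifo_dir, "out0")
    os.mkfifo(fifo_in)
    os.mkfifo(fifo_out)

    # Stand-in for the contestant process: echo in0 back onto out0.
    user = subprocess.Popen(
        ["sh", "-c", "cat < '%s' > '%s'" % (fifo_in, fifo_out)])

    # Manager side: send a query, then read the reply.
    with open(fifo_in, "w") as f:
        f.write("42\n")
    with open(fifo_out) as f:
        print(f.read())  # prints "42"
    user.wait()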
Example #8
    def compile(self):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = self.job.language
        header = HEADERS_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(self.job.files) != 2:
            self.job.success = True
            self.job.compilation_success = False
            self.job.text = "Invalid files in submission"
            logger.warning("Submission contains %d files, expecting 2" %
                           len(self.job.files))
            return True

        # First and only compilation.
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)
        files_to_get = {}

        # User's submissions and headers.
        source_filenames = []
        for filename, _file in self.job.files.iteritems():
            source_filename = filename.replace("%l", language)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = _file.digest
            # Headers.
            header_filename = filename.replace("%l", header)
            source_filenames.append(header_filename)
            files_to_get[header_filename] = \
                self.job.managers[header_filename].digest

        # Manager.
        manager_filename = "manager.%s" % language
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            self.job.managers[manager_filename].digest
        # Manager's header.
        manager_filename = "manager.%s" % header
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            self.job.managers[manager_filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Get compilation command and compile.
        executable_filename = "manager"
        command = get_compilation_command(language, source_filenames,
                                          executable_filename)
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, command)

        # Retrieve the compiled executables
        self.job.success = operation_success
        self.job.compilation_success = compilation_success
        self.job.plus = plus
        self.job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, self.job.info))
            self.job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
Example #9
File: Batch.py Project: akmohtashami/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files), extra={"operation": job.info})
            return

        # Create the sandbox.
        sandbox = create_sandbox(
            file_cacher,
            multithreaded=job.multithreaded_sandbox,
            name="compile")
        job.sandboxes.append(sandbox.path)

        user_file_format = next(iterkeys(job.files))
        user_source_filename = user_file_format.replace(".%l", source_ext)
        executable_filename = user_file_format.replace(".%l", "")

        # Copy required files in the sandbox (includes the grader if present).
        sandbox.create_file_from_storage(
            user_source_filename, job.files[user_file_format].digest)
        for filename in iterkeys(job.managers):
            if Batch._is_manager_for_compilation(filename):
                sandbox.create_file_from_storage(
                    filename, job.managers[filename].digest)

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line.
        source_filenames = [user_source_filename]
        if self._uses_grader():
            grader_source_filename = Batch.GRADER_BASENAME + source_ext
            source_filenames.insert(0, grader_source_filename)

        # Prepare the compilation command.
        commands = language.get_compilation_commands(
            source_filenames, executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
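Batch._is_manager_for_compilation decides which managers are copied next to the sources. Its exact rule isn't shown here, but here is a plausible sketch, assuming the extension-based filter that a later stub-compilation snippet spells out with HEADER_EXTS, SOURCE_EXTS and OBJECT_EXTS:

    # Assumption: "useful for compilation" means headers, extra sources
    # and pre-built objects; the real helper may differ in detail.
    HEADER_EXTS = [".h", ".hpp"]
    SOURCE_EXTS = [".c", ".cpp", ".pas"]
    OBJECT_EXTS = [".o"]

    def is_manager_for_compilation(filename):
        return any(filename.endswith(ext)
                   for ext in HEADER_EXTS + SOURCE_EXTS + OBJECT_EXTS)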
Example #10
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Immediately prepare the skeleton to return
        self.job.evaluations[test_number] = {'sandboxes': [sandbox.path],
                                             'plus': {}}
        evaluation = self.job.evaluations[test_number]
        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%03d.txt" % test_number not in self.job.files:
            evaluation['success'] = True
            evaluation['outcome'] = 0.0
            evaluation['text'] = "File not submitted."
            return True

        # First and only step: diffing (manual or with manager).
        output_digest = self.job.files["output_%03d.txt" %
                                       test_number].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage(
            "res.txt",
            self.job.testcases[test_number].output)
        sandbox.create_file_from_storage(
            "output.txt",
            output_digest)

        # TODO: this should check self.parameters, not managers.
        if len(self.job.managers) == 0:
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(
                sandbox, "output.txt", "res.txt")

        else:
            # Manager present: wonderful, it'll do all the work.
            manager_filename = self.job.managers.keys()[0]
            sandbox.create_file_from_storage(
                manager_filename,
                self.job.managers[manager_filename].digest,
                executable=True)
            input_digest = self.job.testcases[test_number].input
            sandbox.create_file_from_storage(
                "input.txt",
                input_digest)
            success, _ = evaluation_step(
                sandbox,
                ["./%s" % manager_filename,
                 "input.txt", "res.txt", "output.txt"],
                allow_path=["input.txt", "output.txt", "res.txt"])
            if success:
                outcome, text = extract_outcome_and_text(sandbox)

        # Whatever happened, we conclude.
        evaluation['success'] = success
        evaluation['outcome'] = str(outcome) if outcome is not None else None
        evaluation['text'] = text
        delete_sandbox(sandbox)
        return success
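extract_outcome_and_text turns the checker's output into the (outcome, text) pair stored on the evaluation; a malformed outcome raises ValueError, which callers log as an invalid comparator. A hypothetical re-implementation, assuming the evaluation step redirected the checker's stdout and stderr into files (the filenames are assumptions):

    import io
    import os

    def extract_outcome_and_text(sandbox_path):
        # Assumption: outcome (a float) on the checker's stdout,
        # human-readable message on its stderr.
        with io.open(os.path.join(sandbox_path, "stdout.txt")) as f:
            outcome = float(f.readline())  # ValueError if malformed
        with io.open(os.path.join(sandbox_path, "stderr.txt")) as f:
            text = [f.readline().strip()]
        return outcome, text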
Example #11
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        source_filenames = []
        # Stub.
        stub_filename = "stub%s" % source_ext
        source_filenames.append(stub_filename)
        files_to_get[stub_filename] = job.managers[stub_filename].digest
        # User's submission.
        for filename, fileinfo in job.files.iteritems():
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = fileinfo.digest

        # Also copy all managers that might be useful during compilation:
        # headers, extra sources and pre-built objects.
        for filename in job.managers.iterkeys():
            if any(filename.endswith(ext)
                   for exts in (HEADER_EXTS, SOURCE_EXTS, OBJECT_EXTS)
                   for ext in exts):
                files_to_get[filename] = job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = \
            "_".join(pattern.replace(".%l", "")
                     for pattern in job.files.keys())
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
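Since the submission contains two files, the executable name joins both stems; a quick illustration of the expression above (the filenames are made up):

    >>> patterns = ["encoder.%l", "decoder.%l"]  # keys of job.files
    >>> "_".join(p.replace(".%l", "") for p in patterns)
    'encoder_decoder'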
Example #12
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        # "first" and "second" refer to the two manager instances.
        first_sandbox = create_sandbox(self)
        second_sandbox = create_sandbox(self)
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)

        # First step: we start the first manager.
        first_filename = "manager"
        first_command = ["./%s" % first_filename, "0", fifo]
        first_executables_to_get = {
            first_filename:
            self.job.executables[first_filename].digest
            }
        first_files_to_get = {
            "input.txt": self.job.testcases[test_number].input
            }
        first_allow_path = ["input.txt", fifo]

        # Put the required files into the sandbox
        for filename, digest in first_executables_to_get.iteritems():
            first_sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in first_files_to_get.iteritems():
            first_sandbox.create_file_from_storage(filename, digest)

        first = evaluation_step_before_run(
            first_sandbox,
            first_command,
            self.job.time_limit,
            self.job.memory_limit,
            first_allow_path,
            stdin_redirect="input.txt",
            wait=False)

        # Second step: we start the second manager.
        second_filename = "manager"
        second_command = ["./%s" % second_filename, "1", fifo]
        second_executables_to_get = {
            second_filename:
            self.job.executables[second_filename].digest
            }
        second_files_to_get = {}
        second_allow_path = [fifo, "output.txt"]

        # Put the required files into the second sandbox
        for filename, digest in second_executables_to_get.iteritems():
            second_sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in second_files_to_get.iteritems():
            second_sandbox.create_file_from_storage(filename, digest)

        second = evaluation_step_before_run(
            second_sandbox,
            second_command,
            self.job.time_limit,
            self.job.memory_limit,
            second_allow_path,
            stdout_redirect="output.txt",
            wait=False)

        # Consume output.
        wait_without_std([second, first])
        # TODO: check exit codes with translate_box_exitcode.

        success_first, first_plus = \
            evaluation_step_after_run(first_sandbox)
        success_second, second_plus = \
            evaluation_step_after_run(second_sandbox)

        self.job.evaluations[test_number] = {'sandboxes': [first_sandbox.path,
                                                           second_sandbox.path],
                                             'plus': second_plus}
        outcome = None
        text = None
        evaluation = self.job.evaluations[test_number]
        success = True

        # Error in the sandbox: report failure!
        if not success_first or not success_second:
            success = False

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(first_plus) or \
                not is_evaluation_passed(second_plus):
            outcome = 0.0
            if not is_evaluation_passed(first_plus):
                text = human_evaluation_message(first_plus)
            else:
                text = human_evaluation_message(second_plus)
            if self.job.get_output:
                evaluation['output'] = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not second_sandbox.file_exists('output.txt'):
                outcome = 0.0
                text = "Execution didn't produce file output.txt"
                if self.job.get_output:
                    evaluation['output'] = None

            else:
                # If asked so, put the output file into the storage
                if self.job.get_output:
                    evaluation['output'] = second_sandbox.get_file_to_storage(
                        "output.txt",
                        "Output file for testcase %d in job %s" %
                        (test_number, self.job.info))

                # If not asked otherwise, evaluate the output file
                if not self.job.only_execution:
                    # Put the reference solution into the sandbox
                    second_sandbox.create_file_from_storage(
                        "res.txt",
                        self.job.testcases[test_number].output)

                    outcome, text = white_diff_step(
                        second_sandbox, "output.txt", "res.txt")

        # Whatever happened, we conclude.
        evaluation['success'] = success
        evaluation['outcome'] = outcome
        evaluation['text'] = text

        delete_sandbox(first_sandbox)
        delete_sandbox(second_sandbox)

        return success
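The evaluation_step_before_run / wait_without_std / evaluation_step_after_run split exists because the two halves must run concurrently: each may block on the FIFO until its peer makes progress, so starting the second one only after the first finished would deadlock. The same pattern with plain subprocesses, as a sketch without the sandbox machinery:

    import subprocess

    # Start both ends of the pipeline without waiting on either...
    first = subprocess.Popen(["./manager", "0", "fifo"])
    second = subprocess.Popen(["./manager", "1", "fifo"])
    # ...and only then wait for both; waiting on the first alone could
    # hang forever while it sits blocked on the FIFO.
    for proc in (second, first):
        proc.wait()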
Example #13
File: TwoSteps.py Project: Corea/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]
        header_ext = LANGUAGE_TO_HEADER_EXT_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 2:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 2",
                         len(job.files), extra={"operation": job.info})
            return True

        # First and only compilation.
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)
        files_to_get = {}

        # User's submissions and headers.
        source_filenames = []
        for filename, file_ in job.files.iteritems():
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = file_.digest
            # Headers.
            header_filename = filename.replace(".%l", header_ext)
            source_filenames.append(header_filename)
            files_to_get[header_filename] = \
                job.managers[header_filename].digest

        # Manager.
        manager_filename = "manager%s" % source_ext
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            job.managers[manager_filename].digest
        # Manager's header.
        manager_filename = "manager%s" % header_ext
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            job.managers[manager_filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Get compilation command and compile.
        executable_filename = "manager"
        commands = get_compilation_commands(language,
                                            source_filenames,
                                            executable_filename)
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
Example #14
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Immediately prepare the skeleton to return
        self.job.evaluations[test_number] = {"sandboxes": [sandbox.path], "plus": {}}
        evaluation = self.job.evaluations[test_number]
        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%03d.txt" % test_number not in self.job.files:
            evaluation["success"] = True
            evaluation["outcome"] = 0.0
            evaluation["text"] = "File not submitted."
            return True

        # First and only step: diffing (manual or with manager).
        output_digest = self.job.files["output_%03d.txt" % test_number].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage("res.txt", self.job.testcases[test_number].output)
        sandbox.create_file_from_storage("output.txt", output_digest)

        if self.job.task_type_parameters[0] == "diff":
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(sandbox, "output.txt", "res.txt")

        elif self.job.task_type_parameters[0] == "comparator":
            # Manager present: wonderful, it'll do all the work.
            manager_filename = "checker"
            if manager_filename not in self.job.managers:
                logger.error("Configuration error: missing or invalid "
                             "comparator (it must be named `checker')")
                success = False
            else:
                sandbox.create_file_from_storage(
                    manager_filename, self.job.managers[manager_filename].digest, executable=True
                )
                input_digest = self.job.testcases[test_number].input
                sandbox.create_file_from_storage("input.txt", input_digest)
                success, _ = evaluation_step(
                    sandbox,
                    ["./%s" % manager_filename, "input.txt", "res.txt", "output.txt"],
                    allow_path=["input.txt", "output.txt", "res.txt"],
                )
                if success:
                    outcome, text = extract_outcome_and_text(sandbox)

        else:
            raise ValueError(
                "Unrecognized first parameter "
                "`%s' for OutputOnly tasktype. "
                "Should be `diff' or `comparator'." % self.job.task_type_parameters[0]
            )

        # Whatever happened, we conclude.
        evaluation["success"] = success
        evaluation["outcome"] = str(outcome) if outcome is not None else None
        evaluation["text"] = text
        delete_sandbox(sandbox)
        return success
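Taken together, these snippets imply the comparator contract: an executable named "checker" is invoked inside the sandbox as ./checker input.txt res.txt output.txt, and extract_outcome_and_text then picks up its verdict. A minimal checker sketch, under the same assumption as above that the outcome goes to stdout and the message to stderr:

    #!/usr/bin/env python
    # Hypothetical checker: whitespace-insensitive comparison of the
    # contestant's output against the reference output.
    import sys

    def main():
        _input_file, res_file, output_file = sys.argv[1:4]
        with open(res_file) as res, open(output_file) as out:
            ok = res.read().split() == out.read().split()
        sys.stdout.write("1.0\n" if ok else "0.0\n")       # outcome
        sys.stderr.write("Output is correct\n" if ok
                         else "Output isn't correct\n")    # message

    if __name__ == "__main__":
        main()

Real checkers typically also use input.txt, so that verdicts can depend on the test data.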
Example #15
File: Batch.py Project: smadbe/cms
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)

        # Prepare the execution
        executable_filename = job.executables.keys()[0]
        language = get_language(job.language)
        commands = language.get_evaluation_commands(
            executable_filename,
            main="grader" if self._uses_grader() else executable_filename)
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
            }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        else:
            files_allowing_write.append(output_filename)
        files_to_get = {
            input_filename: job.input
            }

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually performs the execution
        success, plus = evaluation_step(
            sandbox,
            commands,
            job.time_limit,
            job.memory_limit,
            writable_files=files_allowing_write,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        output_filename]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy
                # outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage(
                        "res.txt",
                        job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error("Configuration error: missing or "
                                         "invalid comparator (it must be "
                                         "named 'checker')",
                                         extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            # Rewrite input file. The untrusted
                            # contestant program should not be able to
                            # modify it; however, the grader may
                            # destroy the input file to prevent the
                            # contestant's program from directly
                            # accessing it. Since we cannot create
                            # files already existing in the sandbox,
                            # we try removing the file first.
                            try:
                                sandbox.remove_file(input_filename)
                            except OSError:
                                # Let us be extra sure that the file
                                # was actually removed and we did not
                                # mess up with permissions.
                                assert not sandbox.file_exists(input_filename)
                            sandbox.create_file_from_storage(
                                input_filename,
                                job.input)

                            # Allow using any number of processes (because
                            # e.g. one may want to write a bash checker that
                            # calls other processes). Set to a high but
                            # finite number to avoid fork-bombing the worker.
                            sandbox.max_processes = 1000

                            success, _ = evaluation_step(
                                sandbox,
                                [["./%s" % manager_filename,
                                  input_filename, "res.txt", output_filename]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s", e.message,
                                             extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])
Example #16
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Immediately prepare the skeleton to return
        self.job.evaluations[test_number] = {
            'sandboxes': [sandbox.path],
            'plus': {}
        }
        evaluation = self.job.evaluations[test_number]
        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%03d.txt" % test_number not in self.job.files:
            evaluation['success'] = True
            evaluation['outcome'] = "0.0"
            evaluation['text'] = "File not submitted."
            return True

        # First and only step: diffing (manual or with manager).
        output_digest = self.job.files["output_%03d.txt" % test_number].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage(
            "res.txt", self.job.testcases[test_number].output)
        sandbox.create_file_from_storage("output.txt", output_digest)

        if self.job.task_type_parameters[0] == "diff":
            # No manager: we do a white_diff between the submitted
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(sandbox, "output.txt", "res.txt")

        elif self.job.task_type_parameters[0] == "comparator":
            # Manager present: it will do the whole job.
            manager_filename = "checker"
            if manager_filename not in self.job.managers:
                logger.error("Configuration error: missing or "
                             "invalid comparator (it must be "
                             "named `checker')")
                success = False
            else:
                sandbox.create_file_from_storage(
                    manager_filename,
                    self.job.managers[manager_filename].digest,
                    executable=True)
                input_digest = self.job.testcases[test_number].input
                sandbox.create_file_from_storage("input.txt", input_digest)
                success, _ = evaluation_step(sandbox, [
                    "./%s" % manager_filename, "input.txt", "res.txt",
                    "output.txt"
                ])
                if success:
                    outcome, text = extract_outcome_and_text(sandbox)

        else:
            raise ValueError("Unrecognized first parameter "
                             "`%s' for OutputOnly tasktype. "
                             "Should be `diff' or `comparator'." %
                             self.job.task_type_parameters[0])

        # Whatever happened, we conclude.
        evaluation['success'] = success
        evaluation['outcome'] = str(outcome) if outcome is not None else None
        evaluation['text'] = text
        delete_sandbox(sandbox)
        return success
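The `diff` branch above delegates to white_diff_step, which compares the submitted file against res.txt while tolerating whitespace differences. A minimal sketch of the idea, assuming that runs of whitespace and trailing blank lines are insignificant (an illustration, not the exact CMS algorithm):

def white_diff(path_a, path_b):
    """Illustrative whitespace-insensitive comparison: tokenize each
    line and ignore trailing blank lines. The real white_diff_step is
    more careful, so treat this as a sketch of the idea only."""
    def normalized(path):
        with open(path) as f:
            lines = [line.split() for line in f]
        while lines and not lines[-1]:
            lines.pop()
        return lines
    return normalized(path_a) == normalized(path_b)

A task type would then map a match to outcome 1.0 and a mismatch to 0.0, together with a suitable message.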
Example #18
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(
            file_cacher,
            multithreaded=job.multithreaded_sandbox,
            name="manager_evaluate")
        sandbox_user = [
            create_sandbox(
                file_cacher,
                multithreaded=job.multithreaded_sandbox,
                name="user_evaluate")
            for i in indices]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # First step: prepare the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename:
            job.managers[manager_filename].digest
            }
        manager_files_to_get = {
            "input.txt": job.input
            }
        manager_allow_dirs = fifo_dir
        for filename, digest in iteritems(manager_executables_to_get):
            sandbox_mgr.create_file_from_storage(
                filename, digest, executable=True)
        for filename, digest in iteritems(manager_files_to_get):
            sandbox_mgr.create_file_from_storage(filename, digest)

        # Second step: load the executables for the user processes
        # (done before launching the manager so that it does not
        # impact its wall clock time).
        assert len(job.executables) == 1
        executable_filename = next(iterkeys(job.executables))
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
            }
        for i in indices:
            for filename, digest in iteritems(executables_to_get):
                sandbox_user[i].create_file_from_storage(
                    filename, digest, executable=True)

        # Third step: start the manager.
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            num_processes * job.time_limit,
            0,
            allow_dirs=manager_allow_dirs,
            writable_files=["output.txt"],
            stdin_redirect="input.txt")

        # Fourth step: start the user submissions compiled with the stub.
        language = get_language(job.language)
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(
                executable_filename,
                main="stub",
                args=args)
            user_allow_dirs = [fifo_dir[i]]
            # Assume that the actual execution of the user solution is
            # the last command in commands, and that the previous ones
            # are "setup" commands that do not need tight control
            # (hence the generous fixed limits passed below).
            if len(commands) > 1:
                evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        if plus_user['exit_status'] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, []
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file, provided that it exists
        if job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    "output.txt",
                    "Output file in job %s" % job.info)
            else:
                job.user_output = None

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
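The FIFO plumbing above (one in/out pair per user process, created with os.mkfifo and handed to the manager and the stub on opposite ends) is easier to see in isolation. A minimal sketch with a single pair, using shell one-liners in place of the real manager and stub; every path and command here is illustrative:

import os
import subprocess
import tempfile

fifo_dir = tempfile.mkdtemp()
fifo_in = os.path.join(fifo_dir, "in0")
fifo_out = os.path.join(fifo_dir, "out0")
os.mkfifo(fifo_in)
os.mkfifo(fifo_out)

# The "manager" sends a number down in0, then reads the reply from
# out0; the "user process" doubles whatever it receives.
manager = subprocess.Popen(
    ["/bin/sh", "-c", "echo 21 > %s; cat %s" % (fifo_in, fifo_out)])
user = subprocess.Popen(
    ["/bin/sh", "-c", "read x < %s; echo $((x * 2)) > %s" % (fifo_in, fifo_out)])
manager.wait()   # prints 42
user.wait()

Opening a FIFO blocks until both ends are open, which is why the real code starts the manager and all user processes before waiting on any of them.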
Example #19
    def compile(self):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = self.submission.language
        header = HEADERS_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(self.submission.files) != 2:
            return self.finish_compilation(
                True, False, "Invalid files in submission",
                to_log="Submission contains %d files, expecting 2" %
                len(self.submission.files))

        # First and only compilation.
        sandbox = create_sandbox(self)
        files_to_get = {}

        # User's submissions and headers.
        source_filenames = []
        for filename, _file in self.submission.files.iteritems():
            source_filename = filename.replace("%l", language)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = _file.digest
            # Headers.
            header_filename = filename.replace("%l", header)
            source_filenames.append(header_filename)
            files_to_get[header_filename] = \
                self.submission.task.managers[header_filename].digest

        # Manager.
        manager_filename = "manager.%s" % language
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
                self.submission.task.managers[manager_filename].digest
        # Manager's header.
        manager_filename = "manager.%s" % header
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
                self.submission.task.managers[manager_filename].digest

        # Get compilation command and compile.
        executable_filename = "manager"
        command = get_compilation_command(language,
                                          source_filenames,
                                          executable_filename)
        operation_success, compilation_success, text, _ = \
            self.compilation_step(
                sandbox,
                command,
                files_to_get,
                {executable_filename: "Executable %s for submission %s" %
                 (executable_filename, self.submission.id)})
        delete_sandbox(sandbox)

        # We had only one compilation, hence we pipe directly its
        # result to the finalization.
        return self.finish_compilation(operation_success, compilation_success,
                                       text)
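The "%l" placeholder used throughout these examples is resolved by plain string replacement, once with the language name to get the source file and once with the corresponding header extension. A worked example; the HEADERS_MAP contents are an assumption consistent with how this older code uses it:

# Hypothetical HEADERS_MAP consistent with its use above.
HEADERS_MAP = {"c": "h", "cpp": "h", "pas": "lib"}

filename, language = "encoder.%l", "cpp"
source_filename = filename.replace("%l", language)               # "encoder.cpp"
header_filename = filename.replace("%l", HEADERS_MAP[language])  # "encoder.h"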
Example #20
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher)
        sandbox_user = create_sandbox(file_cacher)
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo_in = os.path.join(fifo_dir, "in")
        fifo_out = os.path.join(fifo_dir, "out")
        os.mkfifo(fifo_in)
        os.mkfifo(fifo_out)
        os.chmod(fifo_dir, 0o755)
        os.chmod(fifo_in, 0o666)
        os.chmod(fifo_out, 0o666)

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename, fifo_in, fifo_out]
        manager_executables_to_get = {
            manager_filename: job.managers[manager_filename].digest
        }
        manager_files_to_get = {"input.txt": job.input}
        manager_allow_dirs = [fifo_dir]
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename,
                                                 digest,
                                                 executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)
        manager = evaluation_step_before_run(sandbox_mgr,
                                             manager_command,
                                             job.time_limit,
                                             0,
                                             allow_dirs=manager_allow_dirs,
                                             writable_files=["output.txt"],
                                             stdin_redirect="input.txt")

        # Second step: we start the user submission compiled with the
        # stub.
        executable_filename = job.executables.keys()[0]
        command = ["./%s" % executable_filename, fifo_out, fifo_in]
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        user_allow_dirs = [fifo_dir]
        for filename, digest in executables_to_get.iteritems():
            sandbox_user.create_file_from_storage(filename,
                                                  digest,
                                                  executable=True)
        process = evaluation_step_before_run(sandbox_user,
                                             command,
                                             job.time_limit,
                                             job.memory_limit,
                                             allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std([process, manager])
        # TODO: check exit codes with translate_box_exitcode.

        success_user, plus_user = \
            evaluation_step_after_run(sandbox_user)
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        job.sandboxes = [sandbox_user.path, sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file, provided that it exists
        if job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    "output.txt", "Output file in job %s" % job.info)
            else:
                job.user_output = None

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr)
        delete_sandbox(sandbox_user)
        if not config.keep_sandbox:
            rmtree(fifo_dir)
示例#21
0
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)

        # Prepare the execution
        assert len(job.executables) == 1
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        commands = language.get_evaluation_commands(
            executable_filename,
            main="grader" if self._uses_grader() else executable_filename)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if len(input_filename) == 0:
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if len(output_filename) == 0:
            output_filename = "output.txt"
            stdout_redirect = output_filename
        else:
            files_allowing_write.append(output_filename)
        files_to_get = {input_filename: job.input}

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(sandbox,
                                        commands,
                                        job.time_limit,
                                        job.memory_limit,
                                        writable_files=files_allowing_write,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = []

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"), output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy
                # outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage("res.txt", job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named 'checker')",
                                extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            # Rewrite input file. The untrusted
                            # contestant program should not be able to
                            # modify it; however, the grader may
                            # destroy the input file to prevent the
                            # contestant's program from directly
                            # accessing it. Since we cannot create
                            # files already existing in the sandbox,
                            # we try removing the file first.
                            try:
                                sandbox.remove_file(input_filename)
                            except OSError:
                                # Let us be extra sure that the file
                                # was actually removed and we did not
                                # mess up with permissions.
                                assert not sandbox.file_exists(input_filename)
                            sandbox.create_file_from_storage(
                                input_filename, job.input)

                            # Allow using any number of processes (because
                            # e.g. one may want to write a bash checker that
                            # calls other processes). The limit is still a
                            # high but finite number, to avoid fork-bombing
                            # the worker (see the sketch after this example).
                            sandbox.max_processes = 1000

                            success, _ = evaluation_step(
                                sandbox, [[
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error(
                                    "Invalid output from "
                                    "comparator: %s",
                                    e.message,
                                    extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
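The sandbox.max_processes = 1000 line above deserves a note: the checker is trusted and may legitimately spawn helpers (a bash checker calling other tools, say), but the cap stays finite so that a buggy checker cannot fork-bomb the worker. Outside CMS's sandbox the same idea can be approximated with RLIMIT_NPROC; a sketch, with the caveats that RLIMIT_NPROC counts processes per user rather than per process tree, and that the file names merely mirror the comparator call above:

import resource
import subprocess

def cap_processes(limit=1000):
    # Runs in the child between fork and exec; limits how many
    # processes the checker and its helpers may create.
    resource.setrlimit(resource.RLIMIT_NPROC, (limit, limit))

# Hypothetical invocation mirroring the comparator call above.
subprocess.run(["./checker", "input.txt", "res.txt", "output.txt"],
               preexec_fn=cap_processes)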
Example #22
File: Batch.py Project: sekouperry/cms
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        # Create the sandbox
        sandbox = create_sandbox(self)

        # Prepare the execution
        executable_filename = self.job.executables.keys()[0]
        command = [os.path.join(".", executable_filename)]
        executables_to_get = {
            executable_filename:
            self.job.executables[executable_filename].digest
        }
        input_filename, output_filename = self.job.task_type_parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        files_to_get = {input_filename: self.job.testcases[test_number].input}

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(sandbox,
                                        command,
                                        self.job.time_limit,
                                        self.job.memory_limit,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        self.job.evaluations[test_number] = {
            'sandboxes': [sandbox.path],
            'plus': plus
        }
        outcome = None
        text = None
        evaluation = self.job.evaluations[test_number]

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if self.job.get_output:
                evaluation['output'] = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = "Execution didn't produce file %s" % \
                    (output_filename)
                if self.job.get_output:
                    evaluation['output'] = None

            else:
                # If asked so, put the output file into the storage
                if self.job.get_output:
                    evaluation['output'] = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file for testcase %d in job %s" %
                        (test_number, self.job.info),
                        trunc_len=100 * 1024)

                # If not asked otherwise, evaluate the output file
                if not self.job.only_execution:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage(
                        "res.txt", self.job.testcases[test_number].output)

                    # Check the solution with white_diff
                    if self.job.task_type_parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.job.task_type_parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in self.job.managers:
                            logger.error("Configuration error: missing or "
                                         "invalid comparator (it must be "
                                         "named 'checker')")
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                self.job.managers[manager_filename].digest,
                                executable=True)
                            success, _ = evaluation_step(
                                sandbox, [
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s", e.message)
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.job.task_type_parameters[2])
Example #23
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

        # Create the sandbox
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        source_filenames = []
        # Stub.
        stub_filename = "stub%s" % source_ext
        source_filenames.append(stub_filename)
        files_to_get[stub_filename] = job.managers[stub_filename].digest
        # User's submission.
        for filename, fileinfo in job.files.iteritems():
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = fileinfo.digest

        # Also copy all managers that might be useful during compilation.
        for filename in job.managers.iterkeys():
            if any(filename.endswith(header)
                   for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(source)
                     for source in LANGUAGE_TO_SOURCE_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(obj)
                     for obj in LANGUAGE_TO_OBJ_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = \
            "_".join(pattern.replace(".%l", "")
                     for pattern in job.files.keys())
        commands = get_compilation_commands(language,
                                            source_filenames,
                                            executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
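The executable name built in this example is simply the submission-format patterns with ".%l" stripped and joined by underscores; for instance:

# Worked example of the executable-name construction above.
patterns = ["encode.%l", "decode.%l"]
executable_filename = "_".join(p.replace(".%l", "") for p in patterns)
assert executable_filename == "encode_decode"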
Example #24
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension
        header_ext = language.header_extension

        if not check_files_number(job, 2):
            return

        files_to_get = {}
        source_filenames = []

        # Manager.
        manager_filename = "manager%s" % source_ext
        if not check_manager_present(job, manager_filename):
            return
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            job.managers[manager_filename].digest
        # Manager's header.
        if header_ext is not None:
            manager_filename = "manager%s" % header_ext
            if not check_manager_present(job, manager_filename):
                return
            source_filenames.append(manager_filename)
            files_to_get[manager_filename] = \
                job.managers[manager_filename].digest

        # User's submissions and headers.
        for filename, file_ in iteritems(job.files):
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = file_.digest
            # Headers.
            if header_ext is not None:
                header_filename = filename.replace(".%l", header_ext)
                if not check_manager_present(job, header_filename):
                    return
                source_filenames.append(header_filename)
                files_to_get[header_filename] = \
                    job.managers[header_filename].digest

        # Get compilation command.
        executable_filename = "manager"
        commands = language.get_compilation_commands(
            source_filenames, executable_filename)

        # Create the sandbox and put the required files in it.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.path)

        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
Example #25
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            job.plus = {}
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files), extra={"operation": job.info})
            return True

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = job.files.keys()[0]
        source_filenames = []
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[0]] = \
            job.files[format_filename].digest
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager. The grader must
        # be the first file in source_filenames.
        compile_command = []
        if self._uses_grader():
            files_to_get["grader%s" % source_ext] = \
                job.managers["grader%s" % source_ext].digest
            # For solutions using C or C++, we first compile the grader
            # source file and then delete it from the sandbox, to prevent
            # the user's solution files from including it.
            try:
                compile_command = language.get_compilation_no_link_command(
                    ["grader%s" % source_ext])
                compile_command += [["/bin/rm", "grader%s" % source_ext]]
            except NotImplementedError:
                compile_command = []
                source_filenames.insert(0, "grader%s" % source_ext)
            else:
                source_filenames.insert(0, "grader%s" %
                                        language.object_extension)

        # Also copy all managers that might be useful during compilation.
        for filename in job.managers.iterkeys():
            if any(filename.endswith(header) for header in
                   language.header_extensions):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(source) for source in
                     language.source_extensions):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(obj) for obj in
                     language.object_extensions):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = compile_command + language.get_compilation_commands(
            source_filenames, executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
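The grader handling in this example (compile the grader to an object file, delete its source, then link the object with the user's code) expands, for a hypothetical C submission sol.c, into a command list roughly like the one below. The real paths and flags come from language.get_compilation_no_link_command and get_compilation_commands, so treat these literals as assumptions:

# Hypothetical expansion, for a C submission "sol.c" with a grader, of
# compile_command + language.get_compilation_commands as used above.
commands = [
    ["/usr/bin/gcc", "-c", "grader.c"],       # compile grader, no link
    ["/bin/rm", "grader.c"],                  # user code can no longer include it
    ["/usr/bin/gcc", "-o", "sol", "grader.o", "sol.c"],  # compile and link
]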
Example #26
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return

        executable_filename = next(iterkeys(job.executables))
        executable_digest = job.executables[executable_filename].digest

        first_sandbox = create_sandbox(file_cacher, name="first_evaluate")
        second_sandbox = create_sandbox(file_cacher, name="second_evaluate")
        job.sandboxes.append(first_sandbox.path)
        job.sandboxes.append(second_sandbox.path)

        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)
        os.chmod(fifo_dir, 0o755)
        os.chmod(fifo, 0o666)

        # First step: we start the first manager.
        first_command = ["./%s" % executable_filename, "0", fifo]
        first_executables_to_get = {executable_filename: executable_digest}
        first_files_to_get = {
            TwoSteps.INPUT_FILENAME: job.input
            }
        first_allow_path = [fifo_dir]

        # Put the required files into the sandbox
        for filename, digest in iteritems(first_executables_to_get):
            first_sandbox.create_file_from_storage(filename,
                                                   digest,
                                                   executable=True)
        for filename, digest in iteritems(first_files_to_get):
            first_sandbox.create_file_from_storage(filename, digest)

        first = evaluation_step_before_run(
            first_sandbox,
            first_command,
            job.time_limit,
            job.memory_limit,
            first_allow_path,
            stdin_redirect=TwoSteps.INPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox,
            wait=False)

        # Second step: we start the second manager.
        second_command = ["./%s" % executable_filename, "1", fifo]
        second_executables_to_get = {executable_filename: executable_digest}
        second_files_to_get = {}
        second_allow_path = [fifo_dir]

        # Put the required files into the second sandbox
        for filename, digest in iteritems(second_executables_to_get):
            second_sandbox.create_file_from_storage(filename,
                                                    digest,
                                                    executable=True)
        for filename, digest in iteritems(second_files_to_get):
            second_sandbox.create_file_from_storage(filename, digest)

        second = evaluation_step_before_run(
            second_sandbox,
            second_command,
            job.time_limit,
            job.memory_limit,
            second_allow_path,
            stdout_redirect=TwoSteps.OUTPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox,
            wait=False)

        # Consume output.
        wait_without_std([second, first])

        box_success_first, evaluation_success_first, first_stats = \
            evaluation_step_after_run(first_sandbox)
        box_success_second, evaluation_success_second, second_stats = \
            evaluation_step_after_run(second_sandbox)

        box_success = box_success_first and box_success_second
        evaluation_success = \
            evaluation_success_first and evaluation_success_second
        stats = merge_execution_stats(first_stats, second_stats)

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not box_success:
            pass

        # Contestant's error: the marks won't be good
        elif not evaluation_success:
            outcome = 0.0
            text = human_evaluation_message(stats)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not second_sandbox.file_exists(TwoSteps.OUTPUT_FILENAME):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        TwoSteps.OUTPUT_FILENAME]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = second_sandbox.get_file_to_storage(
                        TwoSteps.OUTPUT_FILENAME,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:
                    box_success, outcome, text = eval_output(
                        file_cacher, job,
                        TwoSteps.CHECKER_CODENAME
                        if self._uses_checker() else None,
                        user_output_path=second_sandbox.relative_path(
                            TwoSteps.OUTPUT_FILENAME))

        # Fill in the job with the results.
        job.success = box_success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text
        job.plus = stats

        delete_sandbox(first_sandbox, job.success)
        delete_sandbox(second_sandbox, job.success)
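The TwoSteps data flow above is one executable run twice: the first instance (argument "0") reads TwoSteps.INPUT_FILENAME on stdin and talks to its peer through the FIFO, while the second instance (argument "1") writes TwoSteps.OUTPUT_FILENAME on stdout. Stripped of sandboxing and resource limits, the wiring reduces to the sketch below; the ./main binary and the plain file names are assumptions standing in for the real executable and constants:

import os
import subprocess
import tempfile

fifo_dir = tempfile.mkdtemp()
fifo = os.path.join(fifo_dir, "fifo")
os.mkfifo(fifo)

with open("input.txt", "rb") as fin, open("output.txt", "wb") as fout:
    # Both halves are started before either is waited on, since opening
    # the FIFO blocks until the other end shows up.
    first = subprocess.Popen(["./main", "0", fifo], stdin=fin)
    second = subprocess.Popen(["./main", "1", fifo], stdout=fout)
    first.wait()
    second.wait()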
Example #27
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        # f stands for first, s for second.
        first_sandbox = create_sandbox(self)
        second_sandbox = create_sandbox(self)
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)

        # First step: we start the first manager.
        first_filename = "manager"
        first_command = ["./%s" % first_filename, "0", fifo]
        first_executables_to_get = {
            first_filename:
            self.submission.executables[first_filename].digest
            }
        first_files_to_get = {
            "input.txt": self.submission.task.testcases[test_number].input
            }
        first_allow_path = ["input.txt", fifo]
        first = self.evaluation_step_before_run(
            first_sandbox,
            first_command,
            first_executables_to_get,
            first_files_to_get,
            self.submission.task.time_limit,
            self.submission.task.memory_limit,
            first_allow_path,
            stdin_redirect="input.txt")

        # Second step: we start the second manager.
        second_filename = "manager"
        second_command = ["./%s" % second_filename, "1", fifo]
        second_executables_to_get = {
            second_filename:
            self.submission.executables[second_filename].digest
            }
        second_files_to_get = {}
        second_allow_path = [fifo, "output.txt"]
        second = self.evaluation_step_before_run(
            second_sandbox,
            second_command,
            second_executables_to_get,
            second_files_to_get,
            self.submission.task.time_limit,
            self.submission.task.memory_limit,
            second_allow_path)

        # Consume output.
        wait_without_std([second, first])
        # TODO: check exit codes with translate_box_exitcode.

        success_first, outcome_first, text_first, _ = \
            self.evaluation_step_after_run(first_sandbox, final=False)
        success_second, outcome_second, text_second, plus = \
            self.evaluation_step_after_run(second_sandbox, final=True)

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_first:
            success, outcome, text = False, outcome_first, text_first
        elif not success_second:
            success, outcome, text = False, outcome_second, text_second
        # Otherwise, compare the second output to the reference output
        # (there is no comparator, since the manager is expected to turn
        # the output into diffable form).
        else:
            success, outcome, text = self.white_diff_step(
                second_sandbox,
                "output.txt", "res.txt",
                {"res.txt":
                 self.submission.task.testcases[test_number].output})

        delete_sandbox(first_sandbox)
        delete_sandbox(second_sandbox)
        return self.finish_evaluation_testcase(
            test_number, success, outcome, text, plus)
Example #28
File: Batch.py Project: akmohtashami/cms
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(job.executables) != 1:
            raise ValueError("Unexpected number of executables (%s)" %
                             len(job.executables))

        # Create the sandbox
        sandbox = create_sandbox(
            file_cacher,
            multithreaded=job.multithreaded_sandbox,
            name="evaluate")

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = Batch.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(
            executable_filename, main=main)
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
        }
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if len(self.input_filename) == 0:
            self.input_filename = Batch.DEFAULT_INPUT_FILENAME
            stdin_redirect = self.input_filename
        if len(self.output_filename) == 0:
            self.output_filename = Batch.DEFAULT_OUTPUT_FILENAME
            stdout_redirect = self.output_filename
        else:
            files_allowing_write.append(self.output_filename)
        files_to_get = {
            self.input_filename: job.input
        }

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(
            sandbox,
            commands,
            job.time_limit,
            job.memory_limit,
            writable_files=files_allowing_write,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = []

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self.output_filename):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        self.output_filename]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self.output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Create a brand-new sandbox just for checking. Only admin
                    # code runs in it, so we allow multithreading and many
                    # processes (still with a limit to avoid fork-bombs).
                    checkbox = create_sandbox(
                        file_cacher,
                        multithreaded=True,
                        name="check")
                    checkbox.max_processes = 1000

                    checker_success, outcome, text = self._eval_output(
                        checkbox, job, sandbox.get_root_path())
                    success = success and checker_success

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
Example #29
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        # f stands for first, s for second.
        first_sandbox = create_sandbox(self)
        second_sandbox = create_sandbox(self)
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)

        # First step: we start the first manager.
        first_filename = "manager"
        first_command = ["./%s" % first_filename, "0", fifo]
        first_executables_to_get = {
            first_filename: self.job.executables[first_filename].digest
        }
        first_files_to_get = {
            "input.txt": self.job.testcases[test_number].input
        }
        first_allow_path = ["input.txt", fifo]

        # Put the required files into the sandbox
        for filename, digest in first_executables_to_get.iteritems():
            first_sandbox.create_file_from_storage(filename,
                                                   digest,
                                                   executable=True)
        for filename, digest in first_files_to_get.iteritems():
            first_sandbox.create_file_from_storage(filename, digest)

        first = evaluation_step_before_run(first_sandbox,
                                           first_command,
                                           self.job.time_limit,
                                           self.job.memory_limit,
                                           first_allow_path,
                                           stdin_redirect="input.txt",
                                           wait=False)

        # Second step: we start the second manager.
        second_filename = "manager"
        second_command = ["./%s" % second_filename, "1", fifo]
        second_executables_to_get = {
            second_filename: self.job.executables[second_filename].digest
        }
        second_files_to_get = {}
        second_allow_path = [fifo, "output.txt"]

        # Put the required files into the second sandbox
        for filename, digest in second_executables_to_get.iteritems():
            second_sandbox.create_file_from_storage(filename,
                                                    digest,
                                                    executable=True)
        for filename, digest in second_files_to_get.iteritems():
            second_sandbox.create_file_from_storage(filename, digest)

        second = evaluation_step_before_run(second_sandbox,
                                            second_command,
                                            self.job.time_limit,
                                            self.job.memory_limit,
                                            second_allow_path,
                                            stdout_redirect="output.txt",
                                            wait=False)

        # Consume output.
        wait_without_std([second, first])
        # TODO: check exit codes with translate_box_exitcode.

        success_first, first_plus = \
            evaluation_step_after_run(first_sandbox)
        success_second, second_plus = \
            evaluation_step_after_run(second_sandbox)

        self.job.evaluations[test_number] = {
            'sandboxes': [first_sandbox.path, second_sandbox.path],
            'plus': second_plus
        }
        outcome = None
        text = None
        evaluation = self.job.evaluations[test_number]
        success = True

        # Error in the sandbox: report failure!
        if not success_first or not success_second:
            success = False

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(first_plus) or \
                not is_evaluation_passed(second_plus):
            outcome = 0.0
            if not is_evaluation_passed(first_plus):
                text = human_evaluation_message(first_plus)
            else:
                text = human_evaluation_message(second_plus)
            if self.job.get_output:
                evaluation['output'] = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not second_sandbox.file_exists('output.txt'):
                outcome = 0.0
                text = "Execution didn't produce file output.txt"
                if self.job.get_output:
                    evaluation['output'] = None

            else:
                # If asked so, put the output file into the storage
                if self.job.get_output:
                    evaluation['output'] = second_sandbox.get_file_to_storage(
                        "output.txt", "Output file for testcase %d in job %s" %
                        (test_number, self.job.info))

                # If not asked otherwise, evaluate the output file
                if not self.job.only_execution:
                    # Put the reference solution into the sandbox
                    second_sandbox.create_file_from_storage(
                        "res.txt", self.job.testcases[test_number].output)

                    outcome, text = white_diff_step(second_sandbox,
                                                    "output.txt", "res.txt")

        # Whatever happened, we conclude.
        evaluation['success'] = success
        evaluation['outcome'] = outcome
        evaluation['text'] = text

        delete_sandbox(first_sandbox)
        delete_sandbox(second_sandbox)

        return success
Example #30
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = Batch.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(executable_filename,
                                                    main=main)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        files_to_get = {self._actual_input: job.input}

        # Check which redirects we need to perform; if we don't manage
        # the output via a redirect, the submission must be allowed to
        # write to it.
        files_allowing_write = []
        stdin_redirect = None
        stdout_redirect = None
        if len(self.input_filename) == 0:
            stdin_redirect = self._actual_input
        if len(self.output_filename) == 0:
            stdout_redirect = self._actual_output
        else:
            files_allowing_write.append(self._actual_output)

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, name="evaluate")
        job.sandboxes.append(sandbox.path)

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        box_success, evaluation_success, stats = evaluation_step(
            sandbox,
            commands,
            job.time_limit,
            job.memory_limit,
            writable_files=files_allowing_write,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect,
            multiprocess=job.multithreaded_sandbox)

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not box_success:
            pass

        # Contestant's error: the marks won't be good
        elif not evaluation_success:
            outcome = 0.0
            text = human_evaluation_message(stats)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self._actual_output):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"),
                    self._actual_output
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self._actual_output,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:
                    box_success, outcome, text = eval_output(
                        file_cacher,
                        job,
                        Batch.CHECKER_CODENAME
                        if self._uses_checker() else None,
                        user_output_path=sandbox.relative_path(
                            self._actual_output),
                        user_output_filename=self.output_filename)

        # Fill in the job with the results.
        job.success = box_success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text
        job.plus = stats

        delete_sandbox(sandbox, job.success)
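
A note on the redirect logic near the top of this example: an empty configured filename means the contestant program uses the standard streams, while a non-empty one means the program opens the file itself and therefore needs write permission on it. That decision in isolation (a sketch; the real code resolves the concrete names via self._actual_input and self._actual_output):

    def choose_redirects(input_filename, output_filename):
        # An empty name means "use the standard streams"; a non-empty
        # name means the program opens that file itself, so it must be
        # writable inside the sandbox.
        writable_files = []
        stdin_redirect = "input.txt" if input_filename == "" else None
        if output_filename == "":
            stdout_redirect = "output.txt"
        else:
            stdout_redirect = None
            writable_files.append(output_filename)
        return stdin_redirect, stdout_redirect, writable_files

    assert choose_redirects("", "") == ("input.txt", "output.txt", [])
    assert choose_redirects("in.txt", "out.txt") == (None, None, ["out.txt"])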
Example #31
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        sandbox = create_sandbox(self)

        # First step: execute the contestant program. This is also the
        # final step if we have a grader; otherwise we also need to run
        # a white_diff or a comparator.
        executable_filename = self.submission.executables.keys()[0]
        command = ["./%s" % executable_filename]
        executables_to_get = {
            executable_filename:
            self.submission.executables[executable_filename].digest
            }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        files_to_get = {
            input_filename: self.submission.task.testcases[test_number].input
            }
        allow_path = [input_filename, output_filename]
        success, outcome, text, plus = self.evaluation_step(
            sandbox,
            command,
            executables_to_get,
            files_to_get,
            self.submission.task.time_limit,
            self.submission.task.memory_limit,
            allow_path,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect,
            final=False)
        # If an error occurred (ours or the contestant's), return immediately.
        if not success or outcome is not None:
            delete_sandbox(sandbox)
            return self.finish_evaluation_testcase(
                test_number, success, outcome, text, plus)

        # Second step: diffing (manual or with comparator).
        if self.parameters[2] == "diff":
            # We white_diff output.txt and res.txt.
            success, outcome, text = self.white_diff_step(
                sandbox,
                output_filename, "res.txt",
                {"res.txt":
                 self.submission.task.testcases[test_number].output})
        elif self.parameters[2] == "comparator":
            # Manager present: wonderful, it'll do all the work.
            manager_filename = self.submission.task.managers.keys()[0]
            success, outcome, text, _ = self.evaluation_step(
                sandbox,
                ["./%s" % manager_filename,
                 input_filename, "res.txt", output_filename],
                {manager_filename:
                 self.submission.task.managers[manager_filename].digest},
                {"res.txt":
                 self.submission.task.testcases[test_number].output},
                allow_path=[input_filename, "res.txt", output_filename],
                final=True)
        else:
            raise ValueError("Unrecognized third parameter `%s' in for Batch "
                             "tasktype." % self.parameters[2])

        # Whatever happened, we conclude.
        delete_sandbox(sandbox)
        return self.finish_evaluation_testcase(
            test_number, success, outcome, text, plus)
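
The comparator in this example is invoked as ./manager <input> <reference output> <contestant output>, and its verdict is read back from its standard streams. A hedged, self-contained stand-in for such a checker (the whitespace-based scoring rule here is invented for illustration):

    import sys

    def main():
        # argv[1] = input file, argv[2] = reference output,
        # argv[3] = contestant output.
        ref_path, out_path = sys.argv[2], sys.argv[3]
        with open(ref_path) as ref, open(out_path) as out:
            ok = ref.read().split() == out.read().split()
        sys.stdout.write("%.1f\n" % (1.0 if ok else 0.0))     # outcome
        sys.stderr.write("ok\n" if ok else "wrong answer\n")  # message

    if __name__ == "__main__":
        main()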
Example #32
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(self)
        sandbox_user = create_sandbox(self)
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo_in = os.path.join(fifo_dir, "in")
        fifo_out = os.path.join(fifo_dir, "out")
        os.mkfifo(fifo_in)
        os.mkfifo(fifo_out)

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename, fifo_in, fifo_out]
        manager_executables_to_get = {
            manager_filename:
            self.job.managers[manager_filename].digest
            }
        manager_files_to_get = {
            "input.txt": self.job.testcases[test_number].input
            }
        manager_allow_path = ["input.txt", "output.txt", fifo_in, fifo_out]
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(
                filename, digest, executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            self.job.time_limit,
            0,
            manager_allow_path,
            stdin_redirect="input.txt")

        # Second step: we start the user submission compiled with the
        # stub.
        executable_filename = self.job.executables.keys()[0]
        command = ["./%s" % executable_filename, fifo_out, fifo_in]
        executables_to_get = {
            executable_filename:
            self.job.executables[executable_filename].digest
            }
        allow_path = [fifo_in, fifo_out]
        for filename, digest in executables_to_get.iteritems():
            sandbox_user.create_file_from_storage(
                filename, digest, executable=True)
        process = evaluation_step_before_run(
            sandbox_user,
            command,
            self.job.time_limit,
            self.job.memory_limit,
            allow_path)

        # Consume output.
        wait_without_std([process, manager])
        # TODO: check exit codes with translate_box_exitcode.

        success_user, plus_user = \
            evaluation_step_after_run(sandbox_user)
        success_mgr, plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        self.job.evaluations[test_number] = \
            {'sandboxes': [sandbox_user.path,
                           sandbox_mgr.path],
             'plus': plus_user}
        evaluation = self.job.evaluations[test_number]

        # If at least one evaluation had problems, we report the
        # problems. (TODO: wouldn't it be better for outcome and text
        # to be None here?)
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If outcome_user is not None, it is 0.0 and it means that
        # there has been some errors in the user solution, and outcome
        # and text are meaningful, so we use them.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If requested, save the output file, provided that it exists
        if self.job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                evaluation['output'] = sandbox_mgr.get_file_to_storage(
                    "output.txt",
                    "Output file for testcase %d in job %s" %
                    (test_number, self.job.info))
            else:
                evaluation['output'] = None

        # Whatever happened, we conclude.
        evaluation['success'] = success
        evaluation['outcome'] = str(outcome) if outcome is not None else None
        evaluation['text'] = text
        delete_sandbox(sandbox_mgr)
        delete_sandbox(sandbox_user)
        return success
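
Here the manager and the user program talk over two named pipes, each reading what the other writes. A self-contained miniature of that wiring, with shell one-liners standing in for the real binaries:

    import os
    import shutil
    import subprocess
    import tempfile

    fifo_dir = tempfile.mkdtemp()
    fifo_in = os.path.join(fifo_dir, "in")
    fifo_out = os.path.join(fifo_dir, "out")
    os.mkfifo(fifo_in)
    os.mkfifo(fifo_out)

    # "Manager" writes a challenge into fifo_in and reads the reply.
    manager = subprocess.Popen(
        ["sh", "-c", "echo 42 > '%s' && cat '%s'" % (fifo_in, fifo_out)])
    # "User" echoes whatever it reads back to the manager.
    user = subprocess.Popen(
        ["sh", "-c", "read x < '%s' && echo $x > '%s'" % (fifo_in, fifo_out)])

    manager.wait()
    user.wait()
    shutil.rmtree(fifo_dir)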
Example #33
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)
        job.sandboxes.append(sandbox.path)

        # Immediately prepare the skeleton to return (the sandbox path
        # was already recorded above).
        job.plus = {}

        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%s.txt" % job.operation["testcase_codename"] \
                not in job.files:
            job.success = True
            job.outcome = "0.0"
            job.text = [N_("File not submitted")]
            delete_sandbox(sandbox, job.success)
            return True

        # First and only step: diffing (manual or with a comparator).
        output_digest = job.files["output_%s.txt" %
                                  job.operation["testcase_codename"]].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage(
            "res.txt",
            job.output)
        sandbox.create_file_from_storage(
            "output.txt",
            output_digest)

        if self.parameters[0] == "diff":
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(
                sandbox, "output.txt", "res.txt")

        elif self.parameters[0] == "comparator":
            # Manager present: wonderful, it'll do all the work.
            manager_filename = "checker"
            if manager_filename not in job.managers:
                logger.error("Configuration error: missing or "
                             "invalid comparator (it must be "
                             "named `checker')", extra={"operation": job.info})
                success = False
            else:
                sandbox.create_file_from_storage(
                    manager_filename,
                    job.managers[manager_filename].digest,
                    executable=True)
                input_digest = job.input
                sandbox.create_file_from_storage(
                    "input.txt",
                    input_digest)
                success, _ = evaluation_step(
                    sandbox,
                    [["./%s" % manager_filename,
                      "input.txt", "res.txt", "output.txt"]])
                if success:
                    outcome, text = extract_outcome_and_text(sandbox)

        else:
            raise ValueError("Unrecognized first parameter "
                             "`%s' for OutputOnly tasktype. "
                             "Should be `diff' or `comparator'." %
                             self.parameters[0])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
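
white_diff_step compares the produced and reference outputs while being lenient about whitespace. A simplified stand-in (the real white diff in CMS also normalizes things like trailing blank lines; this keeps only the core idea):

    import tempfile

    def white_diff(path_a, path_b):
        # True when the files match up to whitespace differences.
        with open(path_a) as fa, open(path_b) as fb:
            return fa.read().split() == fb.read().split()

    with tempfile.NamedTemporaryFile("w", delete=False) as a, \
            tempfile.NamedTemporaryFile("w", delete=False) as b:
        a.write("1 2 3\n")
        b.write("1  2\n3\n")
    assert white_diff(a.name, b.name)   # outcome would be 1.0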
Example #34
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension
        header_ext = language.header_extension

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 2:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 2",
                         len(job.files),
                         extra={"operation": job.info})
            return

        # First and only compilation.
        sandbox = create_sandbox(file_cacher,
                                 multithreaded=job.multithreaded_sandbox,
                                 name="compile")
        job.sandboxes.append(sandbox.path)
        files_to_get = {}

        source_filenames = []

        # Manager.
        manager_filename = "manager%s" % source_ext
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
            job.managers[manager_filename].digest
        # Manager's header.
        if header_ext is not None:
            manager_filename = "manager%s" % header_ext
            source_filenames.append(manager_filename)
            files_to_get[manager_filename] = \
                job.managers[manager_filename].digest

        # User's submissions and headers.
        for filename, file_ in iteritems(job.files):
            source_filename = filename.replace(".%l", source_ext)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = file_.digest
            # Headers (fixing compile error again here).
            if header_ext is not None:
                header_filename = filename.replace(".%l", header_ext)
                source_filenames.append(header_filename)
                files_to_get[header_filename] = \
                    job.managers[header_filename].digest

        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Get compilation command and compile.
        executable_filename = "manager"
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
Example #35
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher)
        sandbox_user = create_sandbox(file_cacher)
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo_in = os.path.join(fifo_dir, "in")
        fifo_out = os.path.join(fifo_dir, "out")
        os.mkfifo(fifo_in)
        os.mkfifo(fifo_out)
        os.chmod(fifo_dir, 0o755)
        os.chmod(fifo_in, 0o666)
        os.chmod(fifo_out, 0o666)

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename, fifo_in, fifo_out]
        manager_executables_to_get = {
            manager_filename:
            job.managers[manager_filename].digest
            }
        manager_files_to_get = {
            "input.txt": job.input
            }
        manager_allow_dirs = [fifo_dir]
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(
                filename, digest, executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            job.time_limit,
            0,
            allow_dirs=manager_allow_dirs,
            stdin_redirect="input.txt")

        # Second step: we start the user submission compiled with the
        # stub.
        executable_filename = job.executables.keys()[0]
        command = ["./%s" % executable_filename, fifo_out, fifo_in]
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
            }
        user_allow_dirs = [fifo_dir]
        for filename, digest in executables_to_get.iteritems():
            sandbox_user.create_file_from_storage(
                filename, digest, executable=True)
        process = evaluation_step_before_run(
            sandbox_user,
            command,
            job.time_limit,
            job.memory_limit,
            allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std([process, manager])
        # TODO: check exit codes with translate_box_exitcode.

        success_user, plus_user = \
            evaluation_step_after_run(sandbox_user)
        success_mgr, plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        job.sandboxes = [sandbox_user.path,
                         sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If requested, save the output file, provided that it exists
        if job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    "output.txt",
                    "Output file in job %s" % job.info)
            else:
                job.user_output = None

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr)
        delete_sandbox(sandbox_user)
        shutil.rmtree(fifo_dir)
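
extract_outcome_and_text reads the manager's verdict back out of its sandbox. Assuming the usual convention of a float outcome on the manager's stdout and a message on stderr, a minimal file-based stand-in would be (paths are illustrative):

    def extract_outcome_and_text(stdout_path, stderr_path):
        # float() raises ValueError on a malformed outcome, which is
        # exactly the error the TwoSteps example below guards against.
        with open(stdout_path) as f:
            outcome = float(f.read().strip())
        with open(stderr_path) as f:
            text = f.read().strip()
        return outcome, text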
Example #36
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # f stands for first, s for second.
        first_sandbox = create_sandbox(file_cacher,
                                       multithreaded=job.multithreaded_sandbox,
                                       name="first_evaluate")
        second_sandbox = create_sandbox(
            file_cacher,
            multithreaded=job.multithreaded_sandbox,
            name="second_evaluate")
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)
        os.chmod(fifo_dir, 0o755)
        os.chmod(fifo, 0o666)

        # First step: we start the first manager.
        first_filename = "manager"
        first_command = ["./%s" % first_filename, "0", fifo]
        first_executables_to_get = {
            first_filename: job.executables[first_filename].digest
        }
        first_files_to_get = {"input.txt": job.input}
        first_allow_path = [fifo_dir]

        # Put the required files into the sandbox
        for filename, digest in iteritems(first_executables_to_get):
            first_sandbox.create_file_from_storage(filename,
                                                   digest,
                                                   executable=True)
        for filename, digest in iteritems(first_files_to_get):
            first_sandbox.create_file_from_storage(filename, digest)

        first = evaluation_step_before_run(first_sandbox,
                                           first_command,
                                           job.time_limit,
                                           job.memory_limit,
                                           first_allow_path,
                                           stdin_redirect="input.txt",
                                           wait=False)

        # Second step: we start the second manager.
        second_filename = "manager"
        second_command = ["./%s" % second_filename, "1", fifo]
        second_executables_to_get = {
            second_filename: job.executables[second_filename].digest
        }
        second_files_to_get = {}
        second_allow_path = [fifo_dir]

        # Put the required files into the second sandbox
        for filename, digest in iteritems(second_executables_to_get):
            second_sandbox.create_file_from_storage(filename,
                                                    digest,
                                                    executable=True)
        for filename, digest in iteritems(second_files_to_get):
            second_sandbox.create_file_from_storage(filename, digest)

        second = evaluation_step_before_run(second_sandbox,
                                            second_command,
                                            job.time_limit,
                                            job.memory_limit,
                                            second_allow_path,
                                            stdout_redirect="output.txt",
                                            wait=False)

        # Consume output.
        wait_without_std([second, first])
        # TODO: check exit codes with translate_box_exitcode.

        success_first, first_plus = \
            evaluation_step_after_run(first_sandbox)
        success_second, second_plus = \
            evaluation_step_after_run(second_sandbox)

        job.sandboxes = [first_sandbox.path, second_sandbox.path]
        job.plus = second_plus

        success = True
        outcome = None
        text = []

        # Error in the sandbox: report failure!
        if not success_first or not success_second:
            success = False

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(first_plus) or \
                not is_evaluation_passed(second_plus):
            outcome = 0.0
            if not is_evaluation_passed(first_plus):
                text = human_evaluation_message(first_plus)
            else:
                text = human_evaluation_message(second_plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not second_sandbox.file_exists('output.txt'):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"), "output.txt"]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage
                if job.get_output:
                    job.user_output = second_sandbox.get_file_to_storage(
                        "output.txt", "Output file in job %s" % job.info)

                # Unless only execution was requested, evaluate the output file
                if not job.only_execution:
                    # Put the reference solution into the sandbox
                    second_sandbox.create_file_from_storage(
                        "res.txt", job.output)

                    # If a checker is not provided, use white-diff
                    if self.parameters[0] == "diff":
                        outcome, text = white_diff_step(
                            second_sandbox, "output.txt", "res.txt")

                    elif self.parameters[0] == "comparator":
                        if TwoSteps.CHECKER_FILENAME not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named `checker')",
                                extra={"operation": job.info})
                            success = False
                        else:
                            second_sandbox.create_file_from_storage(
                                TwoSteps.CHECKER_FILENAME,
                                job.managers[TwoSteps.CHECKER_FILENAME].digest,
                                executable=True)
                            # Rewrite input file, as in Batch.py
                            try:
                                second_sandbox.remove_file("input.txt")
                            except OSError as e:
                                assert not second_sandbox.file_exists(
                                    "input.txt")
                            second_sandbox.create_file_from_storage(
                                "input.txt", job.input)
                            success, _ = evaluation_step(
                                second_sandbox, [[
                                    "./%s" % TwoSteps.CHECKER_FILENAME,
                                    "input.txt", "res.txt", "output.txt"
                                ]])
                            if success:
                                try:
                                    outcome, text = extract_outcome_and_text(
                                        second_sandbox)
                                except ValueError as e:
                                    logger.error(
                                        "Invalid output from "
                                        "comparator: %s",
                                        e,
                                        extra={"operation": job.info})
                                    success = False
                    else:
                        raise ValueError("Uncrecognized first parameter"
                                         " `%s' for TwoSteps tasktype." %
                                         self.parameters[0])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text

        delete_sandbox(first_sandbox, job.success)
        delete_sandbox(second_sandbox, job.success)
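
The input rewrite in the comparator branch above uses a small idiom worth naming: remove the file if present, tolerate its absence, and re-create it. The same idea outside a sandbox:

    import os

    def replace_file(path, data):
        # Re-create path with the given content, whether or not it exists.
        try:
            os.remove(path)
        except OSError:
            # Only acceptable if the file simply wasn't there.
            assert not os.path.exists(path)
        with open(path, "w") as f:
            f.write(data)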
Example #37
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files),
                         extra={"operation": job.info})
            return True

        # Create the sandbox
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = job.files.keys()[0]
        source_filenames = []
        # Stub.
        source_filenames.append("stub%s" % source_ext)
        files_to_get[source_filenames[-1]] = \
            job.managers["stub%s" % source_ext].digest
        # User's submission.
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[-1]] = \
            job.files[format_filename].digest

        # Also copy all managers that might be useful during compilation.
        for filename in job.managers.iterkeys():
            if any(
                    filename.endswith(header)
                    for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(
                    filename.endswith(source)
                    for source in LANGUAGE_TO_SOURCE_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(
                    filename.endswith(obj)
                    for obj in LANGUAGE_TO_OBJ_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = get_compilation_commands(language, source_filenames,
                                            executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
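
Every compile method here leans on the .%l placeholder from the submission format: substituting the source extension yields the source filename, and substituting nothing yields the executable name.

    format_filename = "solution.%l"     # illustrative codename
    source_ext = ".cpp"                 # from the detected language
    assert format_filename.replace(".%l", source_ext) == "solution.cpp"
    assert format_filename.replace(".%l", "") == "solution"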
Example #38
File: Batch.py  Project: MegaAlex/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files), extra={"operation": job.info})
            return True

        # Create the sandbox
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = job.files.keys()[0]
        source_filenames = []
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[0]] = \
            job.files[format_filename].digest
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager. The grader must
        # be the first file in source_filenames.
        if self.parameters[0] == "grader":
            source_filenames.insert(0, "grader%s" % source_ext)
            files_to_get["grader%s" % source_ext] = \
                job.managers["grader%s" % source_ext].digest

        # Also copy all *.h and *lib.pas graders
        for filename in job.managers.iterkeys():
            if any(filename.endswith(header)
                   for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = get_compilation_commands(language,
                                            source_filenames,
                                            executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
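
The grader has to be first in source_filenames because get_compilation_commands hands the list to the compiler in order. A hedged sketch of the kind of command line that results for C++ (the compiler path and flags are assumptions, not taken from CMS):

    source_filenames = ["grader.cpp", "solution.cpp"]   # grader first
    executable_filename = "solution"
    command = ["/usr/bin/g++", "-O2", "-static",
               "-o", executable_filename] + source_filenames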
Example #39
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1", len(job.files), extra={"operation": job.info})
            return True

        # Create the sandbox
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = job.files.keys()[0]
        source_filenames = []
        # Stub.
        source_filenames.append("stub%s" % source_ext)
        files_to_get[source_filenames[-1]] = job.managers["stub%s" % source_ext].digest
        # User's submission.
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[-1]] = job.files[format_filename].digest

        # Also copy all managers that might be useful during compilation.
        # We likely want to compile with .cpp or .o files, so add them to our
        # command line
        for filename in job.managers.iterkeys():
            if any(filename.endswith(header) for header in LANGUAGE_TO_HEADER_EXT_MAP.itervalues()):
                files_to_get[filename] = job.managers[filename].digest
            elif any(filename.endswith(source) for source in LANGUAGE_TO_SOURCE_EXT_MAP.itervalues()):
                files_to_get[filename] = job.managers[filename].digest
                if filename not in source_filenames:
                    source_filenames.insert(1, filename)
            elif any(filename.endswith(obj) for obj in LANGUAGE_TO_OBJ_EXT_MAP.itervalues()):
                files_to_get[filename] = job.managers[filename].digest
                if filename not in source_filenames:
                    source_filenames.insert(1, filename)

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = get_compilation_commands(language, source_filenames, executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename, "Executable %s for %s" % (executable_filename, job.info)
            )
            job.executables[executable_filename] = Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
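
The manager-copying loops in these examples all reduce to one predicate: copy a manager into the sandbox when its name ends with a known header, source, or object extension. The predicate in isolation (the extension sets are illustrative, not the real maps):

    HEADER_EXTS = {".h", "lib.pas"}
    SOURCE_EXTS = {".cpp", ".c", ".pas"}
    OBJ_EXTS = {".o"}

    def is_useful_manager(filename):
        exts = HEADER_EXTS | SOURCE_EXTS | OBJ_EXTS
        return any(filename.endswith(ext) for ext in exts)

    assert is_useful_manager("graderlib.pas")
    assert is_useful_manager("helper.o")
    assert not is_useful_manager("checker")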
Example #40
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1" %
                         len(job.files),
                         extra={"operation": job.info})
            return True

        # Create the sandbox
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = job.files.keys()[0]
        source_filenames = []
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[0]] = \
            job.files[format_filename].digest
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager. The grader must
        # be the first file in source_filenames.
        if self.parameters[0] == "grader":
            source_filenames.insert(0, "grader%s" % source_ext)
            files_to_get["grader%s" % source_ext] = \
                job.managers["grader%s" % source_ext].digest

        # Also copy all *.h and *lib.pas graders
        for filename in job.managers.iterkeys():
            if filename.endswith('.h') or \
                    filename.endswith('lib.pas'):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = get_compilation_commands(language, source_filenames,
                                            executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)
Example #41
    def compile(self):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = self.job.language
        header = HEADERS_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(self.job.files) != 2:
            self.job.success = True
            self.job.compilation_success = False
            self.job.text = "Invalid files in submission"
            logger.warning("Submission contains %d files, expecting 2" %
                           len(self.job.files))
            return True

        # First and only compilation.
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)
        files_to_get = {}

        # User's submissions and headers.
        source_filenames = []
        for filename, _file in self.job.files.iteritems():
            source_filename = filename.replace("%l", language)
            source_filenames.append(source_filename)
            files_to_get[source_filename] = _file.digest
            # Headers.
            header_filename = filename.replace("%l", header)
            source_filenames.append(header_filename)
            files_to_get[header_filename] = \
                self.job.managers[header_filename].digest

        # Manager.
        manager_filename = "manager.%s" % language
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
                self.job.managers[manager_filename].digest
        # Manager's header.
        manager_filename = "manager.%s" % header
        source_filenames.append(manager_filename)
        files_to_get[manager_filename] = \
                self.job.managers[manager_filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Get compilation command and compile.
        executable_filename = "manager"
        command = get_compilation_command(language,
                                          source_filenames,
                                          executable_filename)
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, command)

        # Retrieve the compiled executables
        self.job.success = operation_success
        self.job.compilation_success = compilation_success
        self.job.plus = plus
        self.job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, self.job.info))
            self.job.executables[executable_filename] = \
                Executable(digest, executable_filename)

        # Cleanup
        delete_sandbox(sandbox)
Example #42
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher)

        # Prepare the execution
        executable_filename = job.executables.keys()[0]
        language = job.language
        commands = get_evaluation_commands(language, executable_filename)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        files_to_get = {input_filename: job.input}

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(sandbox,
                                        commands,
                                        job.time_limit,
                                        job.memory_limit,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"), output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # Unless only execution was requested, evaluate the output file
                if not job.only_execution:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage("res.txt", job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named 'checker')",
                                extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            success, _ = evaluation_step(
                                sandbox, [[
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s" % (e,),
                                             extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])
Example #43
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher)
        sandbox_user = [create_sandbox(file_cacher) for i in indices]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename:
            job.managers[manager_filename].digest
            }
        manager_files_to_get = {
            "input.txt": job.input
            }
        manager_allow_dirs = fifo_dir
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(
                filename, digest, executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            num_processes * job.time_limit,
            0,
            allow_dirs=manager_allow_dirs,
            writable_files=["output.txt"],
            stdin_redirect="input.txt")

        # Second step: we start the user submission compiled with the
        # stub.
        executable_filename = job.executables.keys()[0]
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
            }
        processes = [None for i in indices]
        for i in indices:
            command = ["./%s" % executable_filename, fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                command.append(str(i))
            user_allow_dirs = [fifo_dir[i]]
            for filename, digest in executables_to_get.iteritems():
                sandbox_user[i].create_file_from_storage(
                    filename, digest, executable=True)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                command,
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        if plus_user['exit_status'] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If requested, save the output file, provided that it exists
        if job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    "output.txt",
                    "Output file in job %s" % job.info)
            else:
                job.user_output = None

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr)
        for s in sandbox_user:
            delete_sandbox(s)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
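
The reduce() over merge_evaluation_results folds the per-process statistics into a single verdict. A hedged sketch of a plausible merge rule (the real function may differ in detail): times add up, memory takes the maximum, and the first abnormal exit status wins.

    from functools import reduce

    def merge_stats(a, b):
        return {
            "execution_time": a["execution_time"] + b["execution_time"],
            "execution_memory": max(a["execution_memory"],
                                    b["execution_memory"]),
            "exit_status": (a["exit_status"] if a["exit_status"] != "OK"
                            else b["exit_status"]),
        }

    results = [
        {"execution_time": 0.25, "execution_memory": 1024,
         "exit_status": "OK"},
        {"execution_time": 0.5, "execution_memory": 4096,
         "exit_status": "OK"},
    ]
    plus_user = reduce(merge_stats, results)
    assert plus_user["execution_time"] == 0.75
    assert plus_user["execution_memory"] == 4096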
Example #44
File: Batch.py  Project: romeorizzi/cms
    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files),
                         extra={"operation": job.info})
            return

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher,
                                 multithreaded=job.multithreaded_sandbox,
                                 name="compile")
        job.sandboxes.append(sandbox.path)

        user_file_format = next(iterkeys(job.files))
        user_source_filename = user_file_format.replace(".%l", source_ext)
        executable_filename = user_file_format.replace(".%l", "")

        # Copy required files in the sandbox (includes the grader if present).
        sandbox.create_file_from_storage(user_source_filename,
                                         job.files[user_file_format].digest)
        for filename in iterkeys(job.managers):
            if Batch._is_manager_for_compilation(filename):
                sandbox.create_file_from_storage(filename,
                                                 job.managers[filename].digest)

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line.
        source_filenames = [user_source_filename]
        if self._uses_grader():
            grader_source_filename = Batch.GRADER_BASENAME + source_ext
            source_filenames.insert(0, grader_source_filename)

        # Prepare the compilation command.
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)
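For illustration, the ".%l" placeholder convention used by compile() above, in isolation; resolve_filenames is a hypothetical helper, and source_ext carries its leading dot, as language.source_extension does.

def resolve_filenames(user_file_format, source_ext):
    """E.g. ("taskname.%l", ".cpp") -> ("taskname.cpp", "taskname")."""
    user_source_filename = user_file_format.replace(".%l", source_ext)
    executable_filename = user_file_format.replace(".%l", "")
    return user_source_filename, executable_filename

assert resolve_filenames("taskname.%l", ".cpp") == ("taskname.cpp", "taskname")
assert resolve_filenames("taskname.%l", ".py") == ("taskname.py", "taskname")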
Example #45
0
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher, job.multithreaded_sandbox)
        sandbox_user = [
            create_sandbox(file_cacher, job.multithreaded_sandbox)
            for i in indices
        ]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename: job.managers[manager_filename].digest
        }
        manager_files_to_get = {"input.txt": job.input}
        manager_allow_dirs = fifo_dir
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename,
                                                 digest,
                                                 executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)
        manager = evaluation_step_before_run(sandbox_mgr,
                                             manager_command,
                                             num_processes * job.time_limit,
                                             0,
                                             allow_dirs=manager_allow_dirs,
                                             writable_files=["output.txt"],
                                             stdin_redirect="input.txt")

        # Second step: we start the user submission compiled with the
        # stub.
        language = get_language(job.language)
        executable_filename = job.executables.keys()[0]
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(executable_filename,
                                                        main="stub",
                                                        args=args)
            user_allow_dirs = [fifo_dir[i]]
            for filename, digest in executables_to_get.iteritems():
                sandbox_user[i].create_file_from_storage(filename,
                                                         digest,
                                                         executable=True)
            # Assumes that the actual execution of the user solution
            # is the last command in commands, and that the previous
            # ones are "setup" commands that don't need tight control.
            if len(commands) > 1:
                evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        if plus_user['exit_status'] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file, provided that it exists
        if job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    "output.txt", "Output file in job %s" % job.info)
            else:
                job.user_output = None

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
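A toy model of the reduce(merge_evaluation_results, ...) step above, with assumed semantics (merge_stats and the status strings are stand-ins, not the CMS helpers): per-process stats are folded into one record, and because each sandbox only measures its own process, the summed running time has to be re-checked against the time limit afterwards, as the code above does right after the merge.

from functools import reduce

def merge_stats(a, b):
    # Illustrative merge: sum running times, keep the first non-OK status.
    merged = dict(a)
    merged["execution_time"] = a["execution_time"] + b["execution_time"]
    if a["exit_status"] == "OK":
        merged["exit_status"] = b["exit_status"]
    return merged

results = [{"execution_time": 0.5, "exit_status": "OK"},
           {"execution_time": 0.25, "exit_status": "OK"}]
total = reduce(merge_stats, results)
assert total["execution_time"] == 0.75

time_limit = 0.6
if total["exit_status"] == "OK" and total["execution_time"] >= time_limit:
    total["exit_status"] = "TIMEOUT"
assert total["exit_status"] == "TIMEOUT"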
Example #46
0
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(self)
        sandbox_user = create_sandbox(self)
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo_in = os.path.join(fifo_dir, "in")
        fifo_out = os.path.join(fifo_dir, "out")
        os.mkfifo(fifo_in)
        os.mkfifo(fifo_out)

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename, fifo_in, fifo_out]
        manager_executables_to_get = {
            manager_filename: self.job.managers[manager_filename].digest
        }
        manager_files_to_get = {
            "input.txt": self.job.testcases[test_number].input
        }
        manager_allow_path = ["input.txt", "output.txt", fifo_in, fifo_out]
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename,
                                                 digest,
                                                 executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)
        manager = evaluation_step_before_run(sandbox_mgr,
                                             manager_command,
                                             self.job.time_limit,
                                             0,
                                             manager_allow_path,
                                             stdin_redirect="input.txt")

        # Second step: we start the user submission compiled with the
        # stub.
        executable_filename = self.job.executables.keys()[0]
        command = ["./%s" % executable_filename, fifo_out, fifo_in]
        executables_to_get = {
            executable_filename:
            self.job.executables[executable_filename].digest
        }
        allow_path = [fifo_in, fifo_out]
        for filename, digest in executables_to_get.iteritems():
            sandbox_user.create_file_from_storage(filename,
                                                  digest,
                                                  executable=True)
        process = evaluation_step_before_run(sandbox_user, command,
                                             self.job.time_limit,
                                             self.job.memory_limit, allow_path)

        # Consume output.
        wait_without_std([process, manager])
        # TODO: check exit codes with translate_box_exitcode.

        success_user, plus_user = \
            evaluation_step_after_run(sandbox_user)
        success_mgr, plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        self.job.evaluations[test_number] = \
            {'sandboxes': [sandbox_user.path,
                           sandbox_mgr.path],
             'plus': plus_user}
        evaluation = self.job.evaluations[test_number]

        # If at least one evaluation had problems, we report the
        # problems. (TODO: shouldn't outcome and text better be None
        # and None?)
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file, provided that it exists
        if self.job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                evaluation['output'] = sandbox_mgr.get_file_to_storage(
                    "output.txt", "Output file for testcase %d in job %s" %
                    (test_number, self.job.info))
            else:
                evaluation['output'] = None

        # Whatever happened, we conclude.
        evaluation['success'] = success
        evaluation['outcome'] = str(outcome) if outcome is not None else None
        evaluation['text'] = text
        delete_sandbox(sandbox_mgr)
        delete_sandbox(sandbox_user)
        return success
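The same three-branch verdict recurs in every evaluate variant here; reduced to a pure function (the names and the failure message are illustrative, not CMS API) it reads:

def decide(box_ok_user, box_ok_mgr, user_passed, mgr_outcome, mgr_text):
    # Sandbox trouble on either side: report failure, no outcome.
    if not box_ok_user or not box_ok_mgr:
        return False, None, None
    # The user program failed (timeout, signal, ...): outcome 0.0.
    if not user_passed:
        return True, 0.0, "user program failed"
    # Otherwise trust the manager's verdict.
    return True, mgr_outcome, mgr_text

assert decide(True, True, True, 1.0, "ok") == (True, 1.0, "ok")
assert decide(True, True, False, 1.0, "ok") == (True, 0.0, "user program failed")
assert decide(False, True, True, 1.0, "ok") == (False, None, None)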
Example #47
0
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Immediately prepare the skeleton to return
        self.job.evaluations[test_number] = {
            'sandboxes': [sandbox.path],
            'plus': {}
        }
        evaluation = self.job.evaluations[test_number]
        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%03d.txt" % test_number not in self.job.files:
            evaluation['success'] = True
            evaluation['outcome'] = 0.0
            evaluation['text'] = "File not submitted."
            return True

        # First and only one step: diffing (manual or with manager).
        output_digest = self.job.files["output_%03d.txt" % test_number].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage(
            "res.txt", self.job.testcases[test_number].output)
        sandbox.create_file_from_storage("output.txt", output_digest)

        # TODO: this should check self.parameters, not managers.
        if len(self.job.managers) == 0:
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(sandbox, "output.txt", "res.txt")

        else:
            # Manager present: wonderful, it will do all the work.
            manager_filename = self.job.managers.keys()[0]
            sandbox.create_file_from_storage(
                manager_filename,
                self.job.managers[manager_filename].digest,
                executable=True)
            input_digest = self.job.testcases[test_number].input
            sandbox.create_file_from_storage("input.txt", input_digest)
            success, _ = evaluation_step(sandbox, [
                "./%s" % manager_filename, "input.txt", "res.txt", "output.txt"
            ],
                                         allow_path=[
                                             "input.txt", "output.txt",
                                             "res.txt"
                                         ])
            if success:
                outcome, text = extract_outcome_and_text(sandbox)

        # Whatever happened, we conclude.
        evaluation['success'] = success
        evaluation['outcome'] = str(outcome) if outcome is not None else None
        evaluation['text'] = text
        delete_sandbox(sandbox)
        return success
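Conceptually, the white-diff comparison above treats two outputs as equal when their whitespace-separated tokens are equal. A minimal sketch of the idea (the real white_diff_step is more careful about edge cases):

def white_diff(a_text, b_text):
    # Token-wise comparison: amount and kind of whitespace is ignored.
    return a_text.split() == b_text.split()

assert white_diff("1 2  3\n", " 1 2 3")
assert not white_diff("1 2 3", "1 2 4")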
Example #48
0
    def compile(self):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = self.job.language

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(self.job.files) != 1:
            self.job.success = True
            self.job.compilation_success = False
            self.job.text = "Invalid files in submission"
            logger.warning("Submission contains %d files, expecting 1" %
                           len(self.job.files))
            return True

        # Create the sandbox
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = self.job.files.keys()[0]
        source_filenames = []
        source_filenames.append(format_filename.replace("%l", language))
        files_to_get[source_filenames[0]] = \
            self.job.files[format_filename].digest
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager. The grader must
        # be the first file in source_filenames.
        if self.job.task_type_parameters[0] == "grader":
            source_filenames.insert(0, "grader.%s" % language)
            files_to_get["grader.%s" % language] = \
                self.job.managers["grader.%s" % language].digest

        # Also copy all *.h and *lib.pas graders
        for filename in self.job.managers.iterkeys():
            if filename.endswith('.h') or \
                    filename.endswith('lib.pas'):
                files_to_get[filename] = \
                    self.job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        command = get_compilation_command(language,
                                          source_filenames,
                                          executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, command)

        # Retrieve the compiled executables
        self.job.success = operation_success
        self.job.compilation_success = compilation_success
        self.job.plus = plus
        self.job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" %
                (executable_filename, self.job.info))
            self.job.executables[executable_filename] = \
                Executable(digest, executable_filename)

        # Cleanup
        delete_sandbox(sandbox)
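The grader-first rule from the compile() above, in isolation; build_source_list is a hypothetical helper:

def build_source_list(user_source, language, uses_grader):
    source_filenames = [user_source]
    if uses_grader:
        # The grader must come first on the compiler command line.
        source_filenames.insert(0, "grader.%s" % language)
    return source_filenames

assert build_source_list("task.cpp", "cpp", True) == ["grader.cpp", "task.cpp"]
assert build_source_list("task.cpp", "cpp", False) == ["task.cpp"]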
Example #49
0
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)
        job.sandboxes.append(sandbox.path)

        # Immediately prepare the skeleton to return
        job.sandboxes = [sandbox.path]
        job.plus = {}

        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%s.txt" % job.operation["testcase_codename"] \
                not in job.files:
            job.success = True
            job.outcome = "0.0"
            job.text = [N_("File not submitted")]
            return True

        # First and only one step: diffing (manual or with manager).
        output_digest = job.files["output_%s.txt" %
                                  job.operation["testcase_codename"]].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage("res.txt", job.output)
        sandbox.create_file_from_storage("output.txt", output_digest)

        if self.parameters[0] == "diff":
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(sandbox, "output.txt", "res.txt")

        elif self.parameters[0] == "comparator":
            # Manager present: wonderful, it will do all the work.
            manager_filename = "checker"
            if manager_filename not in job.managers:
                logger.error(
                    "Configuration error: missing or "
                    "invalid comparator (it must be "
                    "named `checker')",
                    extra={"operation": job.info})
                success = False
            else:
                sandbox.create_file_from_storage(
                    manager_filename,
                    job.managers[manager_filename].digest,
                    executable=True)
                input_digest = job.input
                sandbox.create_file_from_storage("input.txt", input_digest)
                success, _ = evaluation_step(sandbox, [[
                    "./%s" % manager_filename, "input.txt", "res.txt",
                    "output.txt"
                ]])
                if success:
                    outcome, text = extract_outcome_and_text(sandbox)

        else:
            raise ValueError("Unrecognized first parameter "
                             "`%s' for OutputOnly tasktype. "
                             "Should be `diff' or `comparator'." %
                             self.parameters[0])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
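The partial-submission rule from the OutputOnly example above, in isolation: a missing output file is not an error, it simply scores 0.0. The helper and its return convention are illustrative.

def missing_file_outcome(codename, submitted_files):
    filename = "output_%s.txt" % codename
    if filename not in submitted_files:
        return 0.0, "File not submitted"
    return None, None  # defer to diff/comparator

assert missing_file_outcome("001", {}) == (0.0, "File not submitted")
assert missing_file_outcome("001", {"output_001.txt": "x"}) == (None, None)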
Example #50
0
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        sandbox_mgr = create_sandbox(self)
        sandbox_user = create_sandbox(self)
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo_in = os.path.join(fifo_dir, "in")
        fifo_out = os.path.join(fifo_dir, "out")
        os.mkfifo(fifo_in)
        os.mkfifo(fifo_out)

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename, fifo_in, fifo_out]
        manager_executables_to_get = {
            manager_filename:
            self.submission.task.managers[manager_filename].digest
            }
        manager_files_to_get = {
            "input.txt": self.submission.task.testcases[test_number].input
            }
        manager_allow_path = ["input.txt", "output.txt", fifo_in, fifo_out]
        manager = self.evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            manager_executables_to_get,
            manager_files_to_get,
            self.submission.task.time_limit,
            0,
            manager_allow_path,
            stdin_redirect="input.txt")

        # Second step: we start the user submission compiled with the
        # stub.
        executable_filename = self.submission.executables.keys()[0]
        command = ["./%s" % executable_filename, fifo_out, fifo_in]
        executables_to_get = {
            executable_filename:
            self.submission.executables[executable_filename].digest
            }
        allow_path = [fifo_in, fifo_out]
        process = self.evaluation_step_before_run(
            sandbox_user,
            command,
            executables_to_get,
            {},
            self.submission.task.time_limit,
            self.submission.task.memory_limit,
            allow_path)

        # Consume output.
        wait_without_std([process, manager])
        # TODO: check exit codes with translate_box_exitcode.

        success_user, outcome_user, text_user, plus = \
                      self.evaluation_step_after_run(sandbox_user, final=False)
        success_mgr, outcome_mgr, text_mgr, _ = \
                     self.evaluation_step_after_run(sandbox_mgr, final=True)

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, outcome_user, text_user
        # If outcome_user is not None, it is 0.0 and it means that
        # there have been some errors in the user solution, and outcome
        # and text are meaningful, so we use them.
        elif outcome_user is not None:
            success, outcome, text = success_user, outcome_user, text_user
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success, outcome, text = success_mgr, outcome_mgr, text_mgr

        delete_sandbox(sandbox_mgr)
        delete_sandbox(sandbox_user)
        return self.finish_evaluation_testcase(
            test_number, success, outcome, text, plus)
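A runnable (POSIX-only) miniature of the process wiring above, with plain subprocess standing in for the sandboxed steps. Opening a FIFO blocks until the peer opens its end, which is why these task types launch every process first and only then wait on all of them (wait_without_std).

import os
import shutil
import subprocess
import tempfile

fifo_dir = tempfile.mkdtemp()
fifo = os.path.join(fifo_dir, "fifo")
os.mkfifo(fifo)
try:
    # Start both ends before waiting on either.
    writer = subprocess.Popen(["sh", "-c", "echo 42 > %s" % fifo])
    reader = subprocess.Popen(["sh", "-c", "cat < %s" % fifo])
    assert writer.wait() == 0
    assert reader.wait() == 0   # prints "42"
finally:
    shutil.rmtree(fifo_dir)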
Example #51
0
File: Batch.py Project: romeorizzi/cms
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(job.executables) != 1:
            raise ValueError("Unexpected number of executables (%s)" %
                             len(job.executables))

        # Create the sandbox
        sandbox = create_sandbox(file_cacher,
                                 multithreaded=job.multithreaded_sandbox,
                                 name="evaluate")

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = Batch.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(executable_filename,
                                                    main=main)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if len(self.input_filename) == 0:
            self.input_filename = Batch.DEFAULT_INPUT_FILENAME
            stdin_redirect = self.input_filename
        if len(self.output_filename) == 0:
            self.output_filename = Batch.DEFAULT_OUTPUT_FILENAME
            stdout_redirect = self.output_filename
        else:
            files_allowing_write.append(self.output_filename)
        files_to_get = {self.input_filename: job.input}

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually performs the execution
        success, plus = evaluation_step(sandbox,
                                        commands,
                                        job.time_limit,
                                        job.memory_limit,
                                        writable_files=files_allowing_write,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = []

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self.output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"),
                    self.output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self.output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Create a brand-new sandbox just for checking. Only admin
                    # code runs in it, so we allow multithreading and many
                    # processes (still with a limit to avoid fork-bombs).
                    checkbox = create_sandbox(file_cacher,
                                              multithreaded=True,
                                              name="check")
                    checkbox.max_processes = 1000

                    checker_success, outcome, text = self._eval_output(
                        checkbox, job, sandbox.get_root_path())
                    success = success and checker_success

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
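The redirection logic above, distilled: an empty filename in the task parameters means the contestant program uses stdin/stdout, which the sandbox emulates by redirecting the default files, while a named output file is whitelisted for writing instead. A sketch under those assumptions:

DEFAULT_INPUT_FILENAME = "input.txt"
DEFAULT_OUTPUT_FILENAME = "output.txt"

def pick_redirects(input_filename, output_filename):
    stdin_redirect = None
    stdout_redirect = None
    writable_files = []
    if input_filename == "":
        input_filename = DEFAULT_INPUT_FILENAME
        stdin_redirect = input_filename
    if output_filename == "":
        output_filename = DEFAULT_OUTPUT_FILENAME
        stdout_redirect = output_filename
    else:
        writable_files.append(output_filename)
    return (input_filename, output_filename,
            stdin_redirect, stdout_redirect, writable_files)

# stdio task: both redirects set, nothing extra writable.
assert pick_redirects("", "") == \
    ("input.txt", "output.txt", "input.txt", "output.txt", [])
# file-based task: no redirects, the output file is whitelisted.
assert pick_redirects("in.txt", "out.txt") == \
    ("in.txt", "out.txt", None, None, ["out.txt"])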
Example #52
0
File: Batch.py Project: laskarcyber/cms
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher)

        # Prepare the execution
        executable_filename = job.executables.keys()[0]
        command = [os.path.join(".", executable_filename)]
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
            }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        files_to_get = {
            input_filename: job.input
            }

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually performs the execution
        success, plus = evaluation_step(
            sandbox,
            command,
            job.time_limit,
            job.memory_limit,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        output_filename]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If not asked otherwise, evaluate the output file
                if not job.only_execution:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage(
                        "res.txt",
                        job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if not manager_filename in job.managers:
                            logger.error("Configuration error: missing or "
                                         "invalid comparator (it must be "
                                         "named 'checker')",
                                         extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            success, _ = evaluation_step(
                                sandbox,
                                ["./%s" % manager_filename,
                                 input_filename, "res.txt", output_filename])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s" % (e.message,),
                                             extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])
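As the try/except above shows, extract_outcome_and_text can raise ValueError on malformed comparator output. A self-contained sketch of defensive parsing, assuming (for illustration only) a two-line protocol of outcome then message:

def parse_checker_output(stdout_text):
    lines = stdout_text.splitlines()
    outcome = float(lines[0])
    if not 0.0 <= outcome <= 1.0:
        raise ValueError("outcome %r out of range" % outcome)
    text = lines[1].strip() if len(lines) > 1 else ""
    return outcome, text

def safe_parse(stdout_text):
    try:
        return True, parse_checker_output(stdout_text)
    except (ValueError, IndexError) as e:
        print("Invalid output from comparator: %s" % e)
        return False, None

assert safe_parse("1.0\ntranslate:success\n") == \
    (True, (1.0, "translate:success"))
assert safe_parse("garbage\n")[0] is False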
Example #53
0
    def compile(self):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = self.job.language

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(self.job.files) != 1:
            self.job.success = True
            self.job.compilation_success = False
            self.job.text = "Invalid files in submission"
            logger.warning("Submission contains %d files, expecting 1" %
                           len(self.job.files))
            return True

        # Create the sandbox
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = self.job.files.keys()[0]
        source_filenames = []
        # Stub (appended first, index 0).
        source_filenames.append("stub.%s" % language)
        files_to_get[source_filenames[0]] = \
            self.job.managers["stub.%s" % language].digest
        # User's submission (appended second, index 1).
        source_filenames.append(format_filename.replace("%l", language))
        files_to_get[source_filenames[1]] = \
            self.job.files[format_filename].digest
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        command = get_compilation_command(language, source_filenames,
                                          executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, command)

        # Retrieve the compiled executables
        self.job.success = operation_success
        self.job.compilation_success = compilation_success
        self.job.plus = plus
        self.job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, self.job.info))
            self.job.executables[executable_filename] = \
                Executable(digest, executable_filename)

        # Cleanup
        delete_sandbox(sandbox)
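A quick stand-alone check of the stub-first ordering built by the compile() above (the digest strings are hypothetical placeholders):

source_filenames = []
files_to_get = {}
source_filenames.append("stub.cpp")          # index 0: the stub
files_to_get[source_filenames[0]] = "stub-digest"
source_filenames.append("task.cpp")          # index 1: the user's source
files_to_get[source_filenames[1]] = "user-digest"
assert source_filenames == ["stub.cpp", "task.cpp"]
assert files_to_get == {"stub.cpp": "stub-digest", "task.cpp": "user-digest"}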
Example #54
0
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return
        executable_filename = next(iterkeys(job.executables))
        executable_digest = job.executables[executable_filename].digest

        # Make sure the required manager is among the job managers.
        if not check_manager_present(job, Communication.MANAGER_FILENAME):
            return
        manager_digest = job.managers[Communication.MANAGER_FILENAME].digest

        # Indices for the objects related to each user process.
        indices = range(self.num_processes)

        # Create FIFOs.
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # Create the manager sandbox and copy manager and input.
        sandbox_mgr = create_sandbox(file_cacher, name="manager_evaluate")
        job.sandboxes.append(sandbox_mgr.path)
        sandbox_mgr.create_file_from_storage(Communication.MANAGER_FILENAME,
                                             manager_digest,
                                             executable=True)
        sandbox_mgr.create_file_from_storage(Communication.INPUT_FILENAME,
                                             job.input)

        # Create the user sandbox(es) and copy the executable.
        sandbox_user = [
            create_sandbox(file_cacher, name="user_evaluate") for i in indices
        ]
        job.sandboxes.extend(s.path for s in sandbox_user)
        for i in indices:
            sandbox_user[i].create_file_from_storage(executable_filename,
                                                     executable_digest,
                                                     executable=True)
            if Communication.STUB_PRELOAD_FILENAME in job.managers:
                digest = job.managers[
                    Communication.STUB_PRELOAD_FILENAME].digest
                sandbox_user[i].create_file_from_storage(
                    Communication.STUB_PRELOAD_FILENAME,
                    digest,
                    executable=True)

        # Start the manager. Redirecting to stdin is unnecessary, but for
        # historical reasons the manager can choose to read from there
        # instead of from INPUT_FILENAME.
        manager_command = ["./%s" % Communication.MANAGER_FILENAME]
        for i in indices:
            manager_command += [fifo_in[i], fifo_out[i]]
        # We could use trusted_step for the manager, since it's fully
        # admin-controlled. But trusted_step is only synchronous at the moment.
        # Thus we use evaluation_step, and we set a time limit generous enough
        # to prevent user programs from sending the manager in timeout.
        # This means that:
        # - the manager wall clock timeout must be greater than the sum of all
        #     wall clock timeouts of the user programs;
        # - with the assumption that the work the manager performs is not
        #     greater than the work performed by the user programs, the manager
        #     user timeout must be greater than the maximum allowed total time
        #     of the user programs; in theory, this is the task's time limit,
        #     but in practice it is num_processes times that because the
        #     constraint on the total time can only be enforced after all user
        #     programs terminated.
        manager_time_limit = max(self.num_processes * (job.time_limit + 1.0),
                                 config.trusted_sandbox_max_time_s)
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            manager_time_limit,
            config.trusted_sandbox_max_memory_kib // 1024,
            allow_dirs=fifo_dir,
            writable_files=[Communication.OUTPUT_FILENAME],
            stdin_redirect=Communication.INPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox)

        # Start the user submissions compiled with the stub.
        language = get_language(job.language)
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if self.num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(executable_filename,
                                                        main="stub",
                                                        args=args)
            # Assumes that the actual execution of the user solution is the
            # last command in commands, and that the previous ones are
            # "setup" commands that don't need tight control.
            if len(commands) > 1:
                trusted_step(sandbox_user[i], commands[:-1])

            last_cmd = commands[-1]

            # Inject preload program if needed
            if Communication.STUB_PRELOAD_FILENAME in job.managers:
                last_cmd = [
                    "./%s" % Communication.STUB_PRELOAD_FILENAME, fifo_out[i],
                    fifo_in[i]
                ] + commands[-1]

            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                last_cmd,
                job.time_limit,
                job.memory_limit,
                allow_dirs=[fifo_dir[i]],
                multiprocess=job.multithreaded_sandbox)

        # Wait for the processes to conclude, without blocking them on I/O.
        wait_without_std(processes + [manager])

        # Get the results of the manager sandbox.
        box_success_mgr, evaluation_success_mgr, unused_stats_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        # Coalesce the results of the user sandboxes.
        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        box_success_user = all(r[0] for r in user_results)
        evaluation_success_user = all(r[1] for r in user_results)
        stats_user = reduce(merge_execution_stats,
                            [r[2] for r in user_results])
        # The actual running time is the sum of every user process, but each
        # sandbox can only check its own; if the sum is greater than the time
        # limit we adjust the result.
        if box_success_user and evaluation_success_user and \
                stats_user["execution_time"] >= job.time_limit:
            evaluation_success_user = False
            stats_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        success = box_success_user \
            and box_success_mgr and evaluation_success_mgr
        outcome = None
        text = None

        # If at least one sandbox had problems, or the manager did not
        # terminate correctly, we report an error (and no need for user stats).
        if not success:
            stats_user = None

        # If just asked to execute, fill text and set dummy outcome.
        elif job.only_execution:
            outcome = 0.0
            text = [N_("Execution completed successfully")]

        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not evaluation_success_user:
            outcome = 0.0
            text = human_evaluation_message(stats_user, job.feedback_level)

        # Otherwise, we use the manager to obtain the outcome.
        else:
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file with additional information,
        # provided that it exists.
        if job.get_output:
            if sandbox_mgr.file_exists(Communication.OUTPUT_FILENAME):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    Communication.OUTPUT_FILENAME,
                    "Output file in job %s" % job.info,
                    trunc_len=100 * 1024)
            else:
                job.user_output = None

        # Fill in the job with the results.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text
        job.plus = stats_user

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
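A worked instance of the manager time-limit rule described in the comments above, with illustrative numbers (trusted_max_time stands in for config.trusted_sandbox_max_time_s):

num_processes = 4
job_time_limit = 2.0      # seconds per user process
trusted_max_time = 60.0   # assumed trusted-sandbox ceiling

manager_time_limit = max(num_processes * (job_time_limit + 1.0),
                         trusted_max_time)
# 4 * 3.0 = 12.0 seconds of total user budget; the trusted floor dominates.
assert manager_time_limit == 60.0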
Example #55
0
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        # Create the sandbox
        sandbox = create_sandbox(self)

        # Prepare the execution
        executable_filename = self.job.executables.keys()[0]
        command = [sandbox.relative_path(executable_filename)]
        executables_to_get = {
            executable_filename:
            self.job.executables[executable_filename].digest
            }
        input_filename, output_filename = self.job.task_type_parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        files_to_get = {
            input_filename: self.job.testcases[test_number].input
            }
        allow_path = [input_filename, output_filename]

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually performs the execution
        success, plus = evaluation_step(
            sandbox,
            command,
            self.job.time_limit,
            self.job.memory_limit,
            allow_path,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect)

        self.job.evaluations[test_number] = {'sandboxes': [sandbox.path],
                                             'plus': plus}
        outcome = None
        text = None
        evaluation = self.job.evaluations[test_number]

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if self.job.get_output:
                evaluation['output'] = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = "Execution didn't produce file %s" % \
                    (output_filename)
                if self.job.get_output:
                    evaluation['output'] = None

            else:
                # If asked so, put the output file into the storage
                if self.job.get_output:
                    evaluation['output'] = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file for testcase %d in job %s" %
                        (test_number, self.job.info),
                        trunc_len=100 * 1024)

                # If not asked otherwise, evaluate the output file
                if not self.job.only_execution:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage(
                        "res.txt",
                        self.job.testcases[test_number].output)

                    # Check the solution with white_diff
                    if self.job.task_type_parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.job.task_type_parameters[2] == "comparator":
                        manager_filename = "checker"

                        if not manager_filename in self.job.managers:
                            logger.error("Configuration error: missing or "
                                         "invalid comparator (it must be "
                                         "named 'checker')")
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                self.job.managers[manager_filename].digest,
                                executable=True)
                            success, _ = evaluation_step(
                                sandbox,
                                ["./%s" % manager_filename,
                                 input_filename, "res.txt", output_filename],
                                allow_path=[input_filename,
                                            "res.txt",
                                            output_filename])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s" % (e.message,))
                                success = False

                    # Unknown evaluation parameter!
                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.job.task_type_parameters[2])
Example #56
0
File: TwoSteps.py Project: PetarV-/cms
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # f stand for first, s for second.
        first_sandbox = create_sandbox(file_cacher)
        second_sandbox = create_sandbox(file_cacher)
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)
        os.chmod(fifo_dir, 0o755)
        os.chmod(fifo, 0o666)

        # First step: we start the first manager.
        first_filename = "manager"
        first_command = ["./%s" % first_filename, "0", fifo]
        first_executables_to_get = {
            first_filename:
            job.executables[first_filename].digest
            }
        first_files_to_get = {
            "input.txt": job.input
            }
        first_allow_path = [fifo_dir]

        # Put the required files into the sandbox
        for filename, digest in first_executables_to_get.iteritems():
            first_sandbox.create_file_from_storage(filename,
                                                   digest,
                                                   executable=True)
        for filename, digest in first_files_to_get.iteritems():
            first_sandbox.create_file_from_storage(filename, digest)

        first = evaluation_step_before_run(
            first_sandbox,
            first_command,
            job.time_limit,
            job.memory_limit,
            first_allow_path,
            stdin_redirect="input.txt",
            wait=False)

        # Second step: we start the second manager.
        second_filename = "manager"
        second_command = ["./%s" % second_filename, "1", fifo]
        second_executables_to_get = {
            second_filename:
            job.executables[second_filename].digest
            }
        second_files_to_get = {}
        second_allow_path = [fifo_dir]

        # Put the required files into the second sandbox
        for filename, digest in second_executables_to_get.iteritems():
            second_sandbox.create_file_from_storage(filename,
                                                    digest,
                                                    executable=True)
        for filename, digest in second_files_to_get.iteritems():
            second_sandbox.create_file_from_storage(filename, digest)

        second = evaluation_step_before_run(
            second_sandbox,
            second_command,
            job.time_limit,
            job.memory_limit,
            second_allow_path,
            stdout_redirect="output.txt",
            wait=False)

        # Consume output.
        wait_without_std([second, first])
        # TODO: check exit codes with translate_box_exitcode.

        success_first, first_plus = \
            evaluation_step_after_run(first_sandbox)
        success_second, second_plus = \
            evaluation_step_after_run(second_sandbox)

        job.sandboxes = [first_sandbox.path,
                         second_sandbox.path]
        job.plus = second_plus

        success = True
        outcome = None
        text = None

        # Error in the sandbox: report failure!
        if not success_first or not success_second:
            success = False

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(first_plus) or \
                not is_evaluation_passed(second_plus):
            outcome = 0.0
            if not is_evaluation_passed(first_plus):
                text = human_evaluation_message(first_plus)
            else:
                text = human_evaluation_message(second_plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not second_sandbox.file_exists('output.txt'):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"), "output.txt"]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = second_sandbox.get_file_to_storage(
                        "output.txt",
                        "Output file in job %s" % job.info)

                # If not asked otherwise, evaluate the output file
                if not job.only_execution:
                    # Put the reference solution into the sandbox
                    second_sandbox.create_file_from_storage(
                        "res.txt",
                        job.output)

                    # If a checker is not provided, use white-diff
                    if self.parameters[0] == "diff":
                        outcome, text = white_diff_step(
                            second_sandbox, "output.txt", "res.txt")

                    elif self.parameters[0] == "comparator":
                        if TwoSteps.CHECKER_FILENAME not in job.managers:
                            logger.error("Configuration error: missing or "
                                         "invalid comparator (it must be "
                                         "named `checker')",
                                         extra={"operation": job.info})
                            success = False
                        else:
                            second_sandbox.create_file_from_storage(
                                TwoSteps.CHECKER_FILENAME,
                                job.managers[TwoSteps.CHECKER_FILENAME].digest,
                                executable=True)
                            # Rewrite input file, as in Batch.py
                            try:
                                second_sandbox.remove_file("input.txt")
                            except OSError as e:
                                assert not second_sandbox.file_exists(
                                    "input.txt")
                            second_sandbox.create_file_from_storage(
                                "input.txt",
                                job.input)
                            success, _ = evaluation_step(
                                second_sandbox,
                                [["./%s" % TwoSteps.CHECKER_FILENAME,
                                  "input.txt", "res.txt", "output.txt"]])
                            if success:
                                try:
                                    outcome, text = extract_outcome_and_text(
                                        second_sandbox)
                                except ValueError as e:
                                    logger.error("Invalid output from "
                                                 "comparator: %s", e.message,
                                                 extra={"operation": job.info})
                                    success = False
                    else:
                        raise ValueError("Unrecognized first parameter"
                                         " `%s' for TwoSteps tasktype." %
                                         self.parameters[0])