Example #1
    def _run_checker(self, sandbox, job):
        """Run the explicit checker given by the admins

        sandbox (Sandbox): the sandbox to run the checker in; should already
            contain input, correct output, and user output.
        job (Job): the job triggering this checker run.

        return (bool, float|None, [str]): success (true if the checker was able
            to check the solution successfully), outcome and text.

        """
        # Copy the checker into the sandbox, after making sure it was provided.
        if Batch.CHECKER_FILENAME not in job.managers:
            logger.error(
                "Configuration error: missing or invalid comparator "
                "(it must be named '%s')",
                Batch.CHECKER_FILENAME,
                extra={"operation": job.info})
            return False, None, []
        sandbox.create_file_from_storage(
            Batch.CHECKER_FILENAME,
            job.managers[Batch.CHECKER_FILENAME].digest,
            executable=True)

        command = [
            "./%s" % Batch.CHECKER_FILENAME, self.input_filename,
            Batch.CORRECT_OUTPUT_FILENAME, self.output_filename
        ]
        success, _ = evaluation_step(sandbox, [command])
        if not success:
            return False, None, []

        try:
            outcome, text = extract_outcome_and_text(sandbox)
        except ValueError as e:
            logger.error("Invalid output from comparator: %s",
                         e,
                         extra={"operation": job.info})
            return False, None, []

        return True, outcome, text
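
A minimal sketch of a checker conforming to the command line built above, assuming the usual CMS convention that extract_outcome_and_text relies on: the outcome (a float) on the first line of stdout and a one-line message on stderr. The comparison logic here is illustrative only:

    import sys

    def main():
        # argv layout matches _run_checker:
        #   ./checker <input> <correct output> <user output>
        input_file, correct_output, user_output = sys.argv[1:4]
        with open(correct_output) as f_ref, open(user_output) as f_usr:
            ok = f_ref.read().split() == f_usr.read().split()
        sys.stdout.write("%.2f\n" % (1.0 if ok else 0.0))   # outcome on stdout
        sys.stderr.write("Output is correct\n" if ok
                         else "Output isn't correct\n")     # message on stderr

    if __name__ == "__main__":
        main()
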
Example #2
    def _run_checker(sandbox, job):
        """Run the explicit checker given by the admins

        sandbox (Sandbox): the sandbox to run the checker in; should already
            contain input, correct output, and user output.
        job (Job): the job triggering this checker run.

        return (bool, float|None, [str]): success (true if the checker was able
            to check the solution successfully), outcome and text.

        """
        # Copy the checker into the sandbox, after making sure it was provided.
        if OutputOnly.CHECKER_FILENAME not in job.managers:
            logger.error("Configuration error: missing or invalid comparator "
                         "(it must be named `%s')",
                         OutputOnly.CHECKER_FILENAME,
                         extra={"operation": job.info})
            return False, None, []
        sandbox.create_file_from_storage(
            OutputOnly.CHECKER_FILENAME,
            job.managers[OutputOnly.CHECKER_FILENAME].digest,
            executable=True)

        command = [
            "./%s" % OutputOnly.CHECKER_FILENAME,
            OutputOnly.INPUT_FILENAME,
            OutputOnly.CORRECT_OUTPUT_FILENAME,
            OutputOnly.OUTPUT_FILENAME]
        success, _ = evaluation_step(sandbox, [command])
        if not success:
            return False, None, []

        try:
            outcome, text = extract_outcome_and_text(sandbox)
        except ValueError as e:
            logger.error("Invalid output from comparator: %s", e,
                         extra={"operation": job.info})
            return False, None, []

        return True, outcome, text
Example #3
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher, job.multithreaded_sandbox)
        sandbox_user = [
            create_sandbox(file_cacher, job.multithreaded_sandbox)
            for i in indices
        ]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename: job.managers[manager_filename].digest
        }
        manager_files_to_get = {"input.txt": job.input}
        manager_allow_dirs = fifo_dir
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename,
                                                 digest,
                                                 executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)
        manager = evaluation_step_before_run(sandbox_mgr,
                                             manager_command,
                                             num_processes * job.time_limit,
                                             0,
                                             allow_dirs=manager_allow_dirs,
                                             writable_files=["output.txt"],
                                             stdin_redirect="input.txt")

        # Second step: we start the user submission compiled with the
        # stub.
        language = get_language(job.language)
        executable_filename = job.executables.keys()[0]
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(executable_filename,
                                                        main="stub",
                                                        args=args)
            user_allow_dirs = [fifo_dir[i]]
            for filename, digest in executables_to_get.iteritems():
                sandbox_user[i].create_file_from_storage(filename,
                                                         digest,
                                                         executable=True)
            # Assumes that the actual execution of the user solution
            # is the last command in commands, and that the previous
            # ones are "setup" steps that don't need tight control.
            if len(commands) > 1:
                evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

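        # If every process exited cleanly but the merged execution time
        # exceeds the per-process time limit, reclassify it as a timeout.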
        if plus_user['exit_status'] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file, provided that it exists
        if job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    "output.txt", "Output file in job %s" % job.info)
            else:
                job.user_output = None

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
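
A minimal sketch of the user-side stub loop that the wiring above assumes. Each process receives the two FIFO paths in the order built in args (plus its index when there are several processes); which FIFO is read and which is written is the task's own convention, so the read-first orientation below, and solve(), are assumptions:

    import sys

    def solve(index, line):
        # Hypothetical per-task logic.
        return line

    def main():
        # argv order matches `args` above: the two FIFO paths, then the
        # process index when num_processes > 1.
        fifo_read, fifo_write = sys.argv[1], sys.argv[2]
        index = int(sys.argv[3]) if len(sys.argv) > 3 else 0
        with open(fifo_read) as fin, open(fifo_write, "w") as fout:
            for line in fin:
                fout.write(solve(index, line))
                fout.flush()   # flush eagerly: buffered FIFO writes can deadlock

    if __name__ == "__main__":
        main()
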
Example #4
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)

        # Prepare the execution
        assert len(job.executables) == 1
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        commands = language.get_evaluation_commands(
            executable_filename,
            main="grader" if self._uses_grader() else executable_filename)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if len(input_filename) == 0:
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if len(output_filename) == 0:
            output_filename = "output.txt"
            stdout_redirect = output_filename
        else:
            files_allowing_write.append(output_filename)
        files_to_get = {input_filename: job.input}

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(sandbox,
                                        commands,
                                        job.time_limit,
                                        job.memory_limit,
                                        writable_files=files_allowing_write,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = []

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"), output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy
                # outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage("res.txt", job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named 'checker')",
                                extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            # Rewrite input file. The untrusted
                            # contestant program should not be able to
                            # modify it; however, the grader may
                            # destroy the input file to prevent the
                            # contestant's program from directly
                            # accessing it. Since we cannot create
                            # files already existing in the sandbox,
                            # we try removing the file first.
                            try:
                                sandbox.remove_file(input_filename)
                            except OSError as e:
                                # Let us be extra sure that the file
                                # was actually removed and we did not
                                # mess up with permissions.
                                assert not sandbox.file_exists(input_filename)
                            sandbox.create_file_from_storage(
                                input_filename, job.input)

                            # Allow any number of processes (e.g. one may
                            # want to write a bash checker that calls other
                            # processes), but keep a finite limit to avoid
                            # fork-bombing the worker.
                            sandbox.max_processes = 1000

                            success, _ = evaluation_step(
                                sandbox, [[
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error(
                                    "Invalid output from "
                                    "comparator: %s",
                                    e.message,
                                    extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
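
For reference, a sketch of the parameters shape this method reads, inferred from the accesses above; the concrete values are illustrative:

    # parameters[0]: compilation mode, consumed elsewhere (_uses_grader())
    # parameters[1]: (input_filename, output_filename); an empty name
    #                switches on stdin/stdout redirection
    # parameters[2]: "diff" or "comparator"
    parameters = ["alone", ["", ""], "comparator"]
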
Example #5
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""

        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher, job.multithreaded_sandbox)
        sandbox_user = [
            create_sandbox(file_cacher, job.multithreaded_sandbox)
            for i in indices
        ]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # First step: we start the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename: job.managers[manager_filename].digest
        }
        manager_files_to_get = {"input.txt": job.input}
        manager_allow_dirs = fifo_dir
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename,
                                                 digest,
                                                 executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            num_processes * job.time_limit,
            0,
            allow_dirs=manager_allow_dirs,
            writable_files=["output.txt"],
            stdin_redirect="input.txt",
            stdout_redirect="output.txt",
        )

        # Second step: we start the user submission compiled with the
        # stub.
        language = get_language(job.language)
        executable_filename = job.executables.keys()[0]
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(executable_filename,
                                                        main="grader",
                                                        args=args)
            user_allow_dirs = [fifo_dir[i]]
            for filename, digest in executables_to_get.iteritems():
                sandbox_user[i].create_file_from_storage(filename,
                                                         digest,
                                                         executable=True)
            # Assumes that the actual execution of the user solution
            # is the last command in commands, and that the previous
            # ones are "setup" steps that don't need tight control.
            if len(commands) > 1:
                evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

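        # If every process exited cleanly but the merged execution time
        # exceeds the per-process time limit, reclassify it as a timeout.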
        if plus_user['exit_status'] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
            if job.get_output:
                job.user_output = None
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome = None
            text = None

            input_filename = "input.txt"
            output_filename = "output.txt"
            # Check that the output file was created
            if not sandbox_mgr.file_exists(output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"), output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox_mgr.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=1024 * 1024 * 10)

                # If just asked to execute, fill text and set dummy
                # outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Put the reference solution into the sandbox
                    sandbox_mgr.create_file_from_storage("res.txt", job.output)

                    # Check the solution with white_diff
                    if self.parameters[1] == "diff":
                        outcome, text = white_diff_step(
                            sandbox_mgr, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[1] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named 'checker')",
                                extra={"operation": job.info})
                            success = False

                        else:
                            sandbox_mgr.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            # Rewrite input file. The untrusted
                            # contestant program should not be able to
                            # modify it; however, the grader may
                            # destroy the input file to prevent the
                            # contestant's program from directly
                            # accessing it. Since we cannot create
                            # files already existing in the sandbox,
                            # we try removing the file first.
                            try:
                                sandbox_mgr.remove_file(input_filename)
                            except OSError as e:
                                # Let us be extra sure that the file
                                # was actually removed and we did not
                                # mess up with permissions.
                                assert not sandbox_mgr.file_exists(
                                    input_filename)
                            sandbox_mgr.create_file_from_storage(
                                input_filename, job.input)

                            # Allow any number of processes (e.g. one may
                            # want to write a bash checker that calls other
                            # processes), but keep a finite limit to avoid
                            # fork-bombing the worker.
                            sandbox_mgr.max_processes = 1000

                            success, _ = evaluation_step(
                                sandbox_mgr, [[
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox_mgr)
                            except ValueError as e:
                                logger.error(
                                    "Invalid output from "
                                    "comparator: %s",
                                    e.message,
                                    extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized second parameter"
                                         " `%s' for Communication tasktype." %
                                         self.parameters[2])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
Example #6
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)

        # Immediately prepare the skeleton to return
        job.sandboxes = [sandbox.path]
        job.plus = {}

        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%s.txt" % job.operation["testcase_codename"] \
                not in job.files:
            job.success = True
            job.outcome = "0.0"
            job.text = [N_("File not submitted")]
            return True

        # First and only one step: diffing (manual or with manager).
        output_digest = job.files["output_%s.txt" %
                                  job.operation["testcase_codename"]].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage(
            "res.txt",
            job.output)
        sandbox.create_file_from_storage(
            "output.txt",
            output_digest)

        if self.parameters[0] == "diff":
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(
                sandbox, "output.txt", "res.txt")

        elif self.parameters[0] == "comparator":
            # Manager present: wonderful, it will do the whole job.
            manager_filename = "checker"
            if manager_filename not in job.managers:
                logger.error("Configuration error: missing or "
                             "invalid comparator (it must be "
                             "named `checker')", extra={"operation": job.info})
                success = False
            else:
                sandbox.create_file_from_storage(
                    manager_filename,
                    job.managers[manager_filename].digest,
                    executable=True)
                input_digest = job.input
                sandbox.create_file_from_storage(
                    "input.txt",
                    input_digest)
                success, _ = evaluation_step(
                    sandbox,
                    [["./%s" % manager_filename,
                      "input.txt", "res.txt", "output.txt"]])
                if success:
                    outcome, text = extract_outcome_and_text(sandbox)

        else:
            raise ValueError("Unrecognized first parameter "
                             "`%s' for OutputOnly tasktype. "
                             "Should be `diff' or `comparator'." %
                             self.parameters[0])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
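
A small illustration of the partial-submission naming convention checked above; the codename is hypothetical:

    codename = "1_07"                        # a testcase codename
    filename = "output_%s.txt" % codename    # -> "output_1_07.txt"
    submitted = filename in job.files        # mirrors the membership test above
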
Example #7
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(job.executables) != 1:
            raise ValueError("Unexpected number of executables (%s)" %
                             len(job.executables))

        # Create the sandbox
        sandbox = create_sandbox(
            file_cacher,
            multithreaded=job.multithreaded_sandbox,
            name="evaluate")

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = Batch.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(
            executable_filename, main=main)
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
        }
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if len(self.input_filename) == 0:
            self.input_filename = Batch.DEFAULT_INPUT_FILENAME
            stdin_redirect = self.input_filename
        if len(self.output_filename) == 0:
            self.output_filename = Batch.DEFAULT_OUTPUT_FILENAME
            stdout_redirect = self.output_filename
        else:
            files_allowing_write.append(self.output_filename)
        files_to_get = {
            self.input_filename: job.input
        }

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(
            sandbox,
            commands,
            job.time_limit,
            job.memory_limit,
            writable_files=files_allowing_write,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = []

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self.output_filename):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        self.output_filename]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self.output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Create a brand-new sandbox just for checking. Only admin
                    # code runs in it, so we allow multithreading and many
                    # processes (still with a limit to avoid fork-bombs).
                    checkbox = create_sandbox(
                        file_cacher,
                        multithreaded=True,
                        name="check")
                    checkbox.max_processes = 1000

                    checker_success, outcome, text = self._eval_output(
                        checkbox, job, sandbox.get_root_path())
                    success = success and checker_success

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
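
Only the signature of _eval_output and its (success, outcome, text) return shape are visible above, so the following is a sketch under assumptions, reusing pieces shown in the other examples; it covers only the comparator branch, while a real implementation would also handle white-diff:

    import os
    import shutil

    def _eval_output(self, checkbox, job, eval_root):
        # Copy the contestant's output from the evaluation sandbox into the
        # checking sandbox, next to the input and the reference output.
        shutil.copy(
            os.path.join(eval_root, self.output_filename),
            os.path.join(checkbox.get_root_path(), self.output_filename))
        checkbox.create_file_from_storage(self.input_filename, job.input)
        checkbox.create_file_from_storage(
            Batch.CORRECT_OUTPUT_FILENAME, job.output)
        # Delegate to the _run_checker helper shown in Example #1.
        return self._run_checker(checkbox, job)
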
Example #8
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # f stands for first, s for second.
        first_sandbox = create_sandbox(file_cacher,
                                       multithreaded=job.multithreaded_sandbox,
                                       name="first_evaluate")
        second_sandbox = create_sandbox(
            file_cacher,
            multithreaded=job.multithreaded_sandbox,
            name="second_evaluate")
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)
        os.chmod(fifo_dir, 0o755)
        os.chmod(fifo, 0o666)

        # First step: we start the first manager.
        first_filename = "manager"
        first_command = ["./%s" % first_filename, "0", fifo]
        first_executables_to_get = {
            first_filename: job.executables[first_filename].digest
        }
        first_files_to_get = {"input.txt": job.input}
        first_allow_path = [fifo_dir]

        # Put the required files into the sandbox
        for filename, digest in iteritems(first_executables_to_get):
            first_sandbox.create_file_from_storage(filename,
                                                   digest,
                                                   executable=True)
        for filename, digest in iteritems(first_files_to_get):
            first_sandbox.create_file_from_storage(filename, digest)

        first = evaluation_step_before_run(first_sandbox,
                                           first_command,
                                           job.time_limit,
                                           job.memory_limit,
                                           first_allow_path,
                                           stdin_redirect="input.txt",
                                           wait=False)

        # Second step: we start the second manager.
        second_filename = "manager"
        second_command = ["./%s" % second_filename, "1", fifo]
        second_executables_to_get = {
            second_filename: job.executables[second_filename].digest
        }
        second_files_to_get = {}
        second_allow_path = [fifo_dir]

        # Put the required files into the second sandbox
        for filename, digest in iteritems(second_executables_to_get):
            second_sandbox.create_file_from_storage(filename,
                                                    digest,
                                                    executable=True)
        for filename, digest in iteritems(second_files_to_get):
            second_sandbox.create_file_from_storage(filename, digest)

        second = evaluation_step_before_run(second_sandbox,
                                            second_command,
                                            job.time_limit,
                                            job.memory_limit,
                                            second_allow_path,
                                            stdout_redirect="output.txt",
                                            wait=False)

        # Consume output.
        wait_without_std([second, first])
        # TODO: check exit codes with translate_box_exitcode.

        success_first, first_plus = \
            evaluation_step_after_run(first_sandbox)
        success_second, second_plus = \
            evaluation_step_after_run(second_sandbox)

        job.sandboxes = [first_sandbox.path, second_sandbox.path]
        job.plus = second_plus

        success = True
        outcome = None
        text = []

        # Error in the sandbox: report failure!
        if not success_first or not success_second:
            success = False

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(first_plus) or \
                not is_evaluation_passed(second_plus):
            outcome = 0.0
            if not is_evaluation_passed(first_plus):
                text = human_evaluation_message(first_plus)
            else:
                text = human_evaluation_message(second_plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not second_sandbox.file_exists('output.txt'):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"), "output.txt"]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = second_sandbox.get_file_to_storage(
                        "output.txt", "Output file in job %s" % job.info)

                # If not asked otherwise, evaluate the output file
                if not job.only_execution:
                    # Put the reference solution into the sandbox
                    second_sandbox.create_file_from_storage(
                        "res.txt", job.output)

                    # If a checker is not provided, use white-diff
                    if self.parameters[0] == "diff":
                        outcome, text = white_diff_step(
                            second_sandbox, "output.txt", "res.txt")

                    elif self.parameters[0] == "comparator":
                        if TwoSteps.CHECKER_FILENAME not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named `checker')",
                                extra={"operation": job.info})
                            success = False
                        else:
                            second_sandbox.create_file_from_storage(
                                TwoSteps.CHECKER_FILENAME,
                                job.managers[TwoSteps.CHECKER_FILENAME].digest,
                                executable=True)
                            # Rewrite input file, as in Batch.py
                            try:
                                second_sandbox.remove_file("input.txt")
                            except OSError as e:
                                assert not second_sandbox.file_exists(
                                    "input.txt")
                            second_sandbox.create_file_from_storage(
                                "input.txt", job.input)
                            success, _ = evaluation_step(
                                second_sandbox, [[
                                    "./%s" % TwoSteps.CHECKER_FILENAME,
                                    "input.txt", "res.txt", "output.txt"
                                ]])
                            if success:
                                try:
                                    outcome, text = extract_outcome_and_text(
                                        second_sandbox)
                                except ValueError as e:
                                    logger.error(
                                        "Invalid output from "
                                        "comparator: %s",
                                        e,
                                        extra={"operation": job.info})
                                    success = False
                    else:
                        raise ValueError("Uncrecognized first parameter"
                                         " `%s' for TwoSteps tasktype." %
                                         self.parameters[0])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text

        delete_sandbox(first_sandbox, job.success)
        delete_sandbox(second_sandbox, job.success)
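
A minimal sketch of the two-step program this wiring assumes: the same "manager" executable is started twice, argv[1] selects the half and argv[2] is the FIFO path; the first half reads the test input from stdin (input.txt is redirected there) and the second half writes the final output to stdout (redirected to output.txt). encode() and decode() are hypothetical:

    import sys

    def encode(data):
        # Hypothetical first-half transform.
        return data

    def decode(data):
        # Hypothetical second-half transform.
        return data

    def main():
        role, fifo = sys.argv[1], sys.argv[2]
        if role == "0":
            data = sys.stdin.read()        # input.txt via stdin redirection
            with open(fifo, "w") as pipe:
                pipe.write(encode(data))
        else:
            with open(fifo) as pipe:
                sys.stdout.write(decode(pipe.read()))  # becomes output.txt

    if __name__ == "__main__":
        main()
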
Example #9
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        # Create the sandbox
        sandbox = create_sandbox(self)

        # Prepare the execution
        executable_filename = self.job.executables.keys()[0]
        command = [os.path.join(".", executable_filename)]
        executables_to_get = {
            executable_filename:
            self.job.executables[executable_filename].digest
        }
        input_filename, output_filename = self.job.task_type_parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        files_to_get = {input_filename: self.job.testcases[test_number].input}

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(sandbox,
                                        command,
                                        self.job.time_limit,
                                        self.job.memory_limit,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        self.job.evaluations[test_number] = {
            'sandboxes': [sandbox.path],
            'plus': plus
        }
        outcome = None
        text = None
        evaluation = self.job.evaluations[test_number]

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if self.job.get_output:
                evaluation['output'] = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = "Execution didn't produce file %s" % \
                    (output_filename)
                if self.job.get_output:
                    evaluation['output'] = None

            else:
                # If asked so, put the output file into the storage
                if self.job.get_output:
                    evaluation['output'] = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file for testcase %d in job %s" %
                        (test_number, self.job.info),
                        trunc_len=100 * 1024)

                # If not asked otherwise, evaluate the output file
                if not self.job.only_execution:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage(
                        "res.txt", self.job.testcases[test_number].output)

                    # Check the solution with white_diff
                    if self.job.task_type_parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.job.task_type_parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in self.job.managers:
                            logger.error("Configuration error: missing or "
                                         "invalid comparator (it must be "
                                         "named 'checker')")
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                self.job.managers[manager_filename].digest,
                                executable=True)
                            success, _ = evaluation_step(
                                sandbox, [
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s" % (e.message, ))
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.job.task_type_parameters[2])
Example #10
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher, job.multithreaded_sandbox)
        sandbox_user = [create_sandbox(file_cacher, job.multithreaded_sandbox)
                        for i in indices]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # First step: prepare the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename:
            job.managers[manager_filename].digest
            }
        manager_files_to_get = {
            "input.txt": job.input
            }
        manager_allow_dirs = fifo_dir
        for filename, digest in manager_executables_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(
                filename, digest, executable=True)
        for filename, digest in manager_files_to_get.iteritems():
            sandbox_mgr.create_file_from_storage(filename, digest)

        # Second step: load the executables for the user processes
        # (done before launching the manager so that it does not
        # impact its wall clock time).
        executable_filename = job.executables.keys()[0]
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
            }
        for i in indices:
            for filename, digest in executables_to_get.iteritems():
                sandbox_user[i].create_file_from_storage(
                    filename, digest, executable=True)

        # Third step: start the manager.
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            num_processes * job.time_limit,
            0,
            allow_dirs=manager_allow_dirs,
            writable_files=["output.txt"],
            stdin_redirect="input.txt")

        # Fourth step: start the user submissions compiled with the stub.
        language = get_language(job.language)
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(
                executable_filename,
                main="stub",
                args=args)
            user_allow_dirs = [fifo_dir[i]]
            # Assumes that the actual execution of the user solution
            # is the last command in commands, and that the previous
            # ones are "setup" steps that don't need tight control.
            if len(commands) > 1:
                evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

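        # If every process exited cleanly but the merged execution time
        # exceeds the per-process time limit, reclassify it as a timeout.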
        if plus_user['exit_status'] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, None
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file, provided that it exists
        if job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    "output.txt",
                    "Output file in job %s" % job.info)
            else:
                job.user_output = None

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
Example #11
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Immediately prepare the skeleton to return
        self.job.evaluations[test_number] = {"sandboxes": [sandbox.path], "plus": {}}
        evaluation = self.job.evaluations[test_number]
        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%03d.txt" % test_number not in self.job.files:
            evaluation["success"] = True
            evaluation["outcome"] = 0.0
            evaluation["text"] = "File not submitted."
            return True

        # First and only one step: diffing (manual or with manager).
        output_digest = self.job.files["output_%03d.txt" % test_number].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage("res.txt", self.job.testcases[test_number].output)
        sandbox.create_file_from_storage("output.txt", output_digest)

        if self.job.task_type_parameters[0] == "diff":
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(sandbox, "output.txt", "res.txt")

        elif self.job.task_type_parameters[0] == "comparator":
            # Manager present: wonderful, it will do the whole job.
            manager_filename = "checker"
            if manager_filename not in self.job.managers:
                logger.error("Configuration error: missing or invalid comparator (it must be named `checker')")
                success = False
            else:
                sandbox.create_file_from_storage(
                    manager_filename, self.job.managers[manager_filename].digest, executable=True
                )
                input_digest = self.job.testcases[test_number].input
                sandbox.create_file_from_storage("input.txt", input_digest)
                success, _ = evaluation_step(
                    sandbox,
                    ["./%s" % manager_filename, "input.txt", "res.txt", "output.txt"],
                    allow_path=["input.txt", "output.txt", "res.txt"],
                )
                if success:
                    outcome, text = extract_outcome_and_text(sandbox)

        else:
            raise ValueError(
                "Unrecognized first parameter "
                "`%s' for OutputOnly tasktype. "
                "Should be `diff' or `comparator'." % self.job.task_type_parameters[0]
            )

        # Whatever happened, we conclude.
        evaluation["success"] = success
        evaluation["outcome"] = str(outcome) if outcome is not None else None
        evaluation["text"] = text
        delete_sandbox(sandbox)
        return success
Example #12
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)

        # Prepare the execution
        executable_filename = job.executables.keys()[0]
        language = get_language(job.language)
        commands = language.get_evaluation_commands(
            executable_filename,
            main="grader" if self._uses_grader() else executable_filename)
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
            }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        else:
            files_allowing_write.append(output_filename)
        files_to_get = {
            input_filename: job.input
            }

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(
            sandbox,
            commands,
            job.time_limit,
            job.memory_limit,
            writable_files=files_allowing_write,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        output_filename]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy
                # outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage(
                        "res.txt",
                        job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error("Configuration error: missing or "
                                         "invalid comparator (it must be "
                                         "named 'checker')",
                                         extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            # Rewrite input file. The untrusted
                            # contestant program should not be able to
                            # modify it; however, the grader may
                            # destroy the input file to prevent the
                            # contestant's program from directly
                            # accessing it. Since we cannot create
                            # files already existing in the sandbox,
                            # we try removing the file first.
                            try:
                                sandbox.remove_file(input_filename)
                            except OSError as e:
                                # Let us be extra sure that the file
                                # was actually removed and we did not
                                # mess up with permissions.
                                assert not sandbox.file_exists(input_filename)
                            sandbox.create_file_from_storage(
                                input_filename,
                                job.input)

                            # Allow using any number of processes (e.g.
                            # one may want to write a bash checker that
                            # calls other processes). Still set a finite
                            # limit to avoid fork-bombing the worker.
                            sandbox.max_processes = 1000

                            success, _ = evaluation_step(
                                sandbox,
                                [["./%s" % manager_filename,
                                  input_filename, "res.txt", output_filename]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s", e,
                                             extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])
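
extract_outcome_and_text raises ValueError whenever the checker's output cannot be parsed, which the callers above turn into success = False. A plausible sketch of its behavior, assuming the outcome-on-stdout, text-on-stderr contract and the Sandbox accessors used in these examples:

    def extract_outcome_and_text(sandbox):
        # Sketch: read the checker's stdout and stderr back from the
        # sandbox and parse them into (float outcome, [str] text).
        stdout = sandbox.get_file_to_string(sandbox.stdout_file).strip()
        stderr = sandbox.get_file_to_string(sandbox.stderr_file).strip()
        try:
            outcome = float(stdout)
        except ValueError:
            raise ValueError("invalid checker outcome: %r" % stdout)
        return outcome, [stderr]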
Example #13
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Immediately prepare the skeleton to return
        self.job.evaluations[test_number] = {
            'sandboxes': [sandbox.path],
            'plus': {}
        }
        evaluation = self.job.evaluations[test_number]
        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%03d.txt" % test_number not in self.job.files:
            evaluation['success'] = True
            evaluation['outcome'] = "0.0"
            evaluation['text'] = "File not submitted."
            return True

        # First and only step: diffing (manual or with manager).
        output_digest = self.job.files["output_%03d.txt" % test_number].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage(
            "res.txt", self.job.testcases[test_number].output)
        sandbox.create_file_from_storage("output.txt", output_digest)

        if self.job.task_type_parameters[0] == "diff":
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(sandbox, "output.txt", "res.txt")

        elif self.job.task_type_parameters[0] == "comparator":
            # Manager present: it will do all the work.
            manager_filename = "checker"
            if manager_filename not in self.job.managers:
                logger.error("Configuration error: missing or "
                             "invalid comparator (it must be "
                             "named `checker')")
                success = False
            else:
                sandbox.create_file_from_storage(
                    manager_filename,
                    self.job.managers[manager_filename].digest,
                    executable=True)
                input_digest = self.job.testcases[test_number].input
                sandbox.create_file_from_storage("input.txt", input_digest)
                success, _ = evaluation_step(sandbox, [
                    "./%s" % manager_filename, "input.txt", "res.txt",
                    "output.txt"
                ])
                if success:
                    outcome, text = extract_outcome_and_text(sandbox)

        else:
            raise ValueError("Unrecognized first parameter "
                             "`%s' for OutputOnly tasktype. "
                             "Should be `diff' or `comparator'." %
                             self.job.task_type_parameters[0])

        # Whatever happened, we conclude.
        evaluation['success'] = success
        evaluation['outcome'] = str(outcome) if outcome is not None else None
        evaluation['text'] = text
        delete_sandbox(sandbox)
        return success
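
white_diff_step, used in all the diff branches, accepts an output when it matches the reference token by token, ignoring whitespace differences. A rough standalone sketch of the idea (the real implementation also works line by line and handles sandbox errors):

    def white_diff_sketch(output_text, correct_text):
        # Whitespace-insensitive comparison: two outputs match when
        # they contain the same sequence of non-whitespace tokens.
        if output_text.split() == correct_text.split():
            return 1.0, ["Output is correct"]
        return 0.0, ["Output isn't correct"]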
Example #14
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Immediately prepare the skeleton to return
        self.job.evaluations[test_number] = {
            'sandboxes': [sandbox.path],
            'plus': {}
        }
        evaluation = self.job.evaluations[test_number]
        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%03d.txt" % test_number not in self.job.files:
            evaluation['success'] = True
            evaluation['outcome'] = 0.0
            evaluation['text'] = "File not submitted."
            return True

        # First and only step: diffing (manual or with manager).
        output_digest = self.job.files["output_%03d.txt" % test_number].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage(
            "res.txt", self.job.testcases[test_number].output)
        sandbox.create_file_from_storage("output.txt", output_digest)

        # TODO: this should check self.parameters, not managers.
        if len(self.job.managers) == 0:
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(sandbox, "output.txt", "res.txt")

        else:
            # Manager present: it will do all the work.
            manager_filename = self.job.managers.keys()[0]
            sandbox.create_file_from_storage(
                manager_filename,
                self.job.managers[manager_filename].digest,
                executable=True)
            input_digest = self.job.testcases[test_number].input
            sandbox.create_file_from_storage("input.txt", input_digest)
            success, _ = evaluation_step(
                sandbox,
                ["./%s" % manager_filename,
                 "input.txt", "res.txt", "output.txt"],
                allow_path=["input.txt", "output.txt", "res.txt"])
            if success:
                outcome, text = extract_outcome_and_text(sandbox)

        # Whatever happened, we conclude.
        evaluation['success'] = success
        evaluation['outcome'] = str(outcome) if outcome is not None else None
        evaluation['text'] = text
        delete_sandbox(sandbox)
        return success
Example #15
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher)

        # Prepare the execution
        executable_filename = job.executables.keys()[0]
        language = job.language
        commands = get_evaluation_commands(language, executable_filename)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        files_to_get = {input_filename: job.input}

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(sandbox,
                                        commands,
                                        job.time_limit,
                                        job.memory_limit,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"), output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If not asked otherwise, evaluate the output file
                if not job.only_execution:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage("res.txt", job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named 'checker')",
                                extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            success, _ = evaluation_step(
                                sandbox, [[
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s", e,
                                             extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])
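
In these Batch examples, self.parameters[1] carries the (input_filename, output_filename) pair, and an empty string selects redirection of the standard streams instead of named files. An illustrative configuration (values are made up):

    # Illustrative Batch parameters: [compilation, (in, out), evaluation].
    parameters = ["alone", ["", ""], "diff"]
    input_filename, output_filename = parameters[1]
    # "" means the contestant program reads stdin and writes stdout, so
    # the sandbox redirects input.txt and output.txt onto them; with
    # ("input.txt", "output.txt") the program opens the files itself
    # and no redirection is configured.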
Example #16
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        sandbox = create_sandbox(self)
        self.job.sandboxes.append(sandbox.path)

        # Immediately prepare the skeleton to return
        self.job.evaluations[test_number] = {'sandboxes': [sandbox.path],
                                             'plus': {}}
        evaluation = self.job.evaluations[test_number]
        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%03d.txt" % test_number not in self.job.files:
            evaluation['success'] = True
            evaluation['outcome'] = 0.0
            evaluation['text'] = "File not submitted."
            return True

        # First and only step: diffing (manual or with manager).
        output_digest = self.job.files["output_%03d.txt" %
                                       test_number].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage(
            "res.txt",
            self.job.testcases[test_number].output)
        sandbox.create_file_from_storage(
            "output.txt",
            output_digest)

        # TODO: this should check self.parameters, not managers.
        if len(self.job.managers) == 0:
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(
                sandbox, "output.txt", "res.txt")

        else:
            # Manager present: it will do all the work.
            manager_filename = self.job.managers.keys()[0]
            sandbox.create_file_from_storage(
                manager_filename,
                self.job.managers[manager_filename].digest,
                executable=True)
            input_digest = self.job.testcases[test_number].input
            sandbox.create_file_from_storage(
                "input.txt",
                input_digest)
            success, _ = evaluation_step(
                sandbox,
                ["./%s" % manager_filename,
                 "input.txt", "res.txt", "output.txt"],
                allow_path=["input.txt", "output.txt", "res.txt"])
            if success:
                outcome, text = extract_outcome_and_text(sandbox)

        # Whatever happened, we conclude.
        evaluation['success'] = success
        evaluation['outcome'] = str(outcome) if outcome is not None else None
        evaluation['text'] = text
        delete_sandbox(sandbox)
        return success
Example #17
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)

        # Immediately prepare the skeleton to return
        job.sandboxes = [sandbox.path]
        job.plus = {}

        outcome = None
        text = None

        # Since we allow partial submission, if the file is not
        # present we report that the outcome is 0.
        if "output_%s.txt" % job.operation["testcase_codename"] \
                not in job.files:
            job.success = True
            job.outcome = "0.0"
            job.text = [N_("File not submitted")]
            return True

        # First and only step: diffing (manual or with manager).
        output_digest = job.files["output_%s.txt" %
                                  job.operation["testcase_codename"]].digest

        # Put the files into the sandbox
        sandbox.create_file_from_storage("res.txt", job.output)
        sandbox.create_file_from_storage("output.txt", output_digest)

        if self.parameters[0] == "diff":
            # No manager: I'll do a white_diff between the submission
            # file and the correct output res.txt.
            success = True
            outcome, text = white_diff_step(sandbox, "output.txt", "res.txt")

        elif self.parameters[0] == "comparator":
            # Manager present: wonderful, it will do all the work.
            manager_filename = "checker"
            if manager_filename not in job.managers:
                logger.error(
                    "Configuration error: missing or "
                    "invalid comparator (it must be "
                    "named `checker')",
                    extra={"operation": job.info})
                success = False
            else:
                sandbox.create_file_from_storage(
                    manager_filename,
                    job.managers[manager_filename].digest,
                    executable=True)
                input_digest = job.input
                sandbox.create_file_from_storage("input.txt", input_digest)
                success, _ = evaluation_step(sandbox, [[
                    "./%s" % manager_filename, "input.txt", "res.txt",
                    "output.txt"
                ]])
                if success:
                    outcome, text = extract_outcome_and_text(sandbox)

        else:
            raise ValueError("Unrecognized first parameter "
                             "`%s' for OutputOnly tasktype. "
                             "Should be `diff' or `comparator'." %
                             self.parameters[0])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
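
OutputOnly submissions are matched to testcases purely by filename: the earlier revisions key on zero-padded test numbers, while this last one keys on the testcase codename. For instance:

    # Illustrative: the two naming schemes used above.
    test_number = 7
    codename = "2_03"
    legacy_name = "output_%03d.txt" % test_number  # -> "output_007.txt"
    codename_name = "output_%s.txt" % codename     # -> "output_2_03.txt"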
Example #18
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(job.executables) != 1:
            raise ValueError("Unexpected number of executables (%s)" %
                             len(job.executables))

        # Create the sandbox
        sandbox = create_sandbox(file_cacher,
                                 multithreaded=job.multithreaded_sandbox,
                                 name="evaluate")

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = Batch.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(executable_filename,
                                                    main=main)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if len(self.input_filename) == 0:
            self.input_filename = Batch.DEFAULT_INPUT_FILENAME
            stdin_redirect = self.input_filename
        if len(self.output_filename) == 0:
            self.output_filename = Batch.DEFAULT_OUTPUT_FILENAME
            stdout_redirect = self.output_filename
        else:
            files_allowing_write.append(self.output_filename)
        files_to_get = {self.input_filename: job.input}

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(sandbox,
                                        commands,
                                        job.time_limit,
                                        job.memory_limit,
                                        writable_files=files_allowing_write,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = []

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self.output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"),
                    self.output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self.output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Create a brand-new sandbox just for checking. Only admin
                    # code runs in it, so we allow multithreading and many
                    # processes (still with a limit to avoid fork-bombs).
                    checkbox = create_sandbox(file_cacher,
                                              multithreaded=True,
                                              name="check")
                    checkbox.max_processes = 1000

                    checker_success, outcome, text = self._eval_output(
                        checkbox, job, sandbox.get_root_path())
                    success = success and checker_success

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
Example #19
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher)

        # Prepare the execution
        executable_filename = job.executables.keys()[0]
        command = [os.path.join(".", executable_filename)]
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
            }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        files_to_get = {
            input_filename: job.input
            }

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(
            sandbox,
            command,
            job.time_limit,
            job.memory_limit,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        output_filename]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If not asked otherwise, evaluate the output file
                if not job.only_execution:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage(
                        "res.txt",
                        job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error("Configuration error: missing or "
                                         "invalid comparator (it must be "
                                         "named 'checker')",
                                         extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            success, _ = evaluation_step(
                                sandbox,
                                ["./%s" % manager_filename,
                                 input_filename, "res.txt", output_filename])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s", e,
                                             extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])
Example #20
    def evaluate_testcase(self, test_number):
        """See TaskType.evaluate_testcase."""
        # Create the sandbox
        sandbox = create_sandbox(self)

        # Prepare the execution
        executable_filename = self.job.executables.keys()[0]
        command = [sandbox.relative_path(executable_filename)]
        executables_to_get = {
            executable_filename:
            self.job.executables[executable_filename].digest
            }
        input_filename, output_filename = self.job.task_type_parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        files_to_get = {
            input_filename: self.job.testcases[test_number].input
            }
        allow_path = [input_filename, output_filename]

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(
            sandbox,
            command,
            self.job.time_limit,
            self.job.memory_limit,
            allow_path,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect)

        self.job.evaluations[test_number] = {'sandboxes': [sandbox.path],
                                             'plus': plus}
        outcome = None
        text = None
        evaluation = self.job.evaluations[test_number]

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if self.job.get_output:
                evaluation['output'] = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = "Execution didn't produce file %s" % \
                    (output_filename)
                if self.job.get_output:
                    evaluation['output'] = None

            else:
                # If requested, put the output file into the storage
                if self.job.get_output:
                    evaluation['output'] = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file for testcase %d in job %s" %
                        (test_number, self.job.info),
                        trunc_len=100 * 1024)

                # If not asked otherwise, evaluate the output file
                if not self.job.only_execution:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage(
                        "res.txt",
                        self.job.testcases[test_number].output)

                    # Check the solution with white_diff
                    if self.job.task_type_parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.job.task_type_parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in self.job.managers:
                            logger.error("Configuration error: missing or "
                                         "invalid comparator (it must be "
                                         "named 'checker')")
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                self.job.managers[manager_filename].digest,
                                executable=True)
                            success, _ = evaluation_step(
                                sandbox,
                                ["./%s" % manager_filename,
                                 input_filename, "res.txt", output_filename],
                                allow_path=[input_filename,
                                            "res.txt",
                                            output_filename])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s", e)
                                success = False

                    # Unknown evaluation parameter!
                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.job.task_type_parameters[2])
Example #21
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # "first" and "second" refer to the two manager processes.
        first_sandbox = create_sandbox(file_cacher)
        second_sandbox = create_sandbox(file_cacher)
        fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo = os.path.join(fifo_dir, "fifo")
        os.mkfifo(fifo)
        os.chmod(fifo_dir, 0o755)
        os.chmod(fifo, 0o666)

        # First step: we start the first manager.
        first_filename = "manager"
        first_command = ["./%s" % first_filename, "0", fifo]
        first_executables_to_get = {
            first_filename:
            job.executables[first_filename].digest
            }
        first_files_to_get = {
            "input.txt": job.input
            }
        first_allow_path = [fifo_dir]

        # Put the required files into the sandbox
        for filename, digest in first_executables_to_get.iteritems():
            first_sandbox.create_file_from_storage(filename,
                                                   digest,
                                                   executable=True)
        for filename, digest in first_files_to_get.iteritems():
            first_sandbox.create_file_from_storage(filename, digest)

        first = evaluation_step_before_run(
            first_sandbox,
            first_command,
            job.time_limit,
            job.memory_limit,
            first_allow_path,
            stdin_redirect="input.txt",
            wait=False)

        # Second step: we start the second manager.
        second_filename = "manager"
        second_command = ["./%s" % second_filename, "1", fifo]
        second_executables_to_get = {
            second_filename:
            job.executables[second_filename].digest
            }
        second_files_to_get = {}
        second_allow_path = [fifo_dir]

        # Put the required files into the second sandbox
        for filename, digest in second_executables_to_get.iteritems():
            second_sandbox.create_file_from_storage(filename,
                                                    digest,
                                                    executable=True)
        for filename, digest in second_files_to_get.iteritems():
            second_sandbox.create_file_from_storage(filename, digest)

        second = evaluation_step_before_run(
            second_sandbox,
            second_command,
            job.time_limit,
            job.memory_limit,
            second_allow_path,
            stdout_redirect="output.txt",
            wait=False)

        # Consume output.
        wait_without_std([second, first])
        # TODO: check exit codes with translate_box_exitcode.

        success_first, first_plus = \
            evaluation_step_after_run(first_sandbox)
        success_second, second_plus = \
            evaluation_step_after_run(second_sandbox)

        job.sandboxes = [first_sandbox.path,
                         second_sandbox.path]
        job.plus = second_plus

        success = True
        outcome = None
        text = None

        # Error in the sandbox: report failure!
        if not success_first or not success_second:
            success = False

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(first_plus) or \
                not is_evaluation_passed(second_plus):
            outcome = 0.0
            if not is_evaluation_passed(first_plus):
                text = human_evaluation_message(first_plus)
            else:
                text = human_evaluation_message(second_plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not second_sandbox.file_exists('output.txt'):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"), "output.txt"]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage
                if job.get_output:
                    job.user_output = second_sandbox.get_file_to_storage(
                        "output.txt",
                        "Output file in job %s" % job.info)

                # If not asked otherwise, evaluate the output file
                if not job.only_execution:
                    # Put the reference solution into the sandbox
                    second_sandbox.create_file_from_storage(
                        "res.txt",
                        job.output)

                    # If a checker is not provided, use white-diff
                    if self.parameters[0] == "diff":
                        outcome, text = white_diff_step(
                            second_sandbox, "output.txt", "res.txt")

                    elif self.parameters[0] == "comparator":
                        if TwoSteps.CHECKER_FILENAME not in job.managers:
                            logger.error("Configuration error: missing or "
                                         "invalid comparator (it must be "
                                         "named `checker')",
                                         extra={"operation": job.info})
                            success = False
                        else:
                            second_sandbox.create_file_from_storage(
                                TwoSteps.CHECKER_FILENAME,
                                job.managers[TwoSteps.CHECKER_FILENAME].digest,
                                executable=True)
                            # Rewrite input file, as in Batch.py
                            try:
                                second_sandbox.remove_file("input.txt")
                            except OSError as e:
                                assert not second_sandbox.file_exists(
                                    "input.txt")
                            second_sandbox.create_file_from_storage(
                                "input.txt",
                                job.input)
                            success, _ = evaluation_step(
                                second_sandbox,
                                [["./%s" % TwoSteps.CHECKER_FILENAME,
                                  "input.txt", "res.txt", "output.txt"]])
                            if success:
                                try:
                                    outcome, text = extract_outcome_and_text(
                                        second_sandbox)
                                except ValueError as e:
                                    logger.error("Invalid output from "
                                                 "comparator: %s", e,
                                                 extra={"operation": job.info})
                                    success = False
                    else:
                        raise ValueError("Uncrecognized first parameter"
                                         " `%s' for TwoSteps tasktype." %
                                         self.parameters[0])
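
The TwoSteps type above connects its two manager processes through a POSIX FIFO: the first manager reads input.txt and writes into the FIFO, the second reads from the FIFO and writes output.txt. A self-contained sketch of the same plumbing, outside any sandbox:

    import os
    import subprocess
    import tempfile

    fifo_dir = tempfile.mkdtemp()
    fifo = os.path.join(fifo_dir, "fifo")
    os.mkfifo(fifo)

    # Start both ends before waiting: open() on a FIFO blocks until the
    # other side shows up, which is why the managers above are launched
    # with wait=False and only then waited on together.
    writer = subprocess.Popen(["sh", "-c", "echo 42 > " + fifo])
    reader = subprocess.Popen(["sh", "-c", "cat < " + fifo])
    writer.wait()
    reader.wait()

    os.unlink(fifo)
    os.rmdir(fifo_dir)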