Example #1
def setUp(self):
    super().setUp()
    self.p = ParameterTypeCollection("name", "shortname", "description", [
        ParameterTypeInt("name0", "shortname0", "desc0"),
        ParameterTypeString("name1", "shortname1", "desc1"),
        ParameterTypeChoice("name2", "shortname2", "desc2", {
            "c1": "First choice",
            "c2": "Second choice",
        })])
Example #2
def setUp(self):
    super(TestParameterTypeCollection, self).setUp()
    self.p = ParameterTypeCollection("name", "shortname", "description", [
        ParameterTypeInt("name0", "shortname0", "desc0"),
        ParameterTypeString("name1", "shortname1", "desc1"),
        ParameterTypeChoice("name2", "shortname2", "desc2", {
            "c1": "First choice",
            "c2": "Second choice",
        })])
Example #3
class Batch(TaskType):
    """Task type class for a unique standalone submission source, with
    comparator (or not).

    Parameters need to be a list of three elements.

    The first element is 'grader' or 'alone': in the first case, the
    source file is compiled together with a provided piece of software
    (the 'grader'); in the second case, by itself.

    The second element is a 2-tuple of the input file name and output file
    name. The input file may be '' to denote stdin, and similarly the
    output filename may be '' to denote stdout.

    The third element is 'diff' or 'comparator' and says whether the
    output is compared with a simple diff algorithm or using a
    comparator.

    Note: the first element is used only in the compilation step; the
    others only in the evaluation step.

    A comparator can read argv[1], argv[2], argv[3] (respectively,
    input, correct output and user output) and should write the
    outcome to stdout and the text to stderr.

    """
    # Filename of the reference solution in the sandbox evaluating the output.
    CORRECT_OUTPUT_FILENAME = "res.txt"
    # Filename of the admin-provided comparator.
    CHECKER_FILENAME = "checker"
    # Basename of the grader, used in the manager filename and as the main
    # class in languages that require us to specify it.
    GRADER_BASENAME = "grader"
    # Default input and output filenames when not provided as parameters.
    DEFAULT_INPUT_FILENAME = "input.txt"
    DEFAULT_OUTPUT_FILENAME = "output.txt"

    # Constants used in the parameter definition.
    OUTPUT_EVAL_DIFF = "diff"
    OUTPUT_EVAL_CHECKER = "comparator"
    COMPILATION_ALONE = "alone"
    COMPILATION_GRADER = "grader"

    # Other constants to specify the task type behaviour and parameters.
    ALLOW_PARTIAL_SUBMISSION = False

    _COMPILATION = ParameterTypeChoice(
        "Compilation", "compilation", "", {
            COMPILATION_ALONE: "Submissions are self-sufficient",
            COMPILATION_GRADER: "Submissions are compiled with a grader"
        })

    _USE_FILE = ParameterTypeCollection(
        "I/O (blank for stdin/stdout)", "io", "", [
            ParameterTypeString("Input file", "inputfile", ""),
            ParameterTypeString("Output file", "outputfile", ""),
        ])

    _EVALUATION = ParameterTypeChoice(
        "Output evaluation", "output_eval", "", {
            OUTPUT_EVAL_DIFF: "Outputs compared with white diff",
            OUTPUT_EVAL_CHECKER: "Outputs are compared by a comparator"
        })

    ACCEPTED_PARAMETERS = [_COMPILATION, _USE_FILE, _EVALUATION]

    @property
    def name(self):
        """See TaskType.name."""
        # TODO add some details if a grader/comparator is used, etc...
        return "Batch"

    def __init__(self, parameters):
        super(Batch, self).__init__(parameters)
        self.compilation = self.parameters[0]
        self.input_filename, self.output_filename = self.parameters[1]
        self.output_eval = self.parameters[2]

    def get_compilation_commands(self, submission_format):
        """See TaskType.get_compilation_commands."""
        source_filenames = []
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager.
        if self._uses_grader():
            source_filenames.append(Batch.GRADER_BASENAME + ".%l")
        source_filenames.append(submission_format[0])
        executable_filename = submission_format[0].replace(".%l", "")
        res = dict()
        for language in LANGUAGES:
            res[language.name] = language.get_compilation_commands([
                source.replace(".%l", language.source_extension)
                for source in source_filenames
            ], executable_filename)
        return res

    def get_user_managers(self, unused_submission_format):
        """See TaskType.get_user_managers."""
        return []

    def get_auto_managers(self):
        """See TaskType.get_auto_managers."""
        return []

    def _uses_grader(self):
        return self.compilation == Batch.COMPILATION_GRADER

    def _uses_checker(self):
        return self.output_eval == Batch.OUTPUT_EVAL_CHECKER

    @staticmethod
    def _is_manager_for_compilation(filename):
        """Return if a manager should be copied in the compilation sandbox"""
        return any(filename.endswith(header) for header in HEADER_EXTS) or \
            any(filename.endswith(source) for source in SOURCE_EXTS) or \
            any(filename.endswith(obj) for obj in OBJECT_EXTS)

    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files),
                         extra={"operation": job.info})
            return

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher,
                                 multithreaded=job.multithreaded_sandbox,
                                 name="compile")
        job.sandboxes.append(sandbox.path)

        user_file_format = next(iterkeys(job.files))
        user_source_filename = user_file_format.replace(".%l", source_ext)
        executable_filename = user_file_format.replace(".%l", "")

        # Copy required files in the sandbox (includes the grader if present).
        sandbox.create_file_from_storage(user_source_filename,
                                         job.files[user_file_format].digest)
        for filename in iterkeys(job.managers):
            if Batch._is_manager_for_compilation(filename):
                sandbox.create_file_from_storage(filename,
                                                 job.managers[filename].digest)

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line.
        source_filenames = [user_source_filename]
        if self._uses_grader():
            grader_source_filename = Batch.GRADER_BASENAME + source_ext
            source_filenames.insert(0, grader_source_filename)

        # Prepare the compilation command.
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)

    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(job.executables) != 1:
            raise ValueError("Unexpected number of executables (%s)" %
                             len(job.executables))

        # Create the sandbox
        sandbox = create_sandbox(file_cacher,
                                 multithreaded=job.multithreaded_sandbox,
                                 name="evaluate")

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = Batch.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(executable_filename,
                                                    main=main)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if len(self.input_filename) == 0:
            self.input_filename = Batch.DEFAULT_INPUT_FILENAME
            stdin_redirect = self.input_filename
        if len(self.output_filename) == 0:
            self.output_filename = Batch.DEFAULT_OUTPUT_FILENAME
            stdout_redirect = self.output_filename
        else:
            files_allowing_write.append(self.output_filename)
        files_to_get = {self.input_filename: job.input}

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(sandbox,
                                        commands,
                                        job.time_limit,
                                        job.memory_limit,
                                        writable_files=files_allowing_write,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = []

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self.output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"),
                    self.output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self.output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Create a brand-new sandbox just for checking. Only admin
                    # code runs in it, so we allow multithreading and many
                    # processes (still with a limit to avoid fork-bombs).
                    checkbox = create_sandbox(file_cacher,
                                              multithreaded=True,
                                              name="check")
                    checkbox.max_processes = 1000

                    checker_success, outcome, text = self._eval_output(
                        checkbox, job, sandbox.get_root_path())
                    success = success and checker_success

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)

    def _eval_output(self, sandbox, job, eval_sandbox_path):
        """Evaluate ("check") the output using a white diff or a checker.

        sandbox (Sandbox): the sandbox to use to evaluate the output.
        job (Job): the job triggering this checker run.
        eval_sandbox_path (str): root path of the sandbox where the
            submission was executed, used to fetch the user output.

        return (bool, float|None, [str]): success (true if the checker was able
            to check the solution successfully), outcome and text.

        """
        # Put the reference solution and input into the checkbox.
        sandbox.create_file_from_storage(Batch.CORRECT_OUTPUT_FILENAME,
                                         job.output)
        sandbox.create_file_from_storage(self.input_filename, job.input)

        # Put the user-produced output file into the checkbox
        output_src = os.path.join(eval_sandbox_path, self.output_filename)
        output_dst = os.path.join(sandbox.get_root_path(),
                                  self.output_filename)
        try:
            if os.path.islink(output_src):
                raise FileNotFoundError
            shutil.copyfile(output_src, output_dst)
        except FileNotFoundError:
            pass

        if self._uses_checker():
            success, outcome, text = self._run_checker(sandbox, job)
        else:
            success = True
            outcome, text = white_diff_step(sandbox, self.output_filename,
                                            Batch.CORRECT_OUTPUT_FILENAME)

        delete_sandbox(sandbox, success)
        return success, outcome, text

    def _run_checker(self, sandbox, job):
        """Run the explicit checker given by the admins

        sandbox (Sandbox): the sandbox to run the checker in; should already
            contain input, correct output, and user output.
        job (Job): the job triggering this checker run.

        return (bool, float|None, [str]): success (true if the checker was able
            to check the solution successfully), outcome and text.

        """
        # Copy the checker in the sandbox, after making sure it was provided.
        if Batch.CHECKER_FILENAME not in job.managers:
            logger.error(
                "Configuration error: missing or invalid comparator "
                "(it must be named '%s')",
                Batch.CHECKER_FILENAME,
                extra={"operation": job.info})
            return False, None, []
        sandbox.create_file_from_storage(
            Batch.CHECKER_FILENAME,
            job.managers[Batch.CHECKER_FILENAME].digest,
            executable=True)

        command = [
            "./%s" % Batch.CHECKER_FILENAME, self.input_filename,
            Batch.CORRECT_OUTPUT_FILENAME, self.output_filename
        ]
        success, _ = evaluation_step(sandbox, [command])
        if not success:
            return False, None, []

        try:
            outcome, text = extract_outcome_and_text(sandbox)
        except ValueError as e:
            logger.error("Invalid output from comparator: %s",
                         e,
                         extra={"operation": job.info})
            return False, None, []

        return True, outcome, text
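
The class docstring above pins down the comparator contract: argv[1] is the input file, argv[2] the correct output, argv[3] the contestant's output; the outcome goes to stdout and the message to stderr. As a rough illustration, such a comparator could look like the sketch below; the 0.0/1.0 outcome scale and the token-wise comparison are assumptions, not something the examples prescribe.

#!/usr/bin/env python3
# Hypothetical standalone comparator following the protocol in the Batch
# docstring; the 0.0/1.0 outcome scale and the token-wise comparison are
# assumptions.
import sys


def main():
    # argv[1] = input, argv[2] = correct output, argv[3] = user output.
    _input_file, correct_file, user_file = sys.argv[1:4]
    with open(correct_file) as f:
        correct_tokens = f.read().split()
    with open(user_file) as f:
        user_tokens = f.read().split()
    if correct_tokens == user_tokens:
        print("1.0")                                  # outcome on stdout
        print("Output is correct", file=sys.stderr)   # text on stderr
    else:
        print("0.0")
        print("Output isn't correct", file=sys.stderr)


if __name__ == "__main__":
    main()
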
Example #4
class Batch(TaskType):
    """Task type class for a unique standalone submission source, with
    comparator (or not).

    Parameters need to be a list of three elements.

    The first element is 'grader' or 'alone': in the first case, the
    source file is compiled together with a provided piece of software
    (the 'grader'); in the second case, by itself.

    The second element is a 2-tuple of the input file name and output file
    name. The input file may be '' to denote stdin, and similarly the
    output filename may be '' to denote stdout.

    The third element is 'diff' or 'comparator' and says whether the
    output is compared with a simple diff algorithm or using a
    comparator.

    Note: the first element is used only in the compilation step; the
    others only in the evaluation step.

    A comparator can read argv[1], argv[2], argv[3] (respectively,
    input, correct output and user output) and should write the
    outcome to stdout and the text to stderr.

    """
    ALLOW_PARTIAL_SUBMISSION = False

    _COMPILATION = ParameterTypeChoice(
        "Compilation", "compilation", "", {
            "alone": "Submissions are self-sufficient",
            "grader": "Submissions are compiled with a grader"
        })

    _USE_FILE = ParameterTypeCollection(
        "I/O (blank for stdin/stdout)", "io", "", [
            ParameterTypeString("Input file", "inputfile", ""),
            ParameterTypeString("Output file", "outputfile", ""),
        ])

    _EVALUATION = ParameterTypeChoice(
        "Output evaluation", "output_eval", "", {
            "diff": "Outputs compared with white diff",
            "comparator": "Outputs are compared by a comparator"
        })

    ACCEPTED_PARAMETERS = [_COMPILATION, _USE_FILE, _EVALUATION]

    @property
    def name(self):
        """See TaskType.name."""
        # TODO add some details if a grader/comparator is used, etc...
        return "Batch"

    def get_compilation_commands(self, submission_format):
        """See TaskType.get_compilation_commands."""
        source_filenames = []
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager.
        if self._uses_grader():
            source_filenames.append("grader.%l")
        source_filenames.append(submission_format[0])
        executable_filename = submission_format[0].replace(".%l", "")
        res = dict()
        for language in LANGUAGES:
            res[language.name] = language.get_compilation_commands([
                source.replace(".%l", language.source_extension)
                for source in source_filenames
            ], executable_filename)
        return res

    def get_user_managers(self, unused_submission_format):
        """See TaskType.get_user_managers."""
        return []

    def get_auto_managers(self):
        """See TaskType.get_auto_managers."""
        return []

    def _uses_grader(self):
        return self.parameters[0] == "grader"

    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = get_language(job.language)
        source_ext = language.source_extension

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1",
                         len(job.files),
                         extra={"operation": job.info})
            return

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = next(iterkeys(job.files))
        source_filenames = []
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[0]] = \
            job.files[format_filename].digest
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager. The grader must
        # be the first file in source_filenames.
        if self._uses_grader():
            source_filenames.insert(0, "grader%s" % source_ext)
            files_to_get["grader%s" % source_ext] = \
                job.managers["grader%s" % source_ext].digest

        # Also copy all managers that might be useful during compilation.
        for filename in iterkeys(job.managers):
            if any(filename.endswith(header) for header in HEADER_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(source) for source in SOURCE_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest
            elif any(filename.endswith(obj) for obj in OBJECT_EXTS):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox, job.success)

    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher, job.multithreaded_sandbox)

        # Prepare the execution
        assert len(job.executables) == 1
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        commands = language.get_evaluation_commands(
            executable_filename,
            main="grader" if self._uses_grader() else executable_filename)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        files_allowing_write = []
        if len(input_filename) == 0:
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if len(output_filename) == 0:
            output_filename = "output.txt"
            stdout_redirect = output_filename
        else:
            files_allowing_write.append(output_filename)
        files_to_get = {input_filename: job.input}

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(sandbox,
                                        commands,
                                        job.time_limit,
                                        job.memory_limit,
                                        writable_files=files_allowing_write,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = []

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"), output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy
                # outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage("res.txt", job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named 'checker')",
                                extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            # Rewrite input file. The untrusted
                            # contestant program should not be able to
                            # modify it; however, the grader may
                            # destroy the input file to prevent the
                            # contestant's program from directly
                            # accessing it. Since we cannot create
                            # files already existing in the sandbox,
                            # we try removing the file first.
                            try:
                                sandbox.remove_file(input_filename)
                            except OSError as e:
                                # Let us be extra sure that the file
                                # was actually removed and we did not
                                # mess up with permissions.
                                assert not sandbox.file_exists(input_filename)
                            sandbox.create_file_from_storage(
                                input_filename, job.input)

                            # Allow using any number of processes (because
                            # e.g. one may want to write a bash checker that
                            # calls other processes). Still set a high but
                            # finite limit to avoid fork-bombing the worker.
                            sandbox.max_processes = 1000

                            success, _ = evaluation_step(
                                sandbox, [[
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error(
                                    "Invalid output from "
                                    "comparator: %s",
                                    e.message,
                                    extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox, job.success)
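
For reference, the parameters described in the docstring are just a three-element list. The values below are illustrative only (the filenames are made up), and the constructor call assumes the TaskType(parameters) signature used by the variants with an explicit __init__:

# Hypothetical parameter values for the Batch task type (filenames are
# made up). Compile with a grader, use input.txt / output.txt, judge with
# an admin-provided checker:
grader_params = ["grader", ["input.txt", "output.txt"], "comparator"]

# Compile the submission alone, use stdin/stdout, judge with white diff:
stdio_params = ["alone", ["", ""], "diff"]

# Assumes the Batch class defined above is in scope.
batch = Batch(grader_params)
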
Example #5
class Batch(TaskType):
    """Task type class for a unique standalone submission source, with
    comparator (or not).

    Parameters need to be a list of three elements.

    The first element is 'grader' or 'alone': in the first case, the
    source file is compiled together with a provided piece of software
    (the 'grader'); in the second case, by itself.

    The second element is a 2-tuple of the input file name and output file
    name. The input file may be '' to denote stdin, and similarly the
    output filename may be '' to denote stdout.

    The third element is 'diff' or 'comparator' and says whether the
    output is compared with a simple diff algorithm or using a
    comparator.

    Note: the first element is used only in the compilation step; the
    others only in the evaluation step.

    A comparator can read argv[1], argv[2], argv[3] (respectively,
    input, correct output and user output) and should write the
    outcome to stdout and the text to stderr.

    """
    # Codename of the checker, if it is used.
    CHECKER_CODENAME = "checker"
    # Basename of the grader, used in the manager filename and as the main
    # class in languages that require us to specify it.
    GRADER_BASENAME = "grader"
    # Default input and output filenames when not provided as parameters.
    DEFAULT_INPUT_FILENAME = "input.txt"
    DEFAULT_OUTPUT_FILENAME = "output.txt"

    # Constants used in the parameter definition.
    OUTPUT_EVAL_DIFF = "diff"
    OUTPUT_EVAL_CHECKER = "comparator"
    COMPILATION_ALONE = "alone"
    COMPILATION_GRADER = "grader"

    # Other constants to specify the task type behaviour and parameters.
    ALLOW_PARTIAL_SUBMISSION = False

    _COMPILATION = ParameterTypeChoice(
        "Compilation", "compilation", "", {
            COMPILATION_ALONE: "Submissions are self-sufficient",
            COMPILATION_GRADER: "Submissions are compiled with a grader"
        })

    _USE_FILE = ParameterTypeCollection(
        "I/O (blank for stdin/stdout)", "io", "", [
            ParameterTypeString("Input file", "inputfile", ""),
            ParameterTypeString("Output file", "outputfile", ""),
        ])

    _EVALUATION = ParameterTypeChoice(
        "Output evaluation", "output_eval", "", {
            OUTPUT_EVAL_DIFF: "Outputs compared with white diff",
            OUTPUT_EVAL_CHECKER: "Outputs are compared by a comparator"
        })

    ACCEPTED_PARAMETERS = [_COMPILATION, _USE_FILE, _EVALUATION]

    @property
    def name(self):
        """See TaskType.name."""
        # TODO add some details if a grader/comparator is used, etc...
        return "Batch"

    def __init__(self, parameters):
        super(Batch, self).__init__(parameters)

        # Data in the parameters.
        self.compilation = self.parameters[0]
        self.input_filename, self.output_filename = self.parameters[1]
        self.output_eval = self.parameters[2]

        # Actual input and output are the files used to store the input and
        # to check the output, regardless of whether redirects are used.
        self._actual_input = self.input_filename
        self._actual_output = self.output_filename
        if len(self.input_filename) == 0:
            self._actual_input = Batch.DEFAULT_INPUT_FILENAME
        if len(self.output_filename) == 0:
            self._actual_output = Batch.DEFAULT_OUTPUT_FILENAME

    def get_compilation_commands(self, submission_format):
        """See TaskType.get_compilation_commands."""
        source_filenames = []
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager.
        if self._uses_grader():
            source_filenames.append(Batch.GRADER_BASENAME + ".%l")
        source_filenames.append(submission_format[0])
        executable_filename = submission_format[0].replace(".%l", "")
        res = dict()
        for language in LANGUAGES:
            res[language.name] = language.get_compilation_commands([
                source.replace(".%l", language.source_extension)
                for source in source_filenames
            ], executable_filename)
        return res

    def get_user_managers(self):
        """See TaskType.get_user_managers."""
        # In case the task uses a grader, we let the user provide their own
        # grader (which is usually a simplified grader provided by the admins).
        if self._uses_grader():
            return ["grader.%l"]
        else:
            return []

    def get_auto_managers(self):
        """See TaskType.get_auto_managers."""
        return []

    def _uses_grader(self):
        return self.compilation == Batch.COMPILATION_GRADER

    def _uses_checker(self):
        return self.output_eval == Batch.OUTPUT_EVAL_CHECKER

    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension

        if not check_files_number(job, 1):
            return

        user_file_format = next(iterkeys(job.files))
        user_source_filename = user_file_format.replace(".%l", source_ext)
        executable_filename = user_file_format.replace(".%l", "")

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line, and
        # we check that it exists.
        source_filenames = [user_source_filename]
        if self._uses_grader():
            grader_source_filename = Batch.GRADER_BASENAME + source_ext
            if not check_manager_present(job, grader_source_filename):
                return
            source_filenames.insert(0, grader_source_filename)

        # Prepare the compilation command.
        commands = language.get_compilation_commands(source_filenames,
                                                     executable_filename)

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.path)

        # Copy required files in the sandbox (includes the grader if present).
        sandbox.create_file_from_storage(user_source_filename,
                                         job.files[user_file_format].digest)
        for filename, manager in iteritems(job.managers):
            if is_manager_for_compilation(filename, language):
                sandbox.create_file_from_storage(filename, manager.digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables.
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup.
        delete_sandbox(sandbox, job.success)

    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = Batch.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(executable_filename,
                                                    main=main)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        files_to_get = {self._actual_input: job.input}

        # Check which redirects we need to perform; if the output is not
        # managed via a redirect, the submission needs to be able to write
        # to it.
        files_allowing_write = []
        stdin_redirect = None
        stdout_redirect = None
        if len(self.input_filename) == 0:
            stdin_redirect = self._actual_input
        if len(self.output_filename) == 0:
            stdout_redirect = self._actual_output
        else:
            files_allowing_write.append(self._actual_output)

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, name="evaluate")
        job.sandboxes.append(sandbox.path)

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        box_success, evaluation_success, stats = evaluation_step(
            sandbox,
            commands,
            job.time_limit,
            job.memory_limit,
            writable_files=files_allowing_write,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect,
            multiprocess=job.multithreaded_sandbox)

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not box_success:
            pass

        # Contestant's error: the marks won't be good
        elif not evaluation_success:
            outcome = 0.0
            text = human_evaluation_message(stats)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self._actual_output):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"),
                    self._actual_output
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self._actual_output,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:
                    box_success, outcome, text = eval_output(
                        file_cacher,
                        job,
                        Batch.CHECKER_CODENAME
                        if self._uses_checker() else None,
                        user_output_path=sandbox.relative_path(
                            self._actual_output),
                        user_output_filename=self.output_filename)

        # Fill in the job with the results.
        job.success = box_success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text
        job.plus = stats

        delete_sandbox(sandbox, job.success)
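
get_compilation_commands, shown in several variants above, returns one entry per registered language, mapping the language name to the list of command lines to run in order. A hypothetical call, under the assumptions that the Batch class above is importable (in the CMS tree it lives under cms.grading.tasktypes) and that at least one language is registered in LANGUAGES:

# Illustrative only: the module path, the parameters and the submission
# format are assumptions.
from cms.grading.tasktypes.Batch import Batch

batch = Batch(["alone", ["", ""], "diff"])
commands_by_language = batch.get_compilation_commands(["sol.%l"])
for language_name, commands in commands_by_language.items():
    # Each value is a list of command lines (each one a list of strings).
    print(language_name, commands)
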
Example #6
class TestParameterTypeCollection(unittest.TestCase):
    """Test the class ParameterTypeCollection."""

    def setUp(self):
        super(TestParameterTypeCollection, self).setUp()
        self.p = ParameterTypeCollection("name", "shortname", "description", [
            ParameterTypeInt("name0", "shortname0", "desc0"),
            ParameterTypeString("name1", "shortname1", "desc1"),
            ParameterTypeChoice("name2", "shortname2", "desc2", {
                "c1": "First choice",
                "c2": "Second choice",
            })])

    def test_validate_success(self):
        self.p.validate([1, "2", "c1"])
        self.p.validate([-1, "asd", "c1"])

    def test_validate_failure_wrong_type(self):
        with self.assertRaises(ValueError):
            self.p.validate((1, "2", "c1"))
        with self.assertRaises(ValueError):
            self.p.validate(["1", "2", "c1"])

    def test_parse_handler(self):
        h = FakeHandler({
            "ok_shortname_0_shortname0": "1",
            "ok_shortname_1_shortname1": "1",
            "ok_shortname_2_shortname2": "c1",
            "missing_shortname_0_shortname0": "1",
            "wrong_shortname_0_shortname0": "1",
            "wrong_shortname_1_shortname1": "1",
            "wrong_shortname_2_shortname2": "c3",
        })
        self.assertEqual(self.p.parse_handler(h, "ok_"), [1, "1", "c1"])
        with self.assertRaises(ValueError):
            self.p.parse_handler(h, "wrong_")
        with self.assertRaises(MissingArgumentError):
            self.p.parse_handler(h, "missing_")
Example #7
class TestParameterTypeCollection(unittest.TestCase):
    """Test the class ParameterTypeCollection."""

    def setUp(self):
        super().setUp()
        self.p = ParameterTypeCollection("name", "shortname", "description", [
            ParameterTypeInt("name0", "shortname0", "desc0"),
            ParameterTypeString("name1", "shortname1", "desc1"),
            ParameterTypeChoice("name2", "shortname2", "desc2", {
                "c1": "First choice",
                "c2": "Second choice",
            })])

    def test_validate_success(self):
        self.p.validate([1, "2", "c1"])
        self.p.validate([-1, "asd", "c1"])

    def test_validate_failure_wrong_type(self):
        with self.assertRaises(ValueError):
            self.p.validate((1, "2", "c1"))
        with self.assertRaises(ValueError):
            self.p.validate(["1", "2", "c1"])

    def test_parse_handler(self):
        h = FakeHandler({
            "ok_shortname_0_shortname0": "1",
            "ok_shortname_1_shortname1": "1",
            "ok_shortname_2_shortname2": "c1",
            "missing_shortname_0_shortname0": "1",
            "wrong_shortname_0_shortname0": "1",
            "wrong_shortname_1_shortname1": "1",
            "wrong_shortname_2_shortname2": "c3",
        })
        self.assertEqual(self.p.parse_handler(h, "ok_"), [1, "1", "c1"])
        with self.assertRaises(ValueError):
            self.p.parse_handler(h, "wrong_")
        with self.assertRaises(MissingArgumentError):
            self.p.parse_handler(h, "missing_")
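
The tests above construct a FakeHandler from a plain dict of argument names to values. Assuming parse_handler only needs a tornado-style get_argument() on the handler (which is what the MissingArgumentError expectation suggests), a minimal stand-in could look like this; the real helper used by the CMS test suite may differ:

# Minimal fake handler sketch (an assumption, not the actual CMS test helper).
from tornado.web import MissingArgumentError


class FakeHandler:
    """Expose the small part of a request handler that parse_handler uses."""

    def __init__(self, arguments):
        self.arguments = arguments

    def get_argument(self, name):
        # Behave like tornado: raise MissingArgumentError for absent arguments.
        if name not in self.arguments:
            raise MissingArgumentError(name)
        return self.arguments[name]
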
Example #8
class Batch(TaskType):
    """Task type class for a unique standalone submission source, with
    comparator (or not).

    Parameters need to be a list of three elements.

    The first element is 'grader' or 'alone': in the first case, the
    source file is compiled together with a provided piece of software
    (the 'grader'); in the second case, by itself.

    The second element is a 2-tuple of the input file name and output file
    name. The input file may be '' to denote stdin, and similarly the
    output filename may be '' to denote stdout.

    The third element is 'diff' or 'comparator' and says whether the
    output is compared with a simple diff algorithm or using a
    comparator.

    Note: the first element is used only in the compilation step; the
    others only in the evaluation step.

    A comparator can read argv[1], argv[2], argv[3] (respectively,
    input, correct output and user output) and should write the
    outcome to stdout and the text to stderr.

    """
    ALLOW_PARTIAL_SUBMISSION = False

    _COMPILATION = ParameterTypeChoice(
        "Compilation", "compilation", "", {
            "alone": "Submissions are self-sufficient",
            "grader": "Submissions are compiled with a grader"
        })

    _USE_FILE = ParameterTypeCollection(
        "I/O (blank for stdin/stdout)", "io", "", [
            ParameterTypeString("Input file", "inputfile", ""),
            ParameterTypeString("Output file", "outputfile", ""),
        ])

    _EVALUATION = ParameterTypeChoice(
        "Output evaluation", "output_eval", "", {
            "diff": "Outputs compared with white diff",
            "comparator": "Outputs are compared by a comparator"
        })

    ACCEPTED_PARAMETERS = [_COMPILATION, _USE_FILE, _EVALUATION]

    @property
    def name(self):
        """See TaskType.name."""
        # TODO add some details if a grader/comparator is used, etc...
        return "Batch"

    def get_compilation_commands(self, submission_format):
        """See TaskType.get_compilation_commands."""
        res = dict()
        for language in LANGUAGES:
            format_filename = submission_format[0]
            source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]
            source_filenames = []
            # If a grader is specified, we add to the command line (and to
            # the files to get) the corresponding manager.
            if self.parameters[0] == "grader":
                source_filenames.append("grader%s" % source_ext)
            source_filenames.append(format_filename.replace(".%l", source_ext))
            executable_filename = format_filename.replace(".%l", "")
            commands = get_compilation_commands(language, source_filenames,
                                                executable_filename)
            res[language] = commands
        return res

    def get_user_managers(self, submission_format):
        """See TaskType.get_user_managers."""
        return []

    def get_auto_managers(self):
        """See TaskType.get_auto_managers."""
        return None

    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        # Detect the submission's language. The checks about the
        # formal correctness of the submission are done in CWS,
        # before accepting it.
        language = job.language
        source_ext = LANGUAGE_TO_SOURCE_EXT_MAP[language]

        # TODO: here we are sure that submission.files are the same as
        # task.submission_format. The following check shouldn't be
        # here, but in the definition of the task, since this actually
        # checks that task's task type and submission format agree.
        if len(job.files) != 1:
            job.success = True
            job.compilation_success = False
            job.text = [N_("Invalid files in submission")]
            logger.error("Submission contains %d files, expecting 1" %
                         len(job.files),
                         extra={"operation": job.info})
            return True

        # Create the sandbox
        sandbox = create_sandbox(file_cacher)
        job.sandboxes.append(sandbox.path)

        # Prepare the source files in the sandbox
        files_to_get = {}
        format_filename = job.files.keys()[0]
        source_filenames = []
        source_filenames.append(format_filename.replace(".%l", source_ext))
        files_to_get[source_filenames[0]] = \
            job.files[format_filename].digest
        # If a grader is specified, we add to the command line (and to
        # the files to get) the corresponding manager. The grader must
        # be the first file in source_filenames.
        if self.parameters[0] == "grader":
            source_filenames.insert(0, "grader%s" % source_ext)
            files_to_get["grader%s" % source_ext] = \
                job.managers["grader%s" % source_ext].digest

        # Also copy all *.h and *lib.pas graders
        for filename in job.managers.iterkeys():
            if filename.endswith('.h') or \
                    filename.endswith('lib.pas'):
                files_to_get[filename] = \
                    job.managers[filename].digest

        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Prepare the compilation command
        executable_filename = format_filename.replace(".%l", "")
        commands = get_compilation_commands(language, source_filenames,
                                            executable_filename)

        # Run the compilation
        operation_success, compilation_success, text, plus = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables
        job.success = operation_success
        job.compilation_success = compilation_success
        job.plus = plus
        job.text = text
        if operation_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup
        delete_sandbox(sandbox)

    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        # Create the sandbox
        sandbox = create_sandbox(file_cacher)

        # Prepare the execution
        executable_filename = job.executables.keys()[0]
        language = job.language
        commands = get_evaluation_commands(language, executable_filename)
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        input_filename, output_filename = self.parameters[1]
        stdin_redirect = None
        stdout_redirect = None
        if input_filename == "":
            input_filename = "input.txt"
            stdin_redirect = input_filename
        if output_filename == "":
            output_filename = "output.txt"
            stdout_redirect = output_filename
        files_to_get = {input_filename: job.input}

        # Put the required files into the sandbox
        for filename, digest in executables_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in files_to_get.iteritems():
            sandbox.create_file_from_storage(filename, digest)

        # Actually perform the execution
        success, plus = evaluation_step(sandbox,
                                        commands,
                                        job.time_limit,
                                        job.memory_limit,
                                        stdin_redirect=stdin_redirect,
                                        stdout_redirect=stdout_redirect)

        job.sandboxes = [sandbox.path]
        job.plus = plus

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not success:
            pass

        # Contestant's error: the marks won't be good
        elif not is_evaluation_passed(plus):
            outcome = 0.0
            text = human_evaluation_message(plus)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(output_filename):
                outcome = 0.0
                text = [
                    N_("Evaluation didn't produce file %s"), output_filename
                ]
                if job.get_output:
                    job.user_output = None

            else:
                # If requested, put the output file into the storage
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        output_filename,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If not asked otherwise, evaluate the output file
                if not job.only_execution:

                    # Put the reference solution into the sandbox
                    sandbox.create_file_from_storage("res.txt", job.output)

                    # Check the solution with white_diff
                    if self.parameters[2] == "diff":
                        outcome, text = white_diff_step(
                            sandbox, output_filename, "res.txt")

                    # Check the solution with a comparator
                    elif self.parameters[2] == "comparator":
                        manager_filename = "checker"

                        if manager_filename not in job.managers:
                            logger.error(
                                "Configuration error: missing or "
                                "invalid comparator (it must be "
                                "named 'checker')",
                                extra={"operation": job.info})
                            success = False

                        else:
                            sandbox.create_file_from_storage(
                                manager_filename,
                                job.managers[manager_filename].digest,
                                executable=True)
                            success, _ = evaluation_step(
                                sandbox, [[
                                    "./%s" % manager_filename, input_filename,
                                    "res.txt", output_filename
                                ]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s" % (e.message, ),
                                             extra={"operation": job.info})
                                success = False

                    else:
                        raise ValueError("Unrecognized third parameter"
                                         " `%s' for Batch tasktype." %
                                         self.parameters[2])

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox)
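
When debugging an admin-provided checker outside CMS, the command built above ("./checker <input> res.txt <output>") can also be run by hand. A rough sketch under stated assumptions (placeholder paths and a Python 3.7+ subprocess API); the parsing mirrors what extract_outcome_and_text is described to do, with the outcome on stdout and the text on stderr:

# Hypothetical local harness for an admin checker; all paths are placeholders
# and a Python 3.7+ subprocess API is assumed.
import subprocess

result = subprocess.run(
    ["./checker", "input.txt", "res.txt", "output.txt"],
    capture_output=True, text=True, check=True)
outcome = float(result.stdout.strip())   # outcome, e.g. 0.0 or 1.0
message = result.stderr.strip()          # human-readable text
print(outcome, message)
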
Example #9
class Batch(TaskType):
    """Task type class for a unique standalone submission source, with
    comparator (or not).

    Parameters need to be a list of three elements.

    The first element is 'grader' or 'alone': in the first case, the
    source file is compiled together with a provided piece of software
    (the 'grader'); in the second case, by itself.

    The second element is a 2-tuple of the input file name and output file
    name. The input file may be '' to denote stdin, and similarly the
    output filename may be '' to denote stdout.

    The third element is 'diff' or 'comparator' and says whether the
    output is compared with a simple diff algorithm or using a
    comparator.

    Note: the first element is used only in the compilation step; the
    others only in the evaluation step.

    A comparator can read argv[1], argv[2], argv[3] (respectively,
    input, correct output and user output) and should write the
    outcome to stdout and the text to stderr.

    """
    # Codename of the checker, if it is used.
    CHECKER_CODENAME = "checker"
    # Codename of the manager, when an interactive task is to be
    # evaluated in a single sandbox.
    MANAGER_CODENAME = "batchmanager"
    # Basename of the grader, used in the manager filename and as the main
    # class in languages that require us to specify it.
    GRADER_BASENAME = "grader"
    # Default input and output filenames when not provided as parameters.
    DEFAULT_INPUT_FILENAME = "input.txt"
    DEFAULT_OUTPUT_FILENAME = "output.txt"

    # Constants used in the parameter definition.
    OUTPUT_EVAL_DIFF = "diff"
    OUTPUT_EVAL_CHECKER = "comparator"
    COMPILATION_ALONE = "alone"
    COMPILATION_GRADER = "grader"

    # Other constants to specify the task type behaviour and parameters.
    ALLOW_PARTIAL_SUBMISSION = False

    _COMPILATION = ParameterTypeChoice(
        "Compilation",
        "compilation",
        "",
        {COMPILATION_ALONE: "Submissions are self-sufficient",
         COMPILATION_GRADER: "Submissions are compiled with a grader"})

    _USE_FILE = ParameterTypeCollection(
        "I/O (blank for stdin/stdout)",
        "io",
        "",
        [
            ParameterTypeString("Input file", "inputfile", ""),
            ParameterTypeString("Output file", "outputfile", ""),
        ])

    _EVALUATION = ParameterTypeChoice(
        "Output evaluation",
        "output_eval",
        "",
        {OUTPUT_EVAL_DIFF: "Outputs compared with white diff",
         OUTPUT_EVAL_CHECKER: "Outputs are compared by a comparator"})

    ACCEPTED_PARAMETERS = [_COMPILATION, _USE_FILE, _EVALUATION]

    @property
    def name(self):
        """See TaskType.name."""
        # TODO add some details if a grader/comparator is used, etc...
        return "Batch"

    def __init__(self, parameters):
        super(Batch, self).__init__(parameters)

        # Data in the parameters.
        self.compilation = self.parameters[0]
        self.input_filename, self.output_filename = self.parameters[1]
        self.output_eval = self.parameters[2]

        # Actual input and output are the files used to store the input and
        # to check the output, regardless of whether redirects are used.
        self._actual_input = self.input_filename
        self._actual_output = self.output_filename
        if len(self.input_filename) == 0:
            self._actual_input = self.DEFAULT_INPUT_FILENAME
        if len(self.output_filename) == 0:
            self._actual_output = self.DEFAULT_OUTPUT_FILENAME
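
        # For example (illustrative values, not from the original source):
        # with parameters ["alone", ["", "output.txt"], "diff"], the actual
        # input falls back to "input.txt" (later fed to the program via
        # stdin redirection), while the program itself must create
        # "output.txt", which is then compared with white diff.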

    def get_compilation_commands(self, submission_format):
        """See TaskType.get_compilation_commands."""
        codenames_to_compile = []
        if self._uses_grader():
            codenames_to_compile.append(self.GRADER_BASENAME + ".%l")
        codenames_to_compile.extend(submission_format)
        executable_filename = self._executable_filename(submission_format)
        res = dict()
        for language in LANGUAGES:
            source_ext = language.source_extension
            res[language.name] = language.get_compilation_commands(
                [codename.replace(".%l", source_ext)
                 for codename in codenames_to_compile],
                executable_filename)
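
        # Rough sketch of the result (hypothetical; the exact commands
        # depend on the Language objects): for a grader task with
        # submission format ["sol.%l"], res["C++11 / g++"] would hold a
        # g++ invocation compiling grader.cpp and sol.cpp into "sol".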
        return res

    def get_user_managers(self):
        """See TaskType.get_user_managers."""
        # In case the task uses a grader, we let the user provide their own
        # grader (which is usually a simplified grader provided by the admins).
        if self._uses_grader():
            return [self.GRADER_BASENAME + ".%l"]
        else:
            return []

    def get_auto_managers(self):
        """See TaskType.get_auto_managers."""
        return []

    def _uses_grader(self):
        return self.compilation == self.COMPILATION_GRADER

    def _uses_checker(self):
        return self.output_eval == self.OUTPUT_EVAL_CHECKER

    @staticmethod
    def _executable_filename(codenames):
        """Return the chosen executable name computed from the codenames.

        codenames ([str]): submission format or codename of submitted files,
            may contain %l.

        return (str): a deterministic executable name.

        """
        return "_".join(sorted(codename.replace(".%l", "")
                               for codename in codenames))
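
        # Illustrative example (hypothetical codenames): ["taskname.%l",
        # "grader.%l"] yields "grader_taskname"; sorting keeps the name
        # independent of the order in which the codenames are listed.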

    def compile(self, job, file_cacher):
        """See TaskType.compile."""
        language = get_language(job.language)
        source_ext = language.source_extension

        if not check_files_number(job, 1, or_more=True):
            return

        # Create the list of filenames to be passed to the compiler. If we use
        # a grader, it needs to be in first position in the command line, and
        # we check that it exists.
        filenames_to_compile = []
        filenames_and_digests_to_get = {}
        # The grader, which must have been provided (copy and add to
        # compilation).
        if self._uses_grader():
            grader_filename = self.GRADER_BASENAME + source_ext
            if not check_manager_present(job, grader_filename):
                return
            filenames_to_compile.append(grader_filename)
            filenames_and_digests_to_get[grader_filename] = \
                job.managers[grader_filename].digest
        # User's submitted file(s) (copy and add to compilation).
        for codename, file_ in iteritems(job.files):
            filename = codename.replace(".%l", source_ext)
            filenames_to_compile.append(filename)
            filenames_and_digests_to_get[filename] = file_.digest
        # Any other useful manager (just copy).
        for filename, manager in iteritems(job.managers):
            if is_manager_for_compilation(filename, language):
                filenames_and_digests_to_get[filename] = manager.digest

        # Prepare the compilation command.
        executable_filename = self._executable_filename(iterkeys(job.files))
        commands = language.get_compilation_commands(
            filenames_to_compile, executable_filename)

        # Create the sandbox.
        sandbox = create_sandbox(file_cacher, name="compile")
        job.sandboxes.append(sandbox.get_root_path())

        # Copy required files in the sandbox (includes the grader if present).
        for filename, digest in iteritems(filenames_and_digests_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Run the compilation.
        box_success, compilation_success, text, stats = \
            compilation_step(sandbox, commands)

        # Retrieve the compiled executables.
        job.success = box_success
        job.compilation_success = compilation_success
        job.text = text
        job.plus = stats
        if box_success and compilation_success:
            digest = sandbox.get_file_to_storage(
                executable_filename,
                "Executable %s for %s" % (executable_filename, job.info))
            job.executables[executable_filename] = \
                Executable(executable_filename, digest)

        # Cleanup.
        delete_sandbox(sandbox, job)

    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return

        # Prepare the execution
        executable_filename = next(iterkeys(job.executables))
        language = get_language(job.language)
        main = self.GRADER_BASENAME \
            if self._uses_grader() else executable_filename
        commands = language.get_evaluation_commands(
            executable_filename, main=main)
        # HACK for NECKLACE: one-time hack to support a task with a very
        # low memory limit.
        if job.memory_limit == 3 and job.language == "Java / JDK":
            jvm_args = ["-Deval=true", "-Xmx4224k", "-Xss256k",
                        "-XX:MaxMetaspaceSize=8704k"]
            commands = language.get_evaluation_commands(
                executable_filename, main=main, jvm_args=jvm_args)
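            # Interpretation (not stated in the original source): these JVM
            # flags cap the heap, thread stacks and metaspace so that the
            # process stays within the relaxed memory limit of 20
            # (presumably MiB) applied further below for this language.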
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        files_to_get = {
            self._actual_input: job.input
        }

        # Check which redirects we need to perform; if the output is not
        # managed via a redirect, the submission needs to be able to write
        # to the output file itself.
        files_allowing_write = []
        stdin_redirect = None
        stdout_redirect = None
        if len(self.input_filename) == 0:
            stdin_redirect = self._actual_input
        if len(self.output_filename) == 0:
            stdout_redirect = self._actual_output
        else:
            files_allowing_write.append(self._actual_output)

        # Create the sandbox
        sandbox = create_sandbox(file_cacher, name="evaluate")
        job.sandboxes.append(sandbox.get_root_path())

        # Put the required files into the sandbox
        for filename, digest in iteritems(executables_to_get):
            sandbox.create_file_from_storage(filename, digest, executable=True)
        for filename, digest in iteritems(files_to_get):
            sandbox.create_file_from_storage(filename, digest)

        # Special handling: if there's a batchmanager, then this is really an
        # interactive task to be evaluated in a single sandbox.
        # Do NOT use check_manager_present() here, as it will raise an error
        # for normal tasks with no batchmanager.
        if Batch.MANAGER_CODENAME in job.managers:
            sandbox.create_file_from_storage(
                Batch.MANAGER_CODENAME,
                job.managers[Batch.MANAGER_CODENAME].digest,
                executable=True)
            # If there is a batchmanager, run the last command through it.
            commands[-1][:0] = ["./%s" % Batch.MANAGER_CODENAME,
                                self.input_filename, self.output_filename]
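            # After the prepend, the last command looks roughly like
            # (hypothetical values):
            #     ["./batchmanager", "input.txt", "output.txt", "./sol"]
            # i.e. the batchmanager receives the I/O filenames followed by
            # the original evaluation command.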

        # Actually performs the execution
        # HACK for NECKLACE: one-time hack to support a task with a very
        # low memory limit.
        if job.memory_limit == 3 and job.language == "Java / JDK":
            memory_limit = 20
        elif job.memory_limit == 3 and job.language == "Python 3 / CPython":
            memory_limit = 8
        elif job.memory_limit == 3 and job.language == "C++11 / g++":
            memory_limit = 4
        elif job.memory_limit == 3 and job.language == "C11 / gcc":
            memory_limit = 4
        else:
            memory_limit = job.memory_limit
        box_success, evaluation_success, stats = evaluation_step(
            sandbox,
            commands,
            job.effective_time_limit(),
            memory_limit,
            writable_files=files_allowing_write,
            stdin_redirect=stdin_redirect,
            stdout_redirect=stdout_redirect,
            multiprocess=job.multithreaded_sandbox)

        outcome = None
        text = None

        # Error in the sandbox: nothing to do!
        if not box_success:
            pass

        # Contestant's error: the marks won't be good
        elif not evaluation_success:
            outcome = 0.0
            text = human_evaluation_message(stats)
            if job.get_output:
                job.user_output = None

        # Otherwise, advance to checking the solution
        else:

            # Check that the output file was created
            if not sandbox.file_exists(self._actual_output):
                outcome = 0.0
                text = [N_("Evaluation didn't produce file %s"),
                        self._actual_output]
                if job.get_output:
                    job.user_output = None

            else:
                # If asked so, put the output file into the storage.
                if job.get_output:
                    job.user_output = sandbox.get_file_to_storage(
                        self._actual_output,
                        "Output file in job %s" % job.info,
                        trunc_len=100 * 1024)

                # If just asked to execute, fill text and set dummy outcome.
                if job.only_execution:
                    outcome = 0.0
                    text = [N_("Execution completed successfully")]

                # Otherwise evaluate the output file.
                else:
                    box_success, outcome, text = eval_output(
                        file_cacher, job,
                        self.CHECKER_CODENAME
                        if self._uses_checker() else None,
                        user_output_path=sandbox.relative_path(
                            self._actual_output),
                        user_output_filename=self.output_filename)

        # Fill in the job with the results.
        job.success = box_success
        job.outcome = str(outcome) if outcome is not None else None
        job.text = text
        job.plus = stats

        delete_sandbox(sandbox, job)