Example #1
    def destroy_cache(self):
        """Completely remove and destroy the cache.

        Nothing that could have been created by this object will be
        left on disk. After that, this instance isn't usable anymore.

        """
        rmtree(self.file_dir)
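Note: the rmtree used throughout these examples is a project-local helper rather than shutil.rmtree itself; the tests below assert specific exception types for symlinks and missing paths. A minimal sketch of what such a wrapper might look like, assuming only the behavior checked in Examples #3 and #14 (the error message is illustrative):

import os
import shutil

def rmtree(path):
    """Remove the directory tree rooted at path.

    Refuses symlinks (even ones pointing at a directory) and lets
    FileNotFoundError propagate for missing paths, matching the
    assertions in the tests below.
    """
    if os.path.islink(path):
        raise NotADirectoryError("refusing to delete a symlink: %s" % path)
    shutil.rmtree(path)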
Example #2
    def delete(self):
        """Delete the directory where the sandbox operated.

        """
        logger.debug("Deleting sandbox in %s.", self.path)

        # Delete the working directory.
        rmtree(self.path)
Example #3
    def test_symlink(self):
        """Test failure on a symlink."""
        link = os.path.join(self.tmpdir, "link")
        os.symlink(self.tmpdir, link)

        with self.assertRaises(NotADirectoryError):
            rmtree(link)

        os.remove(link)
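These tests reference a self.tmpdir fixture that is not shown. A plausible minimal harness, assuming a plain unittest.TestCase with a throwaway directory per test (class name and tearDown policy are assumptions):

import shutil
import tempfile
import unittest

class RmtreeTest(unittest.TestCase):
    def setUp(self):
        # A fresh scratch directory for every test method.
        self.tmpdir = tempfile.mkdtemp()

    def tearDown(self):
        # The tests leave tmpdir itself in place (Example #6 asserts
        # this), so the harness removes it here.
        shutil.rmtree(self.tmpdir, ignore_errors=True)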
Example #4
    def __init__(self, service=None, path=None, null=False):
        """Initialize.

        By default the database-powered backend will be used, but this
        can be changed using the parameters.

        service (Service|None): the service we are running for. Only
            used if present to determine the location of the
            file-system cache (and to provide the shard number to the
            Sandbox... sigh!).
        path (string|None): if specified, back the FileCacher with a
            file system-based storage instead of the default
            database-based one. The specified directory will be used
            as root for the storage and it will be created if it
            doesn't exist.
        null (bool): if True, back the FileCacher with a NullBackend,
            that just discards every file it receives. This setting
            takes priority over path.

        """
        self.service = service

        if null:
            self.backend = NullBackend()
        elif path is None:
            if config.s3_backend_enabled:
                self.backend = S3Backend(
                    region=config.s3_backend_region,
                    bucket=config.s3_backend_bucket,
                    prefix=config.s3_backend_prefix,
                    s3_proxy=config.s3_backend_proxy,
                    base_url_for_fetch=config.s3_backend_fetch_base_url,
                )
            else:
                self.backend = DBBackend()
        else:
            self.backend = FSBackend(path)

        if service is None:
            self.file_dir = tempfile.mkdtemp(dir=config.temp_dir)
            # Delete this directory on exit since it has a random name and
            # won't be used again.
            atexit.register(lambda: rmtree(self.file_dir))
        else:
            self.file_dir = os.path.join(
                config.cache_dir,
                "fs-cache-%s-%d" % (service.name, service.shard))

        self.temp_dir = os.path.join(self.file_dir, "_temp")

        if not mkdir(config.cache_dir) or not mkdir(config.temp_dir) \
                or not mkdir(self.file_dir) or not mkdir(self.temp_dir):
            logger.error("Cannot create necessary directories.")
            raise RuntimeError("Cannot create necessary directories.")
        atexit.register(lambda: rmtree(self.temp_dir))
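The constructor leans on atexit to remove throwaway directories when the process exits. The same idiom in isolation, using only the standard library (a sketch, not CMS code):

import atexit
import shutil
import tempfile

# Registered callables run at normal interpreter exit; the lambda
# captures the path, so the directory is removed even if no other
# reference to it survives.
scratch = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(scratch, ignore_errors=True))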
Example #5
    def destroy_cache(self):
        """Completely remove and destroy the cache.

        Nothing that could have been created by this object will be
        left on disk. After that, this instance isn't usable anymore.

        This function must not be called if the cache directory is shared.

        """
        if self.is_shared():
            raise Exception("You may not destroy a shared cache.")
        rmtree(self.file_dir)
Example #6
    def test_success(self):
        """Test success case."""
        testdir = os.path.join(self.tmpdir, "test")
        os.makedirs(os.path.join(testdir, "a"))
        os.makedirs(os.path.join(testdir, "b", "c"))
        open(os.path.join(testdir, "x"), "w").close()
        os.symlink("foo", os.path.join(testdir, "a", "y"))
        os.symlink(self.tmpdir, os.path.join(testdir, "b", "z"))

        rmtree(testdir)
        self.assertFalse(os.path.exists(testdir))
        self.assertTrue(os.path.isdir(self.tmpdir))
Example #7
    def __init__(self, service=None, path=None, null=False):
        """Initialize.

        By default the database-powered backend will be used, but this
        can be changed using the parameters.

        service (Service|None): the service we are running for. Only
            used if present to determine the location of the
            file-system cache (and to provide the shard number to the
            Sandbox... sigh!).
        path (string|None): if specified, back the FileCacher with a
            file system-based storage instead of the default
            database-based one. The specified directory will be used
            as root for the storage and it will be created if it
            doesn't exist.
        null (bool): if True, back the FileCacher with a NullBackend,
            that just discards every file it receives. This setting
            takes priority over path.

        """
        self.service = service

        if null:
            self.backend = NullBackend()
        elif path is None:
            self.backend = DBBackend()
        else:
            self.backend = FSBackend(path)

        # First we create the config directories.
        self._create_directory_or_die(config.temp_dir)
        self._create_directory_or_die(config.cache_dir)

        if service is None:
            self.file_dir = tempfile.mkdtemp(dir=config.temp_dir)
            # Delete this directory on exit since it has a random name and
            # won't be used again.
            atexit.register(lambda: rmtree(self.file_dir))
        else:
            self.file_dir = os.path.join(
                config.cache_dir,
                "fs-cache-%s-%d" % (service.name, service.shard))
        self._create_directory_or_die(self.file_dir)

        # Temp dir must be a subdirectory of file_dir to avoid cross-filesystem
        # moves.
        self.temp_dir = tempfile.mkdtemp(dir=self.file_dir, prefix="_temp")
        atexit.register(lambda: rmtree(self.temp_dir))
        # Just to make sure it was created.
        self._create_directory_or_die(self.file_dir)
Example #8
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return
        executable_filename = next(iterkeys(job.executables))
        executable_digest = job.executables[executable_filename].digest

        # Make sure the required manager is among the job managers.
        if not check_manager_present(job, self.MANAGER_FILENAME):
            return
        manager_digest = job.managers[self.MANAGER_FILENAME].digest

        # Indices for the objects related to each user process.
        indices = range(self.num_processes)

        # Create FIFOs.
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        if not self._uses_grader():
            abortion_control_fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo_user_to_manager = [
            os.path.join(fifo_dir[i], "u%d_to_m" % i) for i in indices]
        fifo_manager_to_user = [
            os.path.join(fifo_dir[i], "m_to_u%d" % i) for i in indices]
        if not self._uses_grader():
            fifo_solution_quitter = os.path.join(abortion_control_fifo_dir, "sq")
            fifo_manager_quitter = os.path.join(abortion_control_fifo_dir, "mq")
        for i in indices:
            os.mkfifo(fifo_user_to_manager[i])
            os.mkfifo(fifo_manager_to_user[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_user_to_manager[i], 0o666)
            os.chmod(fifo_manager_to_user[i], 0o666)
        if not self._uses_grader():
            os.mkfifo(fifo_solution_quitter)
            os.mkfifo(fifo_manager_quitter)
            os.chmod(abortion_control_fifo_dir, 0o755)
            os.chmod(fifo_solution_quitter, 0o666)
            os.chmod(fifo_manager_quitter, 0o666)

        # Names of the fifos after being mapped inside the sandboxes.
        sandbox_fifo_dir = ["/fifo%d" % i for i in indices]
        sandbox_fifo_user_to_manager = [
            os.path.join(sandbox_fifo_dir[i], "u%d_to_m" % i) for i in indices]
        sandbox_fifo_manager_to_user = [
            os.path.join(sandbox_fifo_dir[i], "m_to_u%d" % i) for i in indices]
        if not self._uses_grader():
            sandbox_abortion_control_fifo_dir = "/abort"
            sandbox_fifo_solution_quitter = \
                os.path.join(sandbox_abortion_control_fifo_dir, "sq")
            sandbox_fifo_manager_quitter = \
                os.path.join(sandbox_abortion_control_fifo_dir, "mq")

        # Create the manager sandbox and copy manager and input and
        # reference output.
        sandbox_mgr = create_sandbox(file_cacher, name="manager_evaluate")
        job.sandboxes.append(sandbox_mgr.get_root_path())
        sandbox_mgr.create_file_from_storage(
            self.MANAGER_FILENAME, manager_digest, executable=True)
        sandbox_mgr.create_file_from_storage(
            self.INPUT_FILENAME, job.input)
        sandbox_mgr.create_file_from_storage(
            self.OK_FILENAME, job.output)

        # Create the user sandbox(es) and copy the executable.
        sandbox_user = [create_sandbox(file_cacher, name="user_evaluate")
                        for i in indices]
        job.sandboxes.extend(s.get_root_path() for s in sandbox_user)
        for i in indices:
            sandbox_user[i].create_file_from_storage(
                executable_filename, executable_digest, executable=True)

        # Start the manager. Redirecting to stdin is unnecessary, but for
        # historical reasons the manager can choose to read from there
        # instead of from INPUT_FILENAME.
        manager_command = ["./%s" % self.MANAGER_FILENAME]
        for i in indices:
            manager_command += [sandbox_fifo_user_to_manager[i],
                                sandbox_fifo_manager_to_user[i]]
        if not self._uses_grader():
            manager_command += [sandbox_fifo_solution_quitter,
                                sandbox_fifo_manager_quitter]
        # We could use trusted_step for the manager, since it's fully
        # admin-controlled. But trusted_step is only synchronous at the moment.
        # Thus we use evaluation_step, and we set a time limit generous enough
        # to prevent user programs from sending the manager in timeout.
        # This means that:
        # - the manager wall clock timeout must be greater than the sum of all
        #     wall clock timeouts of the user programs;
        # - with the assumption that the work the manager performs is not
        #     greater than the work performed by the user programs, the manager
        #     user timeout must be greater than the maximum allowed total time
        #     of the user programs; in theory, this is the task's time limit,
        #     but in practice it is num_processes times that because the
        #     constraint on the total time can only be enforced after all user
        #     programs terminated.
        manager_time_limit = max(self.num_processes * (job.time_limit + 1.0),
                                 config.trusted_sandbox_max_time_s)
        manager_dirs_map = dict((fifo_dir[i], (sandbox_fifo_dir[i], "rw"))
                                for i in indices)
        if not self._uses_grader():
            manager_dirs_map[abortion_control_fifo_dir] = \
                (sandbox_abortion_control_fifo_dir, "rw")
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            manager_time_limit,
            config.trusted_sandbox_max_memory_kib // 1024,
            dirs_map=manager_dirs_map,
            writable_files=[self.OUTPUT_FILENAME],
            stdin_redirect=self.INPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox)

        if not self._uses_grader():
            solution_quitter = open(fifo_solution_quitter, "r")
            manager_quitter = open(fifo_manager_quitter, "w")
            manager_quitter_open = True

        # Start the user submissions compiled with the stub.
        language = get_language(job.language)
        processes = [None for i in indices]
        for i in indices:
            args = [sandbox_fifo_manager_to_user[i],
                    sandbox_fifo_user_to_manager[i]]
            if self.num_processes != 1:
                args.append(str(i))
            if self._uses_grader():
                main = self.STUB_BASENAME
            else:
                main = executable_filename
            commands = language.get_evaluation_commands(
                executable_filename,
                main=main,
                args=args)
            # Assumes that the actual execution of the user solution is the
            # last command in commands, and that the previous are "setup"
            # that don't need tight control.
            if len(commands) > 1:
                trusted_step(sandbox_user[i], commands[:-1])
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                dirs_map={fifo_dir[i]: (sandbox_fifo_dir[i], "rw")},
                stdin_redirect=sandbox_fifo_manager_to_user[i],
                stdout_redirect=sandbox_fifo_user_to_manager[i],
                multiprocess=job.multithreaded_sandbox)

        if not self._uses_grader():
            # Manager still running but wants to quit
            if solution_quitter.read() == "<3":
                for i in indices:
                    processes[i].send_signal(signal.SIGINT)  # Kill user
                wait_without_std(processes)
                manager_quitter.close()
                manager_quitter_open = False

        # Wait for the processes to conclude, without blocking them on I/O.
        wait_without_std(processes + [manager])

        if not self._uses_grader():
            solution_quitter.close()
            if manager_quitter_open:
                manager_quitter.close()

        # Get the results of the manager sandbox.
        box_success_mgr, evaluation_success_mgr, unused_stats_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        # Coalesce the results of the user sandboxes.
        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        box_success_user = all(r[0] for r in user_results)
        evaluation_success_user = all(r[1] for r in user_results)
        stats_user = reduce(merge_execution_stats,
                            [r[2] for r in user_results])
        # The actual running time is the sum over all user processes, but each
        # sandbox can only check its own; if the sum is greater than the time
        # limit we adjust the result.
        if box_success_user and evaluation_success_user and \
                stats_user["execution_time"] >= job.time_limit:
            evaluation_success_user = False
            stats_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        success = box_success_user \
            and box_success_mgr and evaluation_success_mgr
        outcome = None
        text = None

        # If at least one sandbox had problems, or the manager did not
        # terminate correctly, we report an error (and no need for user stats).
        if not success:
            stats_user = None

        # If just asked to execute, fill text and set dummy outcome.
        elif job.only_execution:
            outcome = 0.0
            text = [N_("Execution completed successfully")]

        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not evaluation_success_user:
            outcome = 0.0
            text = human_evaluation_message(stats_user)

        # Otherwise, we use the manager to obtain the outcome.
        else:
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file with additional information,
        # provided that it exists.
        if job.get_output:
            if sandbox_mgr.file_exists(self.OUTPUT_FILENAME):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    self.OUTPUT_FILENAME,
                    "Output file in job %s" % job.info,
                    trunc_len=100 * 1024)
            else:
                job.user_output = None

        # Fill in the job with the results.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text
        job.plus = stats_user

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
            if not self._uses_grader():
                rmtree(abortion_control_fifo_dir)
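The FIFO plumbing above follows a fixed recipe: a scratch directory per channel pair, named pipes created with os.mkfifo, and permissive modes so processes running under different sandbox uids can open them. The recipe in isolation (names are illustrative):

import os
import shutil
import tempfile

fifo_dir = tempfile.mkdtemp()
user_to_manager = os.path.join(fifo_dir, "u0_to_m")
manager_to_user = os.path.join(fifo_dir, "m_to_u0")
os.mkfifo(user_to_manager)
os.mkfifo(manager_to_user)
# World-readable directory, world-writable pipes.
os.chmod(fifo_dir, 0o755)
os.chmod(user_to_manager, 0o666)
os.chmod(manager_to_user, 0o666)
# ... each side opens one pipe for reading and the other for writing ...
shutil.rmtree(fifo_dir)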
Example #9
    def execute(self, entry):
        """Print a print job.

        This is the core of PrintingService.

        entry (QueueEntry): the entry containing the operation to
            perform.

        """
        # TODO: automatically re-enqueue in case of a recoverable
        # error.
        printjob_id = entry.item.printjob_id
        with SessionGen() as session:
            # Obtain print job.
            printjob = PrintJob.get_from_id(printjob_id, session)
            if printjob is None:
                raise ValueError("Print job %d not found in the database." %
                                 printjob_id)
            user = printjob.participation.user
            contest = printjob.participation.contest
            timezone = get_timezone(user, contest)
            timestr = str(
                printjob.timestamp.replace(
                    tzinfo=utc).astimezone(timezone).replace(tzinfo=None))
            filename = printjob.filename

            # Check if it's ready to be printed.
            if printjob.done:
                logger.info("Print job %d was already sent to the printer.",
                            printjob_id)
                return

            directory = tempfile.mkdtemp(dir=config.temp_dir)
            logger.info("Preparing print job in directory %s", directory)

            # Take the base name just to be sure.
            relname = "source_" + os.path.basename(filename)
            source = os.path.join(directory, relname)
            with open(source, "wb") as file_:
                self.file_cacher.get_file_to_fobj(printjob.digest, file_)

            if filename.endswith(".pdf") and config.pdf_printing_allowed:
                source_pdf = source
            else:
                # Convert text to ps.
                source_ps = os.path.join(directory, "source.ps")
                cmd = [
                    "a2ps", source, "--delegate=no", "--output=" + source_ps,
                    "--medium=%s" % config.paper_size.capitalize(),
                    "--portrait", "--columns=1", "--rows=1",
                    "--pages=1-%d" % (config.max_pages_per_job), "--header=",
                    "--footer=", "--left-footer=", "--right-footer=",
                    "--center-title=" + filename, "--left-title=" + timestr
                ]
                ret = subprocess.call(cmd, cwd=directory)
                if ret != 0:
                    raise Exception(
                        "Failed to convert text file to ps with command: %s"
                        "(error %d)" % (pretty_print_cmdline(cmd), ret))

                if not os.path.exists(source_ps):
                    logger.warning("Unable to convert from text to ps.")
                    printjob.done = True
                    printjob.status = [N_("Invalid file")]
                    session.commit()
                    rmtree(directory)
                    return

                # Convert ps to pdf
                source_pdf = os.path.join(directory, "source.pdf")
                cmd = [
                    "ps2pdf",
                    "-sPAPERSIZE=%s" % config.paper_size.lower(), source_ps
                ]
                ret = subprocess.call(cmd, cwd=directory)
                if ret != 0:
                    raise Exception(
                        "Failed to convert ps file to pdf with command: %s"
                        "(error %d)" % (pretty_print_cmdline(cmd), ret))

            # Find out number of pages
            with open(source_pdf, "rb") as file_:
                pdfreader = PdfFileReader(file_)
                page_count = pdfreader.getNumPages()

            logger.info("Preparing %d page(s) (plus the title page)",
                        page_count)

            if page_count > config.max_pages_per_job:
                logger.info("Too many pages.")
                printjob.done = True
                printjob.status = [N_("Print job has too many pages")]
                session.commit()
                rmtree(directory)
                return

            # Add the title page
            title_tex = os.path.join(directory, "title_page.tex")
            title_pdf = os.path.join(directory, "title_page.pdf")
            with open(title_tex, "wb") as f:
                # render() returns text; encode it before writing to the
                # binary file.
                f.write(
                    self.jinja2_env.get_template("title_page.tex").render(
                        user=user,
                        filename=filename,
                        timestr=timestr,
                        page_count=page_count,
                        paper_size=config.paper_size).encode("utf-8"))
            cmd = ["pdflatex", "-interaction", "nonstopmode", title_tex]
            ret = subprocess.call(cmd, cwd=directory)
            if ret != 0:
                raise Exception("Failed to create title page with command: %s"
                                "(error %d)" %
                                (pretty_print_cmdline(cmd), ret))

            pdfmerger = PdfFileMerger()
            with open(title_pdf, "rb") as file_:
                pdfmerger.append(file_)
            with open(source_pdf, "rb") as file_:
                pdfmerger.append(file_)
            result = os.path.join(directory, "document.pdf")
            with open(result, "wb") as file_:
                pdfmerger.write(file_)

            try:
                printer_connection = cups.Connection()
                printer_connection.printFile(config.printer, result,
                                             "Printout %d" % printjob_id, {})
            except cups.IPPError as error:
                logger.error("Unable to print: `%s'.", error)
            else:
                printjob.done = True
                printjob.status = [N_("Sent to printer")]
                session.commit()
            finally:
                rmtree(directory)
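Every failure path above has to remember to call rmtree(directory) before returning. The same cleanup obligation in its tightest general shape, as a sketch:

import shutil
import tempfile

directory = tempfile.mkdtemp()
try:
    ...  # stage, convert and print the files inside `directory`
finally:
    # Runs on success, on early exit and on exception alike.
    shutil.rmtree(directory, ignore_errors=True)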
Example #10
    def do_export(self):
        """Run the actual export code."""
        logger.info("Starting export.")

        export_dir = self.export_target
        archive_info = get_archive_info(self.export_target)

        if archive_info["write_mode"] != "":
            # We are able to write to this archive.
            if os.path.exists(self.export_target):
                logger.critical("The specified file already exists, "
                                "I won't overwrite it.")
                return False
            export_dir = os.path.join(tempfile.mkdtemp(),
                                      archive_info["basename"])

        logger.info("Creating dir structure.")
        try:
            os.mkdir(export_dir)
        except OSError:
            logger.critical("The specified directory already exists, "
                            "I won't overwrite it.")
            return False

        files_dir = os.path.join(export_dir, "files")
        descr_dir = os.path.join(export_dir, "descriptions")
        os.mkdir(files_dir)
        os.mkdir(descr_dir)

        with SessionGen() as session:
            # Export files.
            logger.info("Exporting files.")
            if self.dump_files:
                for contest_id in self.contests_ids:
                    contest = Contest.get_from_id(contest_id, session)
                    files = contest.enumerate_files(self.skip_submissions,
                                                    self.skip_user_tests,
                                                    self.skip_generated)
                    for file_ in files:
                        if not self.safe_get_file(file_,
                                                  os.path.join(files_dir,
                                                               file_),
                                                  os.path.join(descr_dir,
                                                               file_)):
                            return False

            # Export data in JSON format.
            if self.dump_model:
                logger.info("Exporting data to a JSON file.")

                # We use strings because they'll be the keys of a JSON
                # object
                self.ids = {}
                self.queue = []

                data = dict()

                for cls, lst in [(Contest, self.contests_ids),
                                 (User, self.users_ids),
                                 (Task, self.tasks_ids)]:
                    for i in lst:
                        obj = cls.get_from_id(i, session)
                        self.get_id(obj)

                # Specify the "root" of the data graph
                data["_objects"] = list(itervalues(self.ids))

                while len(self.queue) > 0:
                    obj = self.queue.pop(0)
                    data[self.ids[obj.sa_identity_key]] = \
                        self.export_object(obj)

                data["_version"] = model_version

                destination = os.path.join(export_dir, "contest.json")
                if PY3:
                    with io.open(destination, "wt", encoding="utf-8") as fout:
                        json.dump(data, fout, indent=4, sort_keys=True)
                else:
                    with io.open(destination, "wb") as fout:
                        json.dump(data, fout, indent=4, sort_keys=True)

        # If the admin requested export to file, we do that.
        if archive_info["write_mode"] != "":
            archive = tarfile.open(self.export_target,
                                   archive_info["write_mode"])
            archive.add(export_dir, arcname=archive_info["basename"])
            archive.close()
            rmtree(export_dir)

        logger.info("Export finished.")

        return True
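The export ends with a common pattern: stage into a temporary directory, pack it into the archive, then delete the staging copy. In isolation (file name and arcname are illustrative):

import shutil
import tarfile
import tempfile

export_dir = tempfile.mkdtemp()
# ... populate export_dir ...
with tarfile.open("export.tar.gz", "w:gz") as archive:
    archive.add(export_dir, arcname="export")
shutil.rmtree(export_dir)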
Example #11
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(file_cacher,
                                     multithreaded=job.multithreaded_sandbox,
                                     name="manager_evaluate")
        sandbox_user = [
            create_sandbox(file_cacher,
                           multithreaded=job.multithreaded_sandbox,
                           name="user_evaluate") for i in indices
        ]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # First step: prepare the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename: job.managers[manager_filename].digest
        }
        manager_files_to_get = {"input.txt": job.input}
        manager_allow_dirs = fifo_dir
        for filename, digest in iteritems(manager_executables_to_get):
            sandbox_mgr.create_file_from_storage(filename,
                                                 digest,
                                                 executable=True)
        for filename, digest in iteritems(manager_files_to_get):
            sandbox_mgr.create_file_from_storage(filename, digest)

        # Second step: load the executables for the user processes
        # (done before launching the manager so that it does not
        # impact its wall clock time).
        assert len(job.executables) == 1
        executable_filename = next(iterkeys(job.executables))
        executables_to_get = {
            executable_filename: job.executables[executable_filename].digest
        }
        for i in indices:
            for filename, digest in iteritems(executables_to_get):
                sandbox_user[i].create_file_from_storage(filename,
                                                         digest,
                                                         executable=True)

        # Third step: start the manager.
        manager = evaluation_step_before_run(sandbox_mgr,
                                             manager_command,
                                             num_processes * job.time_limit,
                                             0,
                                             allow_dirs=manager_allow_dirs,
                                             writable_files=["output.txt"],
                                             stdin_redirect="input.txt")

        # Fourth step: start the user submissions compiled with the stub.
        language = get_language(job.language)
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(executable_filename,
                                                        main="stub",
                                                        args=args)
            user_allow_dirs = [fifo_dir[i]]
            # Assumes that the actual execution of the user solution
            # is the last command in commands, and that the previous
            # are "setup" steps that don't need tight control.
            if len(commands) > 1:
                evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        if plus_user['exit_status'] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, []
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file, provided that it exists
        if job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    "output.txt", "Output file in job %s" % job.info)
            else:
                job.user_output = None

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
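Note: this revision passes directories to the sandbox with allow_dirs= and reads sandbox locations from sandbox.path; later revisions (see Example #8) switched to dirs_map= with explicit in-sandbox mount points and get_root_path(). The FIFO setup and the final cleanup are otherwise the same.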
Example #12
    def evaluate(self, job, file_cacher):
        if not check_executables_number(job, 1):
            return
        executable_filename = next(iter(job.executables.keys()))
        executable_digest = job.executables[executable_filename].digest

        # Make sure the required manager is among the job managers.
        if not check_manager_present(job, self.MANAGER_FILENAME):
            return
        manager_digest = job.managers[self.MANAGER_FILENAME].digest

        # Create FIFO dirs and the first batch of FIFOs.
        fifo_dir_base = tempfile.mkdtemp(dir=config.temp_dir)

        def fifo_dir(i, j):
            p = os.path.join(fifo_dir_base, f"fifo{i}_{j}")
            if not os.path.exists(p):
                os.mkdir(p)
            return p

        abortion_control_fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
        fifo_solution_quitter = os.path.join(abortion_control_fifo_dir, "sq")
        fifo_manager_quitter = os.path.join(abortion_control_fifo_dir, "mq")
        os.mkfifo(fifo_solution_quitter)
        os.mkfifo(fifo_manager_quitter)
        os.chmod(abortion_control_fifo_dir, 0o755)
        os.chmod(fifo_solution_quitter, 0o666)
        os.chmod(fifo_manager_quitter, 0o666)

        sandbox_abortion_control_fifo_dir = "/abort"
        sandbox_fifo_solution_quitter = \
            os.path.join(sandbox_abortion_control_fifo_dir, "sq")
        sandbox_fifo_manager_quitter = \
            os.path.join(sandbox_abortion_control_fifo_dir, "mq")

        # Start the manager. Redirecting to stdin is unnecessary, but for
        # historical reasons the manager can choose to read from there
        # instead of from INPUT_FILENAME.
        manager_command = ["./%s" % self.MANAGER_FILENAME]
        manager_command += [
            sandbox_fifo_solution_quitter, sandbox_fifo_manager_quitter
        ]

        # Create the manager sandbox and copy manager and input and
        # reference output.
        sandbox_mgr = create_sandbox(file_cacher, name="manager_evaluate")
        job.sandboxes.append(sandbox_mgr.get_root_path())
        sandbox_mgr.create_file_from_storage(self.MANAGER_FILENAME,
                                             manager_digest,
                                             executable=True)
        sandbox_mgr.create_file_from_storage(self.INPUT_FILENAME, job.input)
        sandbox_mgr.create_file_from_storage(self.OK_FILENAME, job.output)

        # We could use trusted_step for the manager, since it's fully
        # admin-controlled. But trusted_step is only synchronous at the moment.
        # Thus we use evaluation_step, and we set a time limit generous enough
        # to prevent user programs from sending the manager in timeout.
        # This means that:
        # - the manager wall clock timeout must be greater than the sum of all
        #     wall clock timeouts of the user programs;
        # - with the assumption that the work the manager performs is not
        #     greater than the work performed by the user programs, the manager
        #     user timeout must be greater than the maximum allowed total time
        #     of the user programs; in theory, this is the task's time limit,
        #     but in practice it is num_processes times that because the
        #     constraint on the total time can only be enforced after all user
        #     programs terminated.
        sandbox_fifo_dir_base = "/fifo"

        def sandbox_fifo_dir(i, j):
            return f"{sandbox_fifo_dir_base}/fifo{i}_{j}"

        manager_time_limit = max(self.num_processes * (job.time_limit + 1.0),
                                 config.trusted_sandbox_max_time_s)
        manager_dirs_map = {
            abortion_control_fifo_dir:
            (sandbox_abortion_control_fifo_dir, "rw")
        }

        # TODO: can we avoid creating all these directories?
        MAX_NUM_INSTANCES = 42

        list_of_fifo_dirs = []

        for pr in range(0, self.num_processes):
            for i in range(0, MAX_NUM_INSTANCES):
                d = fifo_dir(i, pr)
                list_of_fifo_dirs.append(d)
                manager_dirs_map[d] = (sandbox_fifo_dir(i, pr), "rw")

        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            manager_time_limit,
            config.trusted_sandbox_max_memory_kib * 1024,
            dirs_map=manager_dirs_map,
            writable_files=[self.OUTPUT_FILENAME],
            stdin_redirect=self.INPUT_FILENAME,
            multiprocess=True)

        solution_quitter = open(fifo_solution_quitter, "r")
        manager_quitter = open(fifo_manager_quitter, "w")

        def finish_run():
            wait_without_std(processes)
            L = [finish_run_single(i) for i in indices]
            return all(L)

        def finish_run_single(i):
            nonlocal wall_clock_acc
            nonlocal num_runs

            user_results.append(evaluation_step_after_run(sandbox_user[i]))
            wall_clock_acc += user_results[-1][2]["execution_wall_clock_time"]
            num_runs += 1
            runtimes[i].append(user_results[-1][2]["execution_time"])

            # Convert tuple to list for write access to entries
            L = list(user_results[-1])
            L[2]["execution_time"] = runtimes[i][-1] / max_num_runs

            if L[2]["execution_time"] >= job.time_limit:
                L[2]["exit_status"] = Sandbox.EXIT_TIMEOUT

            user_results[-1] = tuple(L)

            if not self._uses_stub():
                # It can happen that the submission runs out of memory and then
                # gets killed by the manager while it is being shut down, in
                # which case isolate does not report a signal as the exit
                # status. To catch this, we look for cg-oom-killed in the logs
                sandbox_user[i].get_log()
                if user_results[-1][1] and \
                   "cg-oom-killed" in sandbox_user[i].log:
                    # Convert tuple to list for write access to entries
                    r = list(user_results[-1])
                    r[1] = False
                    r[2]["status"] = ["SG"]
                    r[2]["exit_status"] = "signal"
                    r[2]["signal"] = -41  # sit by a lake
                    r[2]["message"] = ["out of memory"]
                    user_results[-1] = tuple(r)

            return user_results[-1][0] and user_results[-1][1]

        def respond(okay=True):
            manager_quitter.write("O" if okay else "X")
            manager_quitter.flush()

        def read_int_from_manager():
            L = []
            while True:
                c = solution_quitter.read(1)
                if c == 'B':
                    break
                else:
                    L.append(c)
            return int("".join(L))

        quit = False

        for pr in range(0, self.num_processes):
            if quit:
                break

            wall_clock_acc = 0
            num_runs = 0

            # Startup message to sync pipes
            manager_quitter.write('S')
            manager_quitter.flush()

            # Ask the manager for the number of processes
            num_instances = read_int_from_manager()
            indices = range(0, num_instances)
            max_num_runs = read_int_from_manager()

            # Create remaining FIFOs
            fifo_user_to_manager = [
                os.path.join(fifo_dir(i, pr), f"u{pr}_{i}_to_m")
                for i in indices
            ]
            fifo_manager_to_user = [
                os.path.join(fifo_dir(i, pr), f"m_to_u{pr}_{i}")
                for i in indices
            ]
            for i in indices:
                os.mkfifo(fifo_user_to_manager[i])
                os.mkfifo(fifo_manager_to_user[i])
                os.chmod(fifo_dir(i, pr), 0o755)
                os.chmod(fifo_user_to_manager[i], 0o666)
                os.chmod(fifo_manager_to_user[i], 0o666)

            # Names of the fifos after being mapped inside the sandboxes.
            sandbox_fifo_user_to_manager = \
                [os.path.join(sandbox_fifo_dir(i, pr),
                              f"u{pr}_{i}_to_m") for i in indices]
            sandbox_fifo_manager_to_user = \
                [os.path.join(sandbox_fifo_dir(i, pr),
                              f"m_to_u{pr}_{i}") for i in indices]

            for i in indices:
                print(sandbox_fifo_user_to_manager[i],
                      file=manager_quitter,
                      flush=True)
                print(sandbox_fifo_manager_to_user[i],
                      file=manager_quitter,
                      flush=True)

            # Create the user sandbox(es) and copy the executable.
            sandbox_user = [
                create_sandbox(file_cacher, name="user_evaluate")
                for i in indices
            ]
            job.sandboxes.extend(s.get_root_path() for s in sandbox_user)

            for i in indices:
                sandbox_user[i].create_file_from_storage(executable_filename,
                                                         executable_digest,
                                                         executable=True)

                # Prepare the user submissions
                language = get_language(job.language)
                main = self.STUB_BASENAME if self._uses_stub() \
                    else os.path.splitext(executable_filename)[0]
                processes = [None for i in indices]
                user_results = []

                args = []
                stdin_redirect = None
                stdout_redirect = None
                if self._uses_fifos():
                    args.extend([
                        sandbox_fifo_manager_to_user[i],
                        sandbox_fifo_user_to_manager[i]
                    ])
                if self.num_processes != 1:
                    args.append(str(i))
                if self._uses_stub():
                    main = self.STUB_BASENAME
                else:
                    main = executable_filename
                commands = language.get_evaluation_commands(
                    executable_filename, main=main, args=args)

                # Assumes that the actual execution of the user solution is the
                # last command in commands, and that the previous are "setup"
                # that don't need tight control.
                if len(commands) > 1:
                    trusted_step(sandbox_user[i], commands[:-1])

            processes = [None for _ in indices]
            runtimes = [[] for _ in indices]

            while True:
                for i in indices:
                    processes[i] = evaluation_step_before_run(
                        sandbox_user[i],
                        commands[-1],
                        job.time_limit * max_num_runs * num_instances,
                        job.memory_limit,
                        dirs_map={
                            fifo_dir(i, pr): (sandbox_fifo_dir(i, pr), "rw")
                        },
                        stdin_redirect=sandbox_fifo_manager_to_user[i],
                        stdout_redirect=sandbox_fifo_user_to_manager[i],
                        multiprocess=job.multithreaded_sandbox)

                response = solution_quitter.read(1)

                if response == "C":  # continue
                    if not finish_run():
                        # this run was not successful, time to call it quits
                        quit = True
                        respond(okay=False)
                        break
                    respond()
                elif response == "N":  # next process
                    if not finish_run():
                        # this run was not successful, time to call it quits
                        quit = True
                        respond(okay=False)
                        break
                    respond()
                    break
                elif response == "Q":
                    if not self._uses_stub():
                        time.sleep(.01)
                        processes[i].send_signal(signal.SIGINT)
                    finish_run()
                    respond()
                    quit = True
                    break
                else:
                    raise RuntimeError("Received '{}' ".format(response) +
                                       "through solution_quitter.")

        # Wait for the manager to conclude, without blocking them on I/O.
        wait_without_std([manager])

        solution_quitter.close()
        manager_quitter.close()

        # Get the results of the manager sandbox.
        box_success_mgr, evaluation_success_mgr, unused_stats_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        # Coalesce the results of the user sandboxes.
        box_success_user = all(r[0] for r in user_results)
        evaluation_success_user = all(r[1] for r in user_results)
        stats_user = reduce(my_merge_execution_stats,
                            [r[2] for r in user_results])
        # The actual running time is the sum over all user processes, but each
        # sandbox can only check its own; if the sum is greater than the time
        # limit we adjust the result.
        if box_success_user and evaluation_success_user and \
                stats_user["execution_time"] >= job.time_limit:
            evaluation_success_user = False
            stats_user['exit_status'] = Sandbox.EXIT_TIMEOUT

        success = box_success_user \
            and box_success_mgr and evaluation_success_mgr
        outcome = None
        text = None

        # If at least one sandbox had problems, or the manager did not
        # terminate correctly, we report an error (and no need for user stats).
        if not success:
            stats_user = None

        # If just asked to execute, fill text and set dummy outcome.
        elif job.only_execution:
            outcome = 0.0
            text = [N_("Execution completed successfully")]

        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not evaluation_success_user:
            outcome = 0.0
            text = human_evaluation_message(stats_user)

        # Otherwise, we use the manager to obtain the outcome.
        else:
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If asked so, save the output file with additional information,
        # provided that it exists.
        if job.get_output:
            if sandbox_mgr.file_exists(self.OUTPUT_FILENAME):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    self.OUTPUT_FILENAME,
                    "Output file in job %s" % job.info,
                    trunc_len=100 * 1024)
            else:
                job.user_output = None

        # Fill in the job with the results.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text
        job.plus = stats_user

        delete_sandbox(sandbox_mgr, job.success, job.keep_sandbox)
        for s in sandbox_user:
            delete_sandbox(s, job.success, job.keep_sandbox)
        if job.success and not config.keep_sandbox and not job.keep_sandbox:
            rmtree(fifo_dir_base)
            rmtree(abortion_control_fifo_dir)
Example #13
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if not check_executables_number(job, 1):
            return
        executable_filename = next(iterkeys(job.executables))
        executable_digest = job.executables[executable_filename].digest

        # Make sure the required manager is among the job managers.
        if not check_manager_present(job, Communication.MANAGER_FILENAME):
            return
        manager_digest = job.managers[Communication.MANAGER_FILENAME].digest

        # Indices for the objects related to each user process.
        indices = range(self.num_processes)

        # Create FIFOs.
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # Create the manager sandbox and copy manager and input.
        sandbox_mgr = create_sandbox(file_cacher, name="manager_evaluate")
        job.sandboxes.append(sandbox_mgr.path)
        sandbox_mgr.create_file_from_storage(Communication.MANAGER_FILENAME,
                                             manager_digest,
                                             executable=True)
        sandbox_mgr.create_file_from_storage(Communication.INPUT_FILENAME,
                                             job.input)

        # Create the user sandbox(es) and copy the executable.
        sandbox_user = [
            create_sandbox(file_cacher, name="user_evaluate") for i in indices
        ]
        job.sandboxes.extend(s.path for s in sandbox_user)
        for i in indices:
            sandbox_user[i].create_file_from_storage(executable_filename,
                                                     executable_digest,
                                                     executable=True)
            if Communication.STUB_PRELOAD_FILENAME in job.managers:
                digest = job.managers[
                    Communication.STUB_PRELOAD_FILENAME].digest
                sandbox_user[i].create_file_from_storage(
                    Communication.STUB_PRELOAD_FILENAME,
                    digest,
                    executable=True)

        # Start the manager. Redirecting to stdin is unnecessary, but for
        # historical reasons the manager can choose to read from there
        # instead of from INPUT_FILENAME.
        manager_command = ["./%s" % Communication.MANAGER_FILENAME]
        for i in indices:
            manager_command += [fifo_in[i], fifo_out[i]]
        # We could use trusted_step for the manager, since it's fully
        # admin-controlled. But trusted_step is only synchronous at the moment.
        # Thus we use evaluation_step, and we set a time limit generous enough
        # to prevent user programs from sending the manager in timeout.
        # This means that:
        # - the manager wall clock timeout must be greater than the sum of all
        #     wall clock timeouts of the user programs;
        # - with the assumption that the work the manager performs is not
        #     greater than the work performed by the user programs, the manager
        #     user timeout must be greater than the maximum allowed total time
        #     of the user programs; in theory, this is the task's time limit,
        #     but in practice it is num_processes times that because the
        #     constraint on the total time can only be enforced after all user
        #     programs terminated.
        manager_time_limit = max(self.num_processes * (job.time_limit + 1.0),
                                 config.trusted_sandbox_max_time_s)
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            manager_time_limit,
            config.trusted_sandbox_max_memory_kib // 1024,
            allow_dirs=fifo_dir,
            writable_files=[Communication.OUTPUT_FILENAME],
            stdin_redirect=Communication.INPUT_FILENAME,
            multiprocess=job.multithreaded_sandbox)

        # Start the user submissions compiled with the stub.
        language = get_language(job.language)
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if self.num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(executable_filename,
                                                        main="stub",
                                                        args=args)
            # Assumes that the actual execution of the user solution is the
            # last command in commands, and that the previous are "setup"
            # that don't need tight control.
            if len(commands) > 1:
                trusted_step(sandbox_user[i], commands[:-1])

            last_cmd = commands[-1]

            # Inject preload program if needed
            if Communication.STUB_PRELOAD_FILENAME in job.managers:
                last_cmd = [
                    "./%s" % Communication.STUB_PRELOAD_FILENAME, fifo_out[i],
                    fifo_in[i]
                ] + commands[-1]

            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                last_cmd,
                job.time_limit,
                job.memory_limit,
                allow_dirs=[fifo_dir[i]],
                multiprocess=job.multithreaded_sandbox)

        # Wait for the processes to conclude, without blocking them on I/O.
        wait_without_std(processes + [manager])

        # Get the results of the manager sandbox.
        box_success_mgr, evaluation_success_mgr, unused_stats_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        # Coalesce the results of the user sandboxes.
        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        box_success_user = all(r[0] for r in user_results)
        evaluation_success_user = all(r[1] for r in user_results)
        stats_user = reduce(merge_execution_stats,
                            [r[2] for r in user_results])
        # The actual running time is the sum of every user process, but each
        # sandbox can only check its own; if the sum is greater than the time
        # limit we adjust the result.
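        # (For instance, two processes measured at 0.6 s each against a
        # 1.0 s limit sum to 1.2 s, which must be reported as a timeout.)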
        if box_success_user and evaluation_success_user and \
                stats_user["execution_time"] >= job.time_limit:
            evaluation_success_user = False
            stats_user["exit_status"] = Sandbox.EXIT_TIMEOUT

        success = box_success_user \
            and box_success_mgr and evaluation_success_mgr
        outcome = None
        text = None

        # If at least one sandbox had problems, or the manager did not
        # terminate correctly, we report an error (and no need for user stats).
        if not success:
            stats_user = None

        # If just asked to execute, fill text and set dummy outcome.
        elif job.only_execution:
            outcome = 0.0
            text = [N_("Execution completed successfully")]

        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not evaluation_success_user:
            outcome = 0.0
            text = human_evaluation_message(stats_user, job.feedback_level)

        # Otherwise, we use the manager to obtain the outcome.
        else:
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If requested, save the output file with additional information,
        # provided that it exists.
        if job.get_output:
            if sandbox_mgr.file_exists(Communication.OUTPUT_FILENAME):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    Communication.OUTPUT_FILENAME,
                    "Output file in job %s" % job.info,
                    trunc_len=100 * 1024)
            else:
                job.user_output = None

        # Fill in the job with the results.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text
        job.plus = stats_user

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
Example #16
0
    def test_missing(self):
        """Test failure on a missing directory."""
        with self.assertRaises(FileNotFoundError):
            rmtree(os.path.join(self.tmpdir, "missing"))
Example #17
0
    def evaluate(self, job, file_cacher):
        """See TaskType.evaluate."""
        if len(self.parameters) <= 0:
            num_processes = 1
        else:
            num_processes = self.parameters[0]
        indices = range(num_processes)
        # Create sandboxes and FIFOs
        sandbox_mgr = create_sandbox(
            file_cacher,
            multithreaded=job.multithreaded_sandbox,
            name="manager_evaluate")
        sandbox_user = [
            create_sandbox(
                file_cacher,
                multithreaded=job.multithreaded_sandbox,
                name="user_evaluate")
            for i in indices]
        fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
        fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
        fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
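        # Make the directories world-traversable and the FIFOs world-
        # readable/writable: the manager and the user programs presumably
        # run as different, unprivileged users inside their sandboxes.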
        for i in indices:
            os.mkfifo(fifo_in[i])
            os.mkfifo(fifo_out[i])
            os.chmod(fifo_dir[i], 0o755)
            os.chmod(fifo_in[i], 0o666)
            os.chmod(fifo_out[i], 0o666)

        # First step: prepare the manager.
        manager_filename = "manager"
        manager_command = ["./%s" % manager_filename]
        for i in indices:
            manager_command.append(fifo_in[i])
            manager_command.append(fifo_out[i])
        manager_executables_to_get = {
            manager_filename:
            job.managers[manager_filename].digest
            }
        manager_files_to_get = {
            "input.txt": job.input
            }
        manager_allow_dirs = fifo_dir
        for filename, digest in iteritems(manager_executables_to_get):
            sandbox_mgr.create_file_from_storage(
                filename, digest, executable=True)
        for filename, digest in iteritems(manager_files_to_get):
            sandbox_mgr.create_file_from_storage(filename, digest)

        # Second step: load the executables for the user processes
        # (done before launching the manager so that it does not
        # impact its wall clock time).
        assert len(job.executables) == 1
        executable_filename = next(iterkeys(job.executables))
        executables_to_get = {
            executable_filename:
            job.executables[executable_filename].digest
            }
        for i in indices:
            for filename, digest in iteritems(executables_to_get):
                sandbox_user[i].create_file_from_storage(
                    filename, digest, executable=True)

        # Third step: start the manager.
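        # The manager may have to serve every user process in turn, hence
        # the num_processes * time_limit budget; the memory limit of 0 is
        # assumed here to mean "unlimited" for this trusted binary.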
        manager = evaluation_step_before_run(
            sandbox_mgr,
            manager_command,
            num_processes * job.time_limit,
            0,
            allow_dirs=manager_allow_dirs,
            writable_files=["output.txt"],
            stdin_redirect="input.txt")

        # Fourth step: start the user submissions compiled with the stub.
        language = get_language(job.language)
        processes = [None for i in indices]
        for i in indices:
            args = [fifo_out[i], fifo_in[i]]
            if num_processes != 1:
                args.append(str(i))
            commands = language.get_evaluation_commands(
                executable_filename,
                main="stub",
                args=args)
            user_allow_dirs = [fifo_dir[i]]
            # Assumes that the actual execution of the user solution
            # is the last command in commands, and that the previous
            # are "setup" that doesn't need tight control.
            if len(commands) > 1:
                evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
            processes[i] = evaluation_step_before_run(
                sandbox_user[i],
                commands[-1],
                job.time_limit,
                job.memory_limit,
                allow_dirs=user_allow_dirs)

        # Consume output.
        wait_without_std(processes + [manager])
        # TODO: check exit codes with translate_box_exitcode.

        user_results = [evaluation_step_after_run(s) for s in sandbox_user]
        success_user = all(r[0] for r in user_results)
        plus_user = reduce(merge_evaluation_results,
                           [r[1] for r in user_results])
        success_mgr, unused_plus_mgr = \
            evaluation_step_after_run(sandbox_mgr)

        if plus_user["exit_status"] == Sandbox.EXIT_OK and \
                plus_user["execution_time"] >= job.time_limit:
            plus_user["exit_status"] = Sandbox.EXIT_TIMEOUT

        # Merge results.
        job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
        job.plus = plus_user

        # If at least one evaluation had problems, we report the
        # problems.
        if not success_user or not success_mgr:
            success, outcome, text = False, None, []
        # If the user sandbox detected some problem (timeout, ...),
        # the outcome is 0.0 and the text describes that problem.
        elif not is_evaluation_passed(plus_user):
            success = True
            outcome, text = 0.0, human_evaluation_message(plus_user)
        # Otherwise, we use the manager to obtain the outcome.
        else:
            success = True
            outcome, text = extract_outcome_and_text(sandbox_mgr)

        # If requested, save the output file, provided that it exists.
        if job.get_output:
            if sandbox_mgr.file_exists("output.txt"):
                job.user_output = sandbox_mgr.get_file_to_storage(
                    "output.txt",
                    "Output file in job %s" % job.info)
            else:
                job.user_output = None

        # Whatever happened, we conclude.
        job.success = success
        job.outcome = "%s" % outcome if outcome is not None else None
        job.text = text

        delete_sandbox(sandbox_mgr, job.success)
        for s in sandbox_user:
            delete_sandbox(s, job.success)
        if not config.keep_sandbox:
            for d in fifo_dir:
                rmtree(d)
Example #18
0
    def do_export(self):
        """Run the actual export code."""
        logger.info("Starting export.")

        export_dir = self.export_target
        archive_info = get_archive_info(self.export_target)

        if archive_info["write_mode"] != "":
            # We are able to write to this archive.
            if os.path.exists(self.export_target):
                logger.critical("The specified file already exists, "
                                "I won't overwrite it.")
                return False
            export_dir = os.path.join(tempfile.mkdtemp(),
                                      archive_info["basename"])

        logger.info("Creating dir structure.")
        try:
            os.mkdir(export_dir)
        except OSError:
            logger.critical("The specified directory already exists, "
                            "I won't overwrite it.")
            return False

        files_dir = os.path.join(export_dir, "files")
        descr_dir = os.path.join(export_dir, "descriptions")
        os.mkdir(files_dir)
        os.mkdir(descr_dir)

        with SessionGen() as session:
            # Export files.
            logger.info("Exporting files.")
            if self.dump_files:
                for contest_id in self.contests_ids:
                    contest = Contest.get_from_id(contest_id, session)
                    files = enumerate_files(
                        session, contest,
                        skip_submissions=self.skip_submissions,
                        skip_user_tests=self.skip_user_tests,
                        skip_users=self.skip_users,
                        skip_print_jobs=self.skip_print_jobs,
                        skip_generated=self.skip_generated)
                    for file_ in files:
                        if not self.safe_get_file(file_,
                                                  os.path.join(files_dir,
                                                               file_),
                                                  os.path.join(descr_dir,
                                                               file_)):
                            return False

            # Export data in JSON format.
            if self.dump_model:
                logger.info("Exporting data to a JSON file.")

                # We use strings because they'll be the keys of a JSON
                # object
                self.ids = {}
                self.queue = []

                data = dict()

                for cls, lst in [(Contest, self.contests_ids),
                                 (User, self.users_ids),
                                 (Task, self.tasks_ids)]:
                    for i in lst:
                        obj = cls.get_from_id(i, session)
                        self.get_id(obj)

                # Specify the "root" of the data graph
                data["_objects"] = list(self.ids.values())

                while len(self.queue) > 0:
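                    # self.queue presumably acts as a BFS frontier over the
                    # object graph: get_id() enqueues objects on first sight,
                    # so each one is serialized exactly once.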
                    obj = self.queue.pop(0)
                    data[self.ids[obj.sa_identity_key]] = \
                        self.export_object(obj)

                data["_version"] = model_version

                destination = os.path.join(export_dir, "contest.json")
                with open(destination, "wt", encoding="utf-8") as fout:
                    json.dump(data, fout, indent=4, sort_keys=True)

        # If the admin requested export to file, we do that.
        if archive_info["write_mode"] != "":
            with tarfile.open(self.export_target,
                              archive_info["write_mode"]) as archive:
                archive.add(export_dir, arcname=archive_info["basename"])
            rmtree(export_dir)

        logger.info("Export finished.")

        return True
Example #19
0
    def execute(self, entry):
        """Print a print job.

        This is the core of PrintingService.

        entry (QueueEntry): the entry containing the operation to
            perform.

        """
        # TODO: automatically re-enqueue in case of a recoverable
        # error.
        printjob_id = entry.item.printjob_id
        with SessionGen() as session:
            # Obtain print job.
            printjob = PrintJob.get_from_id(printjob_id, session)
            if printjob is None:
                raise ValueError("Print job %d not found in the database." %
                                 printjob_id)
            user = printjob.participation.user
            contest = printjob.participation.contest
            timezone = get_timezone(user, contest)
            timestr = str(printjob.timestamp.replace(tzinfo=utc)
                          .astimezone(timezone).replace(tzinfo=None))
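            # Render the (UTC) timestamp in the contest's local timezone,
            # for display on the printout.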
            filename = printjob.filename

            # Check if it's ready to be printed.
            if printjob.done:
                logger.info("Print job %d was already sent to the printer.",
                            printjob_id)
                # Assumption: a job already sent needs no further action.
                return

            directory = tempfile.mkdtemp(dir=config.temp_dir)
            logger.info("Preparing print job in directory %s", directory)

            # Take the base name just to be sure.
            relname = "source_" + os.path.basename(filename)
            source = os.path.join(directory, relname)
            with open(source, "wb") as file_:
                self.file_cacher.get_file_to_fobj(printjob.digest, file_)

            if filename.endswith(".pdf") and config.pdf_printing_allowed:
                source_pdf = source
            else:
                # Convert text to ps.
                source_ps = os.path.join(directory, "source.ps")
                cmd = ["a2ps",
                       source,
                       "--delegate=no",
                       "--output=" + source_ps,
                       "--medium=%s" % config.paper_size.capitalize(),
                       "--portrait",
                       "--columns=1",
                       "--rows=1",
                       "--pages=1-%d" % (config.max_pages_per_job),
                       "--header=",
                       "--footer=",
                       "--left-footer=",
                       "--right-footer=",
                       "--center-title=" + filename,
                       "--left-title=" + timestr]
                ret = subprocess.call(cmd, cwd=directory)
                if ret != 0:
                    raise Exception(
                        "Failed to convert text file to ps with command: %s"
                        "(error %d)" % (pretty_print_cmdline(cmd), ret))

                if not os.path.exists(source_ps):
                    logger.warning("Unable to convert from text to ps.")
                    printjob.done = True
                    printjob.status = [N_("Invalid file")]
                    session.commit()
                    rmtree(directory)
                    return

                # Convert ps to pdf
                source_pdf = os.path.join(directory, "source.pdf")
                cmd = ["ps2pdf",
                       "-sPAPERSIZE=%s" % config.paper_size.lower(),
                       source_ps]
                ret = subprocess.call(cmd, cwd=directory)
                if ret != 0:
                    raise Exception(
                        "Failed to convert ps file to pdf with command: %s"
                        "(error %d)" % (pretty_print_cmdline(cmd), ret))

            # Find out number of pages
            with open(source_pdf, "rb") as file_:
                pdfreader = PdfFileReader(file_)
                page_count = pdfreader.getNumPages()

            logger.info("Preparing %d page(s) (plus the title page)",
                        page_count)

            if page_count > config.max_pages_per_job:
                logger.info("Too many pages.")
                printjob.done = True
                printjob.status = [N_("Print job has too many pages")]
                session.commit()
                rmtree(directory)
                return

            # Add the title page
            title_tex = os.path.join(directory, "title_page.tex")
            title_pdf = os.path.join(directory, "title_page.pdf")
            with open(title_tex, "wb") as f:
                f.write(self.jinja2_env.get_template("title_page.tex")
                        .render(user=user, filename=filename,
                                timestr=timestr,
                                page_count=page_count,
                                paper_size=config.paper_size))
            cmd = ["pdflatex",
                   "-interaction",
                   "nonstopmode",
                   title_tex]
            ret = subprocess.call(cmd, cwd=directory)
            if ret != 0:
                raise Exception(
                    "Failed to create title page with command: %s"
                    "(error %d)" % (pretty_print_cmdline(cmd), ret))

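            # Concatenate the title page and the document into the single
            # PDF that is sent to the printer.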
            pdfmerger = PdfFileMerger()
            with open(title_pdf, "rb") as file_:
                pdfmerger.append(file_)
            with open(source_pdf, "rb") as file_:
                pdfmerger.append(file_)
            result = os.path.join(directory, "document.pdf")
            with open(result, "wb") as file_:
                pdfmerger.write(file_)

            try:
                printer_connection = cups.Connection()
                printer_connection.printFile(
                    config.printer, result,
                    "Printout %d" % printjob_id, {})
            except cups.IPPError as error:
                logger.error("Unable to print: `%s'.", error)
            else:
                printjob.done = True
                printjob.status = [N_("Sent to printer")]
                session.commit()
            finally:
                rmtree(directory)