Example 1
    def _find_proc(self, service):
        """Returns the pid of a given service running on this machine.

        service (ServiceCoord): the service we are interested in
        returns (psutil.Process): the process of service, or None if
                                  not found

        """
        logger.debug("ResourceService._find_proc")
        cmdline = config.process_cmdline[:]
        length = len(cmdline)
        for i in range(length):
            if config.installed:
                cmdline[i] = cmdline[i].replace("%s", service.name)
            else:
                cmdline[i] = cmdline[i].replace(
                    "%s",
                    os.path.join(
                        ResourceService.SERVICE_PATH[service.name],
                        service.name))
            cmdline[i] = cmdline[i].replace("%d", str(service.shard))
        for proc in psutil.get_process_list():
            try:
                if proc.cmdline[:length] == cmdline:
                    self._services_prev_cpu_times[service] = \
                        proc.get_cpu_times()
                    return proc
            except psutil.NoSuchProcess:
                continue
        return None
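
The snippet above targets the pre-2.0 psutil API (get_process_list() and an attribute-style cmdline). As a rough illustration, here is a minimal sketch of the same prefix match against the modern API, psutil.process_iter() and a method-style cmdline(); the command line in the example is an invented placeholder:

import psutil

def find_proc_by_cmdline(prefix):
    """Return the first process whose command line starts with the
    given list of strings, or None if no such process exists."""
    length = len(prefix)
    for proc in psutil.process_iter():
        try:
            # In psutil >= 2.0, cmdline is a method, not an attribute.
            if proc.cmdline()[:length] == prefix:
                return proc
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue
    return None

# Invented command line of a hypothetical service, shard 0.
print(find_proc_by_cmdline(["/usr/bin/python2", "cmsResourceService", "0"]))
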
Example 2
File: __init__.py Project: Mloc/cms
def evaluation_step_before_run(sandbox, command,
                               time_limit=0, memory_limit=0,
                               allow_dirs=None,
                               stdin_redirect=None, stdout_redirect=None,
                               wait=False):
    """First part of an evaluation step, up to the start of the run.

    return: the exit code, already translated, if wait is True; the
            running process if wait is False.

    """
    # Set sandbox parameters suitable for evaluation.
    sandbox.timeout = time_limit
    sandbox.wallclock_timeout = 2 * time_limit + 1
    sandbox.address_space = memory_limit * 1024

    if stdin_redirect is not None:
        sandbox.stdin_file = stdin_redirect
    else:
        sandbox.stdin_file = None

    if stdout_redirect is not None:
        sandbox.stdout_file = stdout_redirect
    else:
        sandbox.stdout_file = "stdout.txt"

    sandbox.stderr_file = "stderr.txt"

    if allow_dirs is not None:
        for allow_dir in allow_dirs:
            sandbox.dirs += [(allow_dir, None, "rw")]

    # Actually run the evaluation command.
    logger.debug("Starting execution step.")
    return sandbox.execute_without_std(command, wait=wait)
Example 3
File: __init__.py Project: Mloc/cms
def evaluation_step(sandbox, command,
                    time_limit=0, memory_limit=0,
                    allow_dirs=None,
                    stdin_redirect=None, stdout_redirect=None):
    """Execute an evaluation command in the sandbox. Note that in some
    task types, there may be more than one evaluation commands (per
    testcase) (in others there can be none, of course).

    sandbox (Sandbox): the sandbox we consider.
    command (string): the actual evaluation line.
    time_limit (float): time limit in seconds.
    memory_limit (int): memory limit in MB.

    return (bool, dict): True if the evaluation was successful, or
                         False; and additional data.

    """
    success = evaluation_step_before_run(
        sandbox, command, time_limit, memory_limit, allow_dirs,
        stdin_redirect, stdout_redirect, wait=True)
    if not success:
        logger.debug("Job failed in evaluation_step_before_run.")
        return False, None

    success, plus = evaluation_step_after_run(sandbox)
    if not success:
        logger.debug("Job failed in evaluation_step_after_run: %r" % plus)

    return success, plus
Example 4
    def popen(self, command,
              stdin=None, stdout=None, stderr=None,
              close_fds=True):
        """Execute the given command in the sandbox using
        subprocess.Popen, assigning the corresponding standard file
        descriptors.

        command (list): executable filename and arguments of the
                        command.
        stdin (file): a file descriptor/object or None.
        stdout (file): a file descriptor/object or None.
        stderr (file): a file descriptor/object or None.
        close_fds (bool): close all file descriptors before executing.
        return (object): popen object.

        """
        self.exec_num += 1
        self.log = None
        args = [self.box_exec] + self.build_box_options() + ["--"] + command
        logger.debug("Executing program in sandbox with command: %s" %
                     " ".join(args))
        with open(self.relative_path(self.cmd_file), 'a') as commands:
            commands.write("%s\n" % (" ".join(args)))
        return subprocess.Popen(args,
                                stdin=stdin, stdout=stdout, stderr=stderr,
                                close_fds=close_fds)
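
Reduced to its essentials, popen appends the command line to a log file and then hands it to subprocess.Popen. A self-contained sketch of that pattern (the log file name and command are arbitrary stand-ins, not part of the sandbox API):

import subprocess

def logged_popen(args, cmd_file="commands.log"):
    """Append the command line to a log file, then spawn the process."""
    with open(cmd_file, "a") as commands:
        commands.write("%s\n" % " ".join(args))
    return subprocess.Popen(args, stdout=subprocess.PIPE)

proc = logged_popen(["echo", "hello"])
print(proc.communicate()[0])
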
Example 5
    def echo_callback(self, data, error=None):
        """Callback for check.

        """
        current = time.time()
        logger.debug("Checker.echo_callback")
        if error is not None:
            return
        try:
            service, time_ = data.split()
            time_ = float(time_)
            name, shard = service.split(",")
            shard = int(shard)
            service = ServiceCoord(name, shard)
            if service not in self.waiting_for or current - time_ > 10:
                logger.warning("Got late reply (%5.3lf s) from %s."
                               % (current - time_, service))
            else:
                if time_ - self.waiting_for[service] > 0.001:
                    logger.warning("Someone cheated on the timestamp?!")
                logger.info("Got reply (%5.3lf s) from %s."
                            % (current - time_, service))
                del self.waiting_for[service]
        except (KeyError, ValueError):
            # split()/float()/int() raise ValueError on malformed data.
            logger.error("Echo answer misshapen.")
Example 6
    def release_worker(self, shard):
        """To be called by ES when it receives a notification that a
        job finished.

        Note: if the worker is scheduled to be disabled, then we
        disable it, and notify the ES to discard the outcome obtained
        by the worker.

        shard (int): the worker to release.

        returns (bool): True if the result is to be ignored.

        """
        if self._job[shard] == WorkerPool.WORKER_INACTIVE:
            err_msg = "Trying to release worker while it's inactive."
            logger.error(err_msg)
            raise ValueError(err_msg)
        ret = self._ignore[shard]
        self._start_time[shard] = None
        self._side_data[shard] = None
        self._ignore[shard] = False
        if self._schedule_disabling[shard]:
            self._job[shard] = WorkerPool.WORKER_DISABLED
            self._schedule_disabling[shard] = False
            logger.info("Worker %s released and disabled." % shard)
        else:
            self._job[shard] = WorkerPool.WORKER_INACTIVE
            logger.debug("Worker %s released." % shard)
        return ret
Example 7
    def delete(self):
        """Delete the directory where the sandbox operated.

        """
        logger.debug("Deleting sandbox in %s" % self.path)

        # Delete the working directory.
        rmtree(self.path)
Example 8
    def finish(self, *args, **kwds):
        """ Finish this response, ending the HTTP request.

        We override this method in order to properly close the database.

        """
        logger.debug("Closing SQL connection.")
        self.sql_session.close()
        tornado.web.RequestHandler.finish(self, *args, **kwds)
Example 9
    def put_file(self, description="", binary_data=None,
                 file_obj=None, path=None):
        """Put a file in the storage, and keep a copy locally. The
        caller has to provide exactly one among binary_data, file_obj
        and path.

        description (string): a human-readable description of the
                              content.
        binary_data (string): the content of the file to send.
        file_obj (file): the file-like object to send.
        path (string): the file to send.

        """
        temp_fd, temp_path = tempfile.mkstemp(dir=self.tmp_dir)
        os.close(temp_fd)

        # Input checking
        if [binary_data, file_obj, path].count(None) != 2:
            error_string = "No content (or too many) specified in put_file."
            logger.error(error_string)
            raise ValueError(error_string)

        logger.debug("Reading input file to store on the database.")

        # Copy the file content, in whatever form it arrives, into the
        # temporary file.
        # TODO - This could be long-lasting: it would probably be wise
        # to call self.service._step() periodically, but this would
        # require reimplementing the shutil functions.
        if path is not None:
            shutil.copy(path, temp_path)
        elif binary_data is not None:
            with open(temp_path, 'wb') as temp_file:
                temp_file.write(binary_data)
        else:  # file_obj is not None.
            with open(temp_path, 'wb') as temp_file:
                shutil.copyfileobj(file_obj, temp_file)

        hasher = hashlib.sha1()

        # Calculate the file SHA1 digest
        with open(temp_path, 'rb') as temp_file:
            buf = temp_file.read(self.CHUNK_SIZE)
            while buf != '':
                hasher.update(buf)
                buf = temp_file.read(self.CHUNK_SIZE)
        digest = hasher.hexdigest()

        logger.debug("File has digest %s." % digest)

        self.backend.put_file(digest, temp_path, description=description)

        # Move the temporary file in the cache
        shutil.move(temp_path,
                    os.path.join(self.obj_dir, digest))

        return digest
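
The digest computation in put_file is independent of the rest of FileCacher and can be tried on its own. A standalone version of the chunked SHA-1 loop (the CHUNK_SIZE value here is an arbitrary choice, not the class constant):

import hashlib

CHUNK_SIZE = 1024 * 1024  # 1 MiB, chosen arbitrarily for this sketch

def sha1_of_file(path):
    """Compute the SHA-1 digest of a file, reading it in chunks so
    that arbitrarily large files fit in constant memory."""
    hasher = hashlib.sha1()
    with open(path, 'rb') as input_file:
        buf = input_file.read(CHUNK_SIZE)
        while buf:
            hasher.update(buf)
            buf = input_file.read(CHUNK_SIZE)
    return hasher.hexdigest()
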
Example 10
    def acquire_worker(self, job, side_data=None):
        """Tries to assign a job to an available worker. If no workers
        are available then this returns None, otherwise this returns
        the chosen worker.

        job (job): the job to assign to a worker
        side_data (object): object to attach to the worker for later
                            use

        returns (int): None if no workers are available, the worker
                       assigned to the job otherwise
        """
        # We look for an available worker
        try:
            shard = self.find_worker(WorkerPool.WORKER_INACTIVE,
                                     require_connection=True,
                                     random_worker=True)
        except LookupError:
            return None

        # Then we fill the info for future memory
        self._job[shard] = job
        self._start_time[shard] = make_datetime()
        self._side_data[shard] = side_data
        logger.debug("Worker %s acquired." % shard)

        # And finally we ask the worker to do the job
        action, object_id = job
        timestamp = side_data[1]
        queue_time = self._start_time[shard] - timestamp
        logger.info(
            "Asking worker %s to %s submission/user test %d "
            "(%s after submission)." % (shard, action, object_id, queue_time)
        )

        with SessionGen(commit=False) as session:
            if action == EvaluationService.JOB_TYPE_COMPILATION:
                submission = Submission.get_from_id(object_id, session)
                job_ = CompilationJob.from_submission(submission)
            elif action == EvaluationService.JOB_TYPE_EVALUATION:
                submission = Submission.get_from_id(object_id, session)
                job_ = EvaluationJob.from_submission(submission)
            elif action == EvaluationService.JOB_TYPE_TEST_COMPILATION:
                user_test = UserTest.get_from_id(object_id, session)
                job_ = CompilationJob.from_user_test(user_test)
            elif action == EvaluationService.JOB_TYPE_TEST_EVALUATION:
                user_test = UserTest.get_from_id(object_id, session)
                job_ = EvaluationJob.from_user_test(user_test)
                job_.get_output = True
                job_.only_execution = True

            self._worker[shard].execute_job(
                job_dict=job_.export_to_dict(),
                callback=self._service.action_finished.im_func,
                plus=(action, object_id, side_data, shard),
            )

        return shard
Example 11
    def get_resources(self, last_time=0):
        """Returns the resurce usage information from last_time to
        now.

        last_time (int): timestamp of the last time the caller called
                         this method.

        """
        logger.debug("ResourceService._get_resources")
        index = bisect.bisect_right(self._local_store, (last_time, 0))
        return self._local_store[index:]
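
This works because _local_store is a list of (timestamp, data) pairs kept sorted by timestamp, so bisect finds the cut point in O(log n); under Python 2's mixed-type ordering rules, the (last_time, 0) sentinel sorts before any real entry carrying the same timestamp. A runnable sketch of the same lookup on invented data:

import bisect

# (timestamp, data) pairs, sorted by timestamp.
local_store = [(100, {"cpu": 10}), (105, {"cpu": 30}), (110, {"cpu": 20})]

def entries_since(store, last_time):
    """Return all entries with timestamp >= last_time."""
    timestamps = [t for t, _ in store]
    index = bisect.bisect_left(timestamps, last_time)
    return store[index:]

print(entries_since(local_store, 105))
# [(105, {'cpu': 30}), (110, {'cpu': 20})]
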
Example 12
    def get_file(self, path):
        """Open a file in the sandbox given its relative path.

        path (string): relative path of the file inside the sandbox.
        return (file): the file opened in read binary mode.

        """
        logger.debug("Retrieving file %s from sandbox" % (path))
        real_path = self.relative_path(path)
        file_ = open(real_path, "rb")
        return file_
Example 13
    def delete(self):
        """Delete the directory where the sandbox operated.

        """
        logger.debug("Deleting sandbox in %s" % self.path)

        # Tell isolate to cleanup the sandbox.
        box_cmd = [self.box_exec, "--cg", "-b", str(self.box_id)]
        subprocess.call(box_cmd + ["--cleanup"])

        # Delete the working directory.
        shutil.rmtree(self.outer_temp_dir)
Example 14
    def finish(self, *args, **kwds):
        """ Finishes this response, ending the HTTP request.

        We override this method in order to properly close the database.

        """
        if hasattr(self, "sql_session"):
            logger.debug("Closing SQL connection.")
            try:
                self.sql_session.close()
            except Exception as error:
                logger.warning("Couldn't close SQL connection: %r" % error)
        tornado.web.RequestHandler.finish(self, *args, **kwds)
Example 15
    def _find_local_services(self):
        """Returns the services that are running on the same machine
        as us.

        returns (list): a list of ServiceCoord elements, sorted by
                        name and shard

        """
        logger.debug("ResourceService._find_local_services")
        services = config.async.core_services
        local_machine = services[self._my_coord].ip
        local_services = [x for x in services
                          if services[x].ip == local_machine]
        return sorted(local_services)
Example 16
    def put_file(self, digest, origin, description=""):
        """See FileCacherBackend.put_file().

        """
        with SessionGen() as session:

            # Check digest uniqueness
            if FSObject.get_from_digest(digest, session) is not None:
                logger.debug("File %s already on database, "
                             "dropping this one." % digest)
                session.rollback()

            # If it is not already present, copy the file into the
            # lobject
            else:
                fso = FSObject(description=description)
                logger.debug("Sending file %s to the database." % digest)
                with open(origin, 'rb') as temp_file:
                    with fso.get_lobject(session, mode='wb') as lobject:
                        logger.debug("Large object created.")
                        buf = temp_file.read(self.CHUNK_SIZE)
                        while buf != '':
                            while len(buf) > 0:
                                written = lobject.write(buf)
                                buf = buf[written:]
                                if self.service is not None:
                                    self.service._step()
                            buf = temp_file.read(self.CHUNK_SIZE)
                fso.digest = digest
                session.add(fso)
                session.commit()
                logger.debug("File %s sent to the database." % digest)
Example 17
def evaluation_step_before_run(sandbox, command,
                               time_limit=0, memory_limit=0,
                               allow_path=None,
                               stdin_redirect=None, stdout_redirect=None,
                               wait=False):
    """First part of an evaluation step, up to the start of the run.

    return: the exit code, already translated, if wait is True; the
            running process if wait is False.

    """
    # Set sandbox parameters suitable for evaluation.
    sandbox.chdir = sandbox.path
    sandbox.filter_syscalls = 2
    sandbox.timeout = time_limit
    sandbox.wallclock_timeout = 2 * time_limit
    sandbox.address_space = memory_limit * 1024
    sandbox.file_check = 1
    if allow_path is None:
        allow_path = []
    sandbox.allow_path = allow_path
    sandbox.stdin_file = stdin_redirect
    sandbox.stdout_file = stdout_redirect
    stdout_filename = os.path.join(sandbox.path, "stdout.txt")
    stderr_filename = os.path.join(sandbox.path, "stderr.txt")
    if sandbox.stdout_file is None:
        sandbox.stdout_file = stdout_filename
    sandbox.stderr_file = stderr_filename
    # These syscalls and paths are used by executables generated
    # by fpc.
    sandbox.allow_path += ["/proc/self/exe",
                           "/etc/timezone",
                           "/usr/share/zoneinfo/",
                           "/proc/self/maps",
                           "/sys/devices/system/cpu/online"]
    sandbox.allow_syscall += ["getrlimit",
                              "rt_sigaction",
                              "ugetrlimit",
                              "time",
                              "rt_sigprocmask",
                              "mremap"]
    # This one seems to be used for a C++ executable.
    sandbox.allow_path += ["/proc/meminfo"]
    # This is used by freopen in Ubuntu 12.04.
    sandbox.allow_syscall += ["dup3"]

    # Actually run the evaluation command.
    logger.debug("Starting execution step.")
    return sandbox.execute_without_std(command, wait=wait)
Example 18
File: Sandbox.py Project: riaz/cms
    def execute(self, command):
        """Execute the given command in the sandbox.

        command (list): executable filename and arguments of the
                        command.
        return (bool): True if the sandbox didn't report errors
                       (caused by the sandbox itself), False otherwise

        """
        self.exec_num += 1
        self.log = None
        args = [self.box_exec] + self.build_box_options() + ["--"] + command
        logger.debug("Executing program in sandbox with command: %s" % " ".join(args))
        with open(self.relative_path(self.cmd_file), "a") as commands:
            commands.write("%s\n" % (" ".join(args)))
        return translate_box_exitcode(subprocess.call(args))
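
execute differs from popen above only in waiting for the command and passing the raw exit status of the box through translate_box_exitcode, whose definition is not part of this snippet. A hypothetical sketch of such a translation (the mapping below is invented for illustration, not the actual mo-box contract):

import subprocess

def translate_box_exitcode_sketch(exitcode):
    # Invented mapping: treat a small set of exit codes as "the
    # sandbox itself worked" and everything else as a sandbox error.
    return exitcode in (0, 1, 2)

# The command failing (exit code 1) is still a successful sandbox run.
print(translate_box_exitcode_sketch(subprocess.call(["false"])))  # True
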
Example 19
    def run(self):
        # The cumulative data that we will try to send to the ranking,
        # built by combining items in the queue.
        data = list(dict() for i in xrange(self.TYPE_COUNT))

        # The number of times we will call self.data_queue.task_done()
        # after a successful send (i.e. how many times we called .get()
        # on the queue to build up the data we have now).
        task_count = list(0 for i in xrange(self.TYPE_COUNT))

        while True:
            # Block until we have something to do.
            self.data_queue.peek()

            try:
                while True:
                    # Get other data if it's immediately available.
                    item = self.data_queue.get_nowait()
                    task_count[item[0]] += 1

                    # Merge this item with the cumulative data.
                    data[item[0]].update(item[1])
            except gevent.queue.Empty:
                pass

            try:
                for i in xrange(self.TYPE_COUNT):
                    # Send entities of type i.
                    if len(data[i]) > 0:
                        # XXX We abuse the resource path as the English
                        # (plural) name for the entity type.
                        name = self.RESOURCE_PATHS[i]
                        operation = \
                            "sending %s to ranking %s" % (name, self.ranking)

                        logger.debug(operation.capitalize())
                        safe_put_data(
                            self.ranking, "%s/" % name, data[i], operation)

                        data[i].clear()
                        for j in xrange(task_count[i]):
                            self.data_queue.task_done()
                        task_count[i] = 0

            except CannotSendError:
                # A log message has already been produced.
                gevent.sleep(self.FAILURE_WAIT)
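
The loop's drain-and-merge idiom, block for the first item and then opportunistically fold in everything already queued, does not depend on gevent. The same shape with the Python 3 standard library queue (type indices and payloads invented; note that queue.Queue has no peek(), so this version consumes the first item with get()):

import queue

TYPE_COUNT = 2
data_queue = queue.Queue()
for item in [(0, {"a": 1}), (0, {"b": 2}), (1, {"c": 3})]:
    data_queue.put(item)

data = [dict() for _ in range(TYPE_COUNT)]

# Block until something arrives, then merge it...
kind, payload = data_queue.get()
data[kind].update(payload)
# ...and fold in whatever else is immediately available.
try:
    while True:
        kind, payload = data_queue.get_nowait()
        data[kind].update(payload)
except queue.Empty:
    pass

print(data)  # [{'a': 1, 'b': 2}, {'c': 3}]
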
Example 20
    def add_worker(self, worker_coord):
        """Add a new worker to the worker pool.

        worker_coord (ServiceCoord): the coordinates of the worker.

        """
        shard = worker_coord.shard
        # Instruct AsyncLibrary to connect ES to the Worker.
        self._worker[shard] = self._service.connect_to(
            worker_coord, on_connect=self.on_worker_connected)

        # And we fill all data.
        self._job[shard] = WorkerPool.WORKER_INACTIVE
        self._start_time[shard] = None
        self._side_data[shard] = None
        self._schedule_disabling[shard] = False
        self._ignore[shard] = False
        logger.debug("Worker %s added." % shard)
Example 21
    def acquire_worker(self, job, side_data=None):
        """Tries to assign a job to an available worker. If no workers
        are available then this returns None, otherwise this returns
        the chosen worker.

        job (job): the job to assign to a worker
        side_data (object): object to attach to the worker for later
                            use

        returns (int): None if no workers are available, the worker
                       assigned to the job otherwise
        """
        # We look for an available worker
        try:
            shard = self.find_worker(WorkerPool.WORKER_INACTIVE,
                                     require_connection=True,
                                     random_worker=True)
        except LookupError:
            return None

        # Then we fill the info for future memory
        self._job[shard] = job
        self._start_time[shard] = int(time.time())
        self._side_data[shard] = side_data
        logger.debug("Worker %s acquired." % shard)

        # And finally we ask the worker to do the job
        action, submission_id = job
        timestamp = side_data[1]
        queue_time = self._start_time[shard] - timestamp
        logger.info("Asking worker %s to %s submission %s "
                    " (%s seconds after submission)." %
                    (shard, action, submission_id, queue_time))
        if action == EvaluationService.JOB_TYPE_COMPILATION:
            self._worker[shard].compile(
                submission_id=submission_id,
                callback=self._service.action_finished.im_func,
                plus=(job, side_data, shard))
        elif action == EvaluationService.JOB_TYPE_EVALUATION:
            self._worker[shard].evaluate(
                submission_id=submission_id,
                callback=self._service.action_finished.im_func,
                plus=(job, side_data, shard))

        return shard
Example 22
    def __init__(self, file_cacher, temp_dir=None):
        """Initialization.

        file_cacher (FileCacher): an instance of the FileCacher class
                                  (to interact with FS).
        temp_dir (string): the directory where to put the sandbox
                           (which is itself a directory).

        """
        self.file_cacher = file_cacher

        if temp_dir is None:
            temp_dir = config.temp_dir
        self.path = tempfile.mkdtemp(dir=temp_dir)
        self.exec_name = 'mo-box'
        self.box_exec = self.detect_box_executable()
        self.info_basename = "run.log"   # Used for -M
        self.cmd_file = "commands.log"
        self.log = None
        self.exec_num = -1
        logger.debug("Sandbox in `%s' created, using box `%s'." %
                     (self.path, self.box_exec))

        # Default parameters for mo-box
        self.file_check = None         # -a
        self.chdir = None              # -c
        self.preserve_env = False      # -e
        self.inherit_env = []          # -E
        self.set_env = {}              # -E
        self.filter_syscalls = None    # -f
        self.allow_fork = False        # -F
        self.stdin_file = None         # -i
        self.stack_space = None        # -k
        self.address_space = None      # -m
        self.stdout_file = None        # -o
        self.allow_path = []           # -p
        self.set_path = {}             # -p
        self.stderr_file = None        # -r
        self.allow_syscall = []        # -s
        self.set_syscall = {}          # -s
        self.deny_timing = False       # -S
        self.timeout = None            # -t
        self.verbosity = 0             # -v
        self.wallclock_timeout = None  # -w
        self.extra_timeout = None      # -x
Example 23
    def get_file(self, path, trunc_len=None):
        """Open a file in the sandbox given its relative path.

        path (string): relative path of the file inside the sandbox.
        trunc_len (int): if None, does nothing; otherwise, truncate
                         the file at the specified length before
                         returning it.

        return (file): the file opened in read binary mode.

        """
        logger.debug("Retrieving file %s from sandbox" % (path))
        real_path = self.relative_path(path)
        if trunc_len is not None:
            file_ = open(real_path, "ab")
            my_truncate(file_, trunc_len)
            file_.close()
        file_ = open(real_path, "rb")
        return file_
Example 24
    def check(self):
        """For all services, send an echo request and logs the time of
        the request.

        """
        logger.debug("Checker.check")
        for coordinates, service in self.remote_services.iteritems():
            if coordinates in self.waiting_for:
                logger.info("Service %s timeout, retrying." % str(coordinates))
                del self.waiting_for[coordinates]

            if service.connected:
                now = time.time()
                self.waiting_for[coordinates] = now
                service.echo(string="%s %5.3lf" % (coordinates, now),
                             callback=Checker.echo_callback)
            else:
                logger.info("Service %s not connected." % str(coordinates))
        return True
Example 25
    def __init__(self, file_cacher=None, temp_dir=None):
        """Initialization.

        For arguments documentation, see SandboxBase.__init__.

        """
        SandboxBase.__init__(self, file_cacher, temp_dir)

        # Make box directory
        if temp_dir is None:
            temp_dir = config.temp_dir
        self.path = tempfile.mkdtemp(dir=temp_dir)

        self.cmd_file = "commands.log"
        self.exec_num = -1
        self.popen = None
        self.popen_time = None
        self.exec_time = None

        logger.debug("Sandbox in `%s' created, using stupid box." %
                     (self.path))

        # Box parameters
        self.chdir = self.path
        self.stdin_file = None
        self.stdout_file = None
        self.stderr_file = None
        self.stack_space = None
        self.address_space = None
        self.timeout = None
        self.wallclock_timeout = None
        self.extra_timeout = None

        # These parameters are not going to be used, but are here for
        # API compatibility
        self.box_id = 0
        self.cgroup = False
        self.dirs = []
        self.preserve_env = False
        self.inherit_env = []
        self.set_env = {}
        self.max_processes = 1
        self.verbosity = 0
Example 26
    def push_logs(self, logger):
        """Push all log lines written since the last call to
        get_logs() to the logger object.

        """
        with self.log_lock:
            tmp = self.logs
            self.logs = []
        for (line, severity) in tmp:
            if severity == 'debug':
                logger.debug(line)
            elif severity == 'info':
                logger.info(line)
            elif severity == 'warning':
                logger.warning(line)
            elif severity == 'error':
                logger.error(line)
            elif severity == 'critical':
                logger.critical(line)
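
Since the standard logging module exposes methods named exactly debug, info, warning, error and critical, the if/elif ladder above can also be written as a getattr dispatch. A runnable sketch (the buffered lines are invented):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

buffered = [("service started", "info"), ("disk almost full", "warning")]
for line, severity in buffered:
    # Each severity string names a method of logging.Logger.
    getattr(logger, severity)(line)
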
Example 27
    def create_file(self, path, executable=False):
        """Create an empty file in the sandbox and open it in write
        binary mode.

        path (string): relative path of the file inside the sandbox.
        executable (bool): whether to make the file executable.
        return (file): the file opened in write binary mode.

        """
        if executable:
            logger.debug("Creating executable file %s in sandbox." % path)
        else:
            logger.debug("Creating plain file %s in sandbox." % path)
        real_path = self.relative_path(path)
        file_ = open(real_path, "wb")
        mod = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
        if executable:
            mod |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        os.chmod(real_path, mod)
        return file_
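
The mode arithmetic composes stat flags: read for everyone plus write for the owner (0644), with the execute bits (0111) added on request. A standalone check of the resulting modes (the file name is invented):

import os
import stat

plain = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
executable = plain | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
print(oct(plain))       # 0o644
print(oct(executable))  # 0o755

with open("demo.sh", "wb") as script:
    script.write(b"#!/bin/sh\necho hi\n")
os.chmod("demo.sh", executable)
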
Example 28
    def _find_proc(self, service):
        """Returns the pid of a given service running on this machine.

        service (ServiceCoord): the service we are interested in
        returns (psutil.Process): the process of service, or None if
                                  not found

        """
        logger.debug("ResourceService._find_proc")
        cmdline = config.process_cmdline[:]
        length = len(cmdline)
        for i in range(length):
            cmdline[i] = cmdline[i].replace("%s", service.name)
            cmdline[i] = cmdline[i].replace("%d", str(service.shard))
        for proc in psutil.get_process_list():
            try:
                if proc.cmdline[:length] == cmdline:
                    self._services_prev_cpu_times[service] = \
                        proc.get_cpu_times()
                    return proc
            except psutil.error.NoSuchProcess:
                continue
        return None
Example 29
def evaluation_step(sandbox,
                    command,
                    time_limit=0,
                    memory_limit=0,
                    allow_dirs=None,
                    stdin_redirect=None,
                    stdout_redirect=None):
    """Execute an evaluation command in the sandbox. Note that in some
    task types, there may be more than one evaluation commands (per
    testcase) (in others there can be none, of course).

    sandbox (Sandbox): the sandbox we consider.
    command (string): the actual evaluation line.
    time_limit (float): time limit in seconds.
    memory_limit (int): memory limit in MB.

    return (bool, dict): True if the evaluation was successful, or
                         False; and additional data.

    """
    success = evaluation_step_before_run(sandbox,
                                         command,
                                         time_limit,
                                         memory_limit,
                                         allow_dirs,
                                         stdin_redirect,
                                         stdout_redirect,
                                         wait=True)
    if not success:
        logger.debug("Job failed in evaluation_step_before_run.")
        return False, None

    success, plus = evaluation_step_after_run(sandbox)
    if not success:
        logger.debug("Job failed in evaluation_step_after_run: %r" % plus)

    return success, plus
Example 30
    def _popen(self, command,
               stdin=None, stdout=None, stderr=None,
               preexec_fn=None, close_fds=True):
        """Execute the given command in the sandbox using
        subprocess.Popen, assigning the corresponding standard file
        descriptors.

        command (list): executable filename and arguments of the
                        command.
        stdin (file): a file descriptor/object or None.
        stdout (file): a file descriptor/object or None.
        stderr (file): a file descriptor/object or None.
        preexec_fn (callable): to be called just before execve() or
                               None.
        close_fds (bool): close all file descriptors before executing.
        return (object): popen object.

        """
        self.exec_time = None
        self.exec_num += 1
        self.log = None
        logger.debug("Executing program in sandbox with command: %s" %
                     " ".join(command))
        with open(self.relative_path(self.cmd_file), 'a') as commands:
            commands.write("%s\n" % (" ".join(command)))
        try:
            p = subprocess.Popen(command,
                                 stdin=stdin, stdout=stdout, stderr=stderr,
                                 preexec_fn=preexec_fn, close_fds=close_fds)
        except OSError:
            logger.critical("Failed to execute program in sandbox "
                            "with command: %s" %
                            " ".join(command), exc_info=True)
            raise

        return p
Example 31
    def put_file(self, digest, origin, description=""):
        """See FileCacherBackend.put_file().

        """
        try:
            with SessionGen() as session:

                # Check digest uniqueness
                if FSObject.get_from_digest(digest, session) is not None:
                    logger.debug("File %s already on database, "
                                 "dropping this one." % digest)
                    session.rollback()

                # If it is not already present, copy the file into the
                # lobject
                else:
                    fso = FSObject(description=description)
                    logger.debug("Sending file %s to the database." % digest)
                    with open(origin, 'rb') as temp_file:

                        with fso.get_lobject(session, mode='wb') \
                                as lobject:
                            logger.debug("Large object created.")
                            buf = temp_file.read(self.CHUNK_SIZE)
                            while buf != '':
                                while len(buf) > 0:
                                    written = lobject.write(buf)
                                    buf = buf[written:]
                                    # Cooperative yield
                                    gevent.sleep(0)
                                buf = temp_file.read(self.CHUNK_SIZE)

                    fso.digest = digest
                    session.add(fso)
                    session.commit()
                    logger.debug("File %s sent to the database." % digest)

        except IntegrityError:
            logger.warning("File %s caused an IntegrityError, ignoring..."
                           % digest)
Example 32
    def put_file(self, digest, origin, description=""):
        """See FileCacherBackend.put_file().

        """
        try:
            with SessionGen() as session:

                # Check digest uniqueness
                if FSObject.get_from_digest(digest, session) is not None:
                    logger.debug("File %s already on database, "
                                 "dropping this one." % digest)
                    session.rollback()

                # If it is not already present, copy the file into the
                # lobject
                else:
                    fso = FSObject(description=description)
                    logger.debug("Sending file %s to the database." % digest)
                    with open(origin, 'rb') as temp_file:
                        with fso.get_lobject(session, mode='wb') as lobject:
                            logger.debug("Large object created.")
                            buf = temp_file.read(self.CHUNK_SIZE)
                            while buf != '':
                                while len(buf) > 0:
                                    written = lobject.write(buf)
                                    buf = buf[written:]
                                    if self.service is not None:
                                        self.service._step()
                                buf = temp_file.read(self.CHUNK_SIZE)
                    fso.digest = digest
                    session.add(fso)
                    session.commit()
                    logger.debug("File %s sent to the database." % digest)

        except IntegrityError:
            logger.warning("File %s caused an IntegrityError, ignoring..."
                           % digest)
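
The inner while loop exists because lobject.write has the partial-write contract of a raw stream: it may consume only part of the buffer and returns how many bytes it took. os.write has the same contract, so the pattern can be exercised standalone (the file name and size are invented):

import os

def write_all(fd, buf):
    """Write the whole buffer, looping over partial writes; os.write,
    like lobject.write above, returns the number of bytes written."""
    while len(buf) > 0:
        written = os.write(fd, buf)
        buf = buf[written:]

fd = os.open("demo.bin", os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
write_all(fd, b"x" * 1000000)
os.close(fd)
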
Example 33
    def delete(self):
        """Delete the directory where the sendbox operated.

        """
        logger.debug("Deleting sandbox in %s" % self.path)
        shutil.rmtree(self.path)
Example 34
    def get_file(self, digest, path=None, file_obj=None,
                 string=False, temp_path=False, temp_file_obj=False):
        """Get a file from the storage, possibly using the cache if
        the file is available there.

        digest (string): the sha1 sum of the file.
        path (string): a path where to save the file.
        file_obj (file): a handler where to save the file (that is not
                         closed at return).
        string (bool): True to return content as a string.
        temp_path (bool): True to return path of a temporary file with
                          that content. The file is reserved to the
                          caller, who has the duty to unlink it.
        temp_file_obj (bool): True to return a file object opened to a
                              temporary file with that content. The
                              file is reserved to the caller. Use this
                              method only for debugging purposes, as it
                              leaves a file lying in the temporary
                              directory of FileCacher.

        """
        if [string, temp_path, temp_file_obj].count(True) > 1:
            raise ValueError("Ask for at most one amongst content, "
                             "temp path and temp file obj.")

        cache_path = os.path.join(self.obj_dir, digest)
        cache_exists = os.path.exists(cache_path)

        logger.debug("Getting file %s" % (digest))

        if not cache_exists:
            logger.debug("File %s not in cache, downloading "
                         "from database." % digest)

            # Receives the file from the database
            temp_file, temp_filename = tempfile.mkstemp(dir=self.tmp_dir)
            temp_file = os.fdopen(temp_file, "wb")
            self.backend.get_file(digest, temp_filename)

            # And move it in the cache. Warning: this is not atomic if
            # the temp and the cache dir are on different filesystems.
            shutil.move(temp_filename, cache_path)

            logger.debug("File %s downloaded." % digest)

        # Saving to path
        if path is not None:
            shutil.copy(cache_path, path)

        # Saving to file object
        if file_obj is not None:
            with open(cache_path, "rb") as file_:
                shutil.copyfileobj(file_, file_obj)

        # Returning string?
        if string:
            with open(cache_path, "rb") as cache_file:
                return cache_file.read()

        # Returning temporary file?
        elif temp_path:
            temp_file, temp_filename = tempfile.mkstemp(dir=self.tmp_dir)
            os.close(temp_file)
            shutil.copy(cache_path, temp_filename)
            return temp_filename

        # Returning temporary file object?
        elif temp_file_obj:
            temp_file, temp_filename = tempfile.mkstemp(dir=self.tmp_dir)
            os.close(temp_file)
            shutil.copy(cache_path, temp_filename)
            temp_file = open(temp_filename, "rb")
            return temp_file
Example 35
    def get_file(self,
                 digest,
                 path=None,
                 file_obj=None,
                 string=False,
                 temp_path=False,
                 temp_file_obj=False):
        """Get a file from the storage, possibly using the cache if
        the file is available there.

        digest (string): the sha1 sum of the file.
        path (string): a path where to save the file.
        file_obj (file): a handler where to save the file (that is not
                         closed at return).
        string (bool): True to return content as a string.
        temp_path (bool): True to return path of a temporary file with
                          that content. The file is reserved to the
                          caller, who has the duty to unlink it.
        temp_file_obj (bool): True to return a file object opened to a
                              temporary file with that content. The
                              file is reserved to the caller. Use this
                              method only for debugging purposes, as it
                              leaves a file lying in the temporary
                              directory of FileCacher.

        """
        if [string, temp_path, temp_file_obj].count(True) > 1:
            raise ValueError("Ask for at most one amongst content, "
                             "temp path and temp file obj.")

        cache_path = os.path.join(self.obj_dir, digest)
        cache_exists = os.path.exists(cache_path)

        logger.debug("Getting file %s" % (digest))

        if not cache_exists:
            logger.debug("File %s not in cache, downloading "
                         "from database." % digest)

            # Receives the file from the database
            temp_file, temp_filename = tempfile.mkstemp(dir=self.tmp_dir)
            temp_file = os.fdopen(temp_file, "wb")
            self.backend.get_file(digest, temp_filename)

            # And move it in the cache. Warning: this is not atomic if
            # the temp and the cache dir are on different filesystems.
            shutil.move(temp_filename, cache_path)

            logger.debug("File %s downloaded." % digest)

        # Saving to path
        if path is not None:
            shutil.copy(cache_path, path)

        # Saving to file object
        if file_obj is not None:
            with open(cache_path, "rb") as file_:
                shutil.copyfileobj(file_, file_obj)

        # Returning string?
        if string:
            with open(cache_path, "rb") as cache_file:
                return cache_file.read()

        # Returning temporary file?
        elif temp_path:
            temp_file, temp_filename = tempfile.mkstemp(dir=self.tmp_dir)
            os.close(temp_file)
            shutil.copy(cache_path, temp_filename)
            return temp_filename

        # Returning temporary file object?
        elif temp_file_obj:
            temp_file, temp_filename = tempfile.mkstemp(dir=self.tmp_dir)
            os.close(temp_file)
            shutil.copy(cache_path, temp_filename)
            temp_file = open(temp_filename, "rb")
            return temp_file
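
Stripped of its many output modes, get_file is a digest-keyed read-through cache: look in obj_dir, download on a miss, then serve every request from the cached copy. A minimal sketch of that control flow, with the database backend replaced by a hypothetical fetch callable:

import os
import shutil
import tempfile

def cached_get(digest, obj_dir, fetch):
    """Return the content for digest, filling the cache on a miss.
    `fetch(digest, dest_path)` is an invented stand-in for the storage
    backend; it must write the file at dest_path."""
    cache_path = os.path.join(obj_dir, digest)
    if not os.path.exists(cache_path):
        fd, temp_filename = tempfile.mkstemp(dir=obj_dir)
        os.close(fd)
        fetch(digest, temp_filename)
        # Not atomic if the temp dir and the cache dir live on
        # different filesystems, the same caveat as above.
        shutil.move(temp_filename, cache_path)
    with open(cache_path, "rb") as cache_file:
        return cache_file.read()
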
Example 36
    def _store_resources(self, store=True):
        """Looks at the resources usage and store the data locally.

        store (bool): if False, run the method but do not store the
                      resulting values - useful for initializing the
                      previous values

        """
        logger.debug("ResourceService._store_resources")
        # We use the precise time to compute the delta
        now = time.time()
        delta = now - self._last_saved_time
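        # Busy-wait until the clock advances, so that delta is nonzero
        # and the per-second rates computed below do not divide by zero.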
        while delta == 0.0:
            now = time.time()
            delta = now - self._last_saved_time
        self._last_saved_time = now
        now = int(now)

        data = {}

        # CPU
        cpu_times = self._get_cpu_times()
        data["cpu"] = dict((x, int(round((cpu_times[x] -
                                          self._prev_cpu_times[x])
                                   / delta * 100.0)))
                            for x in cpu_times)
        data["cpu"]["num_cpu"] = psutil.NUM_CPUS
        self._prev_cpu_times = cpu_times

        # Memory. The following relations hold (I think... I only
        # verified them experimentally on a swap-less system):
        # * vmem.free == vmem.available - vmem.cached - vmem.buffers
        # * vmem.total == vmem.used + vmem.free
        # That means that cache & buffers are counted both in .used
        # and in .available. We want to partition the memory into
        # types that sum up to vmem.total.
        vmem = psutil.virtual_memory()
        swap = psutil.swap_memory()
        data["memory"] = {
            "ram_total": vmem.total / B_TO_MB,
            "ram_available": vmem.free / B_TO_MB,
            "ram_cached": vmem.cached / B_TO_MB,
            "ram_buffers": vmem.buffers / B_TO_MB,
            "ram_used": (vmem.used - vmem.cached - vmem.buffers) / B_TO_MB,
            "swap_total": swap.total / B_TO_MB,
            "swap_available": swap.free / B_TO_MB,
            "swap_used": swap.used / B_TO_MB,
            }

        data["services"] = {}
        # Details of our services
        for service in self._local_services:
            dic = {"autorestart": self._will_restart[service],
                   "running": True}
            proc = self._procs[service]
            # If we don't have a previously found process for the
            # service, we find it
            if proc is None:
                proc = self._find_proc(service)
            # If we still do not find it, there is no process
            if proc is None:
                dic["running"] = False
            # We have a process, but maybe it has been shut down
            elif not proc.is_running():
                # If so, let us find the new one
                proc = self._find_proc(service)
                # If there is no new one, continue
                if proc is None:
                    dic["running"] = False
            # If the process is not running, we have nothing to do.
            if not dic["running"]:
                data["services"][str(service)] = dic
                continue

            try:
                dic["since"] = self._last_saved_time - proc.create_time
                dic["resident"], dic["virtual"] = \
                    (x / 1048576 for x in proc.get_memory_info())
                cpu_times = proc.get_cpu_times()
                dic["user"] = int(
                    round((cpu_times[0] -
                           self._services_prev_cpu_times[service][0])
                          / delta * 100))
                dic["sys"] = int(
                    round((cpu_times[1] -
                           self._services_prev_cpu_times[service][1])
                          / delta * 100))
                self._services_prev_cpu_times[service] = cpu_times
                try:
                    dic["threads"] = proc.get_num_threads()
                except AttributeError:
                    dic["threads"] = 0  # 0 = Not implemented

                self._procs[service] = proc
            except psutil.error.NoSuchProcess:
                # Shut down while we operated?
                dic = {"autorestart": self._will_restart[service],
                       "running": False}
            data["services"][str(service)] = dic

        if store:
            if len(self._local_store) >= 5000:  # almost 7 hours
                self._local_store = self._local_store[1:]
            self._local_store.append((now, data))

        return True
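
The CPU figures are percentages computed from deltas of cumulative counters: (current - previous) / elapsed * 100. Worked through with invented numbers:

prev = {"user": 100.0, "system": 40.0}  # cumulative CPU seconds at t0
curr = {"user": 103.0, "system": 40.5}  # cumulative CPU seconds at t1
delta = 5.0                             # wall-clock seconds elapsed

usage = dict((x, int(round((curr[x] - prev[x]) / delta * 100.0)))
             for x in curr)
print(usage)  # {'user': 60, 'system': 10}
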
Example 37
    def __init__(self, file_cacher=None, temp_dir=None):
        """Initialization.

        file_cacher (FileCacher): an instance of the FileCacher class
                                  (to interact with FS).
        temp_dir (string): the directory where to put the sandbox
                           (which is itself a directory).

        """
        self.file_cacher = file_cacher

        # Get our shard number, to use as a unique identifier for the sandbox
        # on this machine.
        if file_cacher is not None and file_cacher.service is not None:
            box_id = file_cacher.service._my_coord.shard
        else:
            box_id = 0

        # We create a directory "tmp" inside the outer temporary directory,
        # because the sandbox will bind-mount the inner one. The sandbox also
        # runs code as a different user, and so we need to ensure that they can
        # read and write to the directory. But we don't want everybody on the
        # system to, which is why the outer directory exists with no read
        # permissions.
        self.inner_temp_dir = "/tmp"
        if temp_dir is None:
            temp_dir = config.temp_dir
        self.outer_temp_dir = tempfile.mkdtemp(dir=temp_dir)
        # Don't use os.path.join here, because the absoluteness of /tmp will
        # bite you.
        self.path = self.outer_temp_dir + self.inner_temp_dir
        os.mkdir(self.path)
        os.chmod(self.path, 0777)

        self.exec_name = 'isolate'
        self.box_exec = self.detect_box_executable()
        self.info_basename = "run.log"   # Used for -M
        self.cmd_file = "commands.log"
        self.log = None
        self.exec_num = -1
        logger.debug("Sandbox in `%s' created, using box `%s'." %
                     (self.path, self.box_exec))

        # Default parameters for isolate
        self.box_id = box_id           # -b
        self.cgroup = True             # --cg
        self.chdir = self.inner_temp_dir  # -c
        self.dirs = []                 # -d
        self.dirs += [(self.inner_temp_dir, self.path, "rw")]
        self.preserve_env = False      # -e
        self.inherit_env = []          # -E
        self.set_env = {}              # -E
        self.stdin_file = None         # -i
        self.stack_space = None        # -k
        self.address_space = None      # -m
        self.stdout_file = None        # -o
        self.max_processes = 1         # -p
        self.stderr_file = None        # -r
        self.timeout = None            # -t
        self.verbosity = 0             # -v
        self.wallclock_timeout = None  # -w
        self.extra_timeout = None      # -x

        # Tell isolate to get the sandbox ready.
        box_cmd = [self.box_exec, "--cg", "-b", str(self.box_id)]
        ret = subprocess.call(box_cmd + ["--init"])
        if ret != 0:
            raise SandboxInterfaceException(
                "Failed to initialize sandbox (error %d)" % ret)
Example 38
def compilation_step(sandbox, command):
    """Execute a compilation command in the sandbox, setting up the
    sandbox itself with a standard configuration and doing standard
    checks at the end of the compilation.

    Note: this needs a sandbox already created.

    sandbox (Sandbox): the sandbox we consider.
    command (string): the actual compilation line.

    """
    # Set sandbox parameters suitable for compilation.
    sandbox.dirs += [("/etc", None, None)]
    sandbox.preserve_env = True
    sandbox.max_processes = None
    sandbox.timeout = 10
    sandbox.wallclock_timeout = 20
    sandbox.address_space = 256 * 1024
    sandbox.stdout_file = "compiler_stdout.txt"
    sandbox.stderr_file = "compiler_stderr.txt"

    # Actually run the compilation command.
    logger.debug("Starting compilation step.")
    box_success = sandbox.execute_without_std(command, wait=True)
    if not box_success:
        logger.error("Compilation aborted because of "
                     "sandbox error in `%s'." % sandbox.path)
        return False, None, None, None

    # Detect the outcome of the compilation.
    exit_status = sandbox.get_exit_status()
    exit_code = sandbox.get_exit_code()
    stdout = sandbox.get_file_to_string("compiler_stdout.txt")
    if stdout.strip() == "":
        stdout = "(empty)\n"
    stdout = unicode(stdout, 'utf-8', errors='replace')
    stderr = sandbox.get_file_to_string("compiler_stderr.txt")
    if stderr.strip() == "":
        stderr = "(empty)\n"
    stderr = unicode(stderr, 'utf-8', errors='replace')
    compiler_output = "Compiler standard output:\n" \
        "%s\n" \
        "Compiler standard error:\n" \
        "%s" % (stdout, stderr)

    # And retrieve some interesting data.
    plus = {
        "execution_time": sandbox.get_execution_time(),
        "execution_wall_clock_time": sandbox.get_execution_wall_clock_time(),
        "memory_used": sandbox.get_memory_used(),
        "stdout": stdout,
        "stderr": stderr,
        "exit_status": exit_status,
    }

    # From now on, we test for the various possible outcomes and
    # act appropriately.

    # Execution finished successfully and the submission was
    # correctly compiled.
    success = False
    compilation_success = None
    text = None

    if exit_status == Sandbox.EXIT_OK and exit_code == 0:
        logger.debug("Compilation successfully finished.")
        success = True
        compilation_success = True
        text = "OK %s\n%s" % (sandbox.get_stats(), compiler_output)

    # Error in compilation: returning the error to the user.
    elif (exit_status == Sandbox.EXIT_OK and exit_code != 0) or \
             exit_status == Sandbox.EXIT_NONZERO_RETURN:
        logger.debug("Compilation failed.")
        success = True
        compilation_success = False
        text = "Failed %s\n%s" % (sandbox.get_stats(), compiler_output)

    # Timeout: returning the error to the user
    elif exit_status == Sandbox.EXIT_TIMEOUT:
        logger.debug("Compilation timed out.")
        success = True
        compilation_success = False
        text = "Time out %s\n%s" % (sandbox.get_stats(), compiler_output)

    # Suicide with signal (probably memory limit): returning the error
    # to the user
    elif exit_status == Sandbox.EXIT_SIGNAL:
        signal = sandbox.get_killing_signal()
        logger.debug("Compilation killed with signal %s." % (signal))
        success = True
        compilation_success = False
        plus["signal"] = signal
        text = "Killed with signal %d %s.\nThis could be triggered by " \
            "violating memory limits\n%s" % \
            (signal, sandbox.get_stats(), compiler_output)

    # Sandbox error: this isn't a user error, the administrator needs
    # to check the environment
    elif exit_status == Sandbox.EXIT_SANDBOX_ERROR:
        logger.error("Compilation aborted because of sandbox error.")

    # Forbidden syscall: this shouldn't happen, probably the
    # administrator should relax the syscall constraints
    elif exit_status == Sandbox.EXIT_SYSCALL:
        syscall = sandbox.get_killing_syscall()
        logger.error("Compilation aborted "
                     "because of forbidden syscall `%s'." % syscall)

    # Forbidden file access: this could be triggered by the user
    # including a forbidden file or too strict sandbox constraints; the
    # administrator should have a look at it
    elif exit_status == Sandbox.EXIT_FILE_ACCESS:
        filename = sandbox.get_forbidden_file_error()
        logger.error("Compilation aborted "
                     "because of forbidden access to file `%s'." % filename)

    # Why wasn't the exit status caught by one of the cases above?
    else:
        logger.error("Shouldn't arrive here, failing.")

    return success, compilation_success, text, plus
Example 39
    def _store_resources(self, store=True):
        """Looks at the resources usage and store the data locally.

        store (bool): if False, run the method but do not store the
                      resulting values - useful for initializing the
                      previous values

        """
        logger.debug("ResourceService._store_resources")
        # We use the precise time to compute the delta
        now = time.time()
        delta = now - self._last_saved_time
        self._last_saved_time = now
        now = int(now)

        data = {}

        # CPU
        cpu_times = self._get_cpu_times()
        data["cpu"] = dict((x, int(round((cpu_times[x] -
                                          self._prev_cpu_times[x])
                                   / delta * 100.0)))
                            for x in cpu_times)
        data["cpu"]["num_cpu"] = psutil.NUM_CPUS
        self._prev_cpu_times = cpu_times

        # Memory. We differentiate from old and deprecated (< 0.3.0)
        # methods to the new ones. Remove the differentiation when we
        # drop the support for Ubuntu 11.10 (which ships 0.2.1).
        ram_cached = psutil.cached_phymem()
        ram_buffers = psutil.phymem_buffers()
        if psutil_version < (0, 3, 0):
            data["memory"] = {
                "ram_total": psutil.TOTAL_PHYMEM / B_TO_MB,
                "ram_available": psutil.avail_phymem() / B_TO_MB,
                "ram_cached": ram_cached / B_TO_MB,
                "ram_buffers": ram_buffers / B_TO_MB,
                "ram_used": (psutil.used_phymem() - ram_cached - ram_buffers)
                                  / B_TO_MB,
                "swap_total": psutil.total_virtmem() / B_TO_MB,
                "swap_available": psutil.avail_virtmem() / B_TO_MB,
                "swap_used": psutil.used_virtmem() / B_TO_MB,
                }
        else:
            phymem = psutil.phymem_usage()
            virtmem = psutil.virtmem_usage()
            data["memory"] = {
                "ram_total": phymem.total / B_TO_MB,
                "ram_available": phymem.free / B_TO_MB,
                "ram_cached": ram_cached / B_TO_MB,
                "ram_buffers": ram_buffers / B_TO_MB,
                "ram_used": (phymem.used - ram_cached - ram_buffers) / B_TO_MB,
                "swap_total": virtmem.total / B_TO_MB,
                "swap_available": virtmem.free / B_TO_MB,
                "swap_used": virtmem.used / B_TO_MB,
                }

        data["services"] = {}
        # Details of our services
        for service in self._local_services:
            dic = {"autorestart": self._will_restart[service],
                   "running": True}
            proc = self._procs[service]
            # If we don't have a previously found process for the
            # service, we find it
            if proc is None:
                proc = self._find_proc(service)
            # If we still do not find it, there is no process
            if proc is None:
                dic["running"] = False
            # We have a process, but maybe it has been shut down
            elif not proc.is_running():
                # If so, let us find the new one
                proc = self._find_proc(service)
                # If there is no new one, continue
                if proc is None:
                    dic["running"] = False
            # If the process is not running, we have nothing to do.
            if not dic["running"]:
                data["services"][str(service)] = dic
                continue

            try:
                dic["since"] = self._last_saved_time - proc.create_time
                dic["resident"], dic["virtual"] = \
                    (x / 1048576 for x in proc.get_memory_info())
                cpu_times = proc.get_cpu_times()
                dic["user"] = int(
                    round((cpu_times[0] -
                           self._services_prev_cpu_times[service][0])
                          / delta * 100))
                dic["sys"] = int(
                    round((cpu_times[1] -
                           self._services_prev_cpu_times[service][1])
                          / delta * 100))
                self._services_prev_cpu_times[service] = cpu_times
                try:
                    dic["threads"] = proc.get_num_threads()
                except AttributeError:
                    dic["threads"] = 0  # 0 = Not implemented

                self._procs[service] = proc
            except psutil.error.NoSuchProcess:
                # Shut down while we operated?
                dic = {"autorestart": self._will_restart[service],
                       "running": False}
            data["services"][str(service)] = dic

        if store:
            if len(self._local_store) >= 5000:  # almost 7 hours
                self._local_store = self._local_store[1:]
            self._local_store.append((now, data))

        return True
Example 40
def evaluation_step_after_run(sandbox):
    """Second part of an evaluation step, after the running.

    """
    # Detect the outcome of the execution.
    exit_status = sandbox.get_exit_status()

    # And retrieve some interesting data.
    plus = {
        "execution_time": sandbox.get_execution_time(),
        "execution_wall_clock_time": sandbox.get_execution_wall_clock_time(),
        "memory_used": sandbox.get_memory_used(),
        "exit_status": exit_status,
    }

    success = False

    # Timeout: returning the error to the user.
    if exit_status == Sandbox.EXIT_TIMEOUT:
        logger.debug("Execution timed out.")
        success = True

    # Suicide with signal (memory limit, segfault, abort): returning
    # the error to the user.
    elif exit_status == Sandbox.EXIT_SIGNAL:
        signal = sandbox.get_killing_signal()
        logger.debug("Execution killed with signal %d." % signal)
        success = True
        plus["signal"] = signal

    # Sandbox error: this isn't a user error, the administrator needs
    # to check the environment.
    elif exit_status == Sandbox.EXIT_SANDBOX_ERROR:
        logger.error("Evaluation aborted because of sandbox error.")

    # Forbidden syscall: returning the error to the user. Note: this
    # can be triggered also while allocating too much memory
    # dynamically (offensive syscall is mprotect).
    elif exit_status == Sandbox.EXIT_SYSCALL:
        syscall = sandbox.get_killing_syscall()
        msg = "Execution killed because of forbidden syscall %s." % \
            syscall
        logger.debug(msg)
        success = True
        plus["syscall"] = syscall

    # Forbidden file access: returning the error to the user, without
    # disclosing the offending file (or can we?).
    elif exit_status == Sandbox.EXIT_FILE_ACCESS:
        filename = sandbox.get_forbidden_file_error()
        msg = "Execution killed because of forbidden file access."
        logger.debug("%s `%s'." % (msg, filename))
        success = True
        plus["filename"] = filename

    # The exit code was nonzero: returning the error to the user.
    elif exit_status == Sandbox.EXIT_NONZERO_RETURN:
        msg = "Execution failed because the return code was nonzero."
        logger.debug("%s" % msg)
        success = True

    # Last check before assuming that evaluation finished
    # successfully; we accept the evaluation even if the exit code
    # isn't 0.
    elif exit_status != Sandbox.EXIT_OK:
        logger.error("Shouldn't arrive here, failing.")

    else:
        success = True

    return success, plus