def _do_cleanup(self):
    """
    Background loop that keeps Docker image disk usage under the configured
    maximum by evicting the least-recently-used images.
    """
    logger.info('Image cleanup thread started.')
    while not self._should_stop_cleanup():
        try:
            # Keep evicting the stalest image until usage drops back under
            # the cap, there are no tracked images left, or nothing more is
            # reclaimable.
            reducing = True
            while reducing:
                total_bytes, reclaimable_bytes = self._docker.get_disk_usage()
                over_limit = total_bytes > self._max_images_bytes
                removable = len(self._last_used) > 0 and reclaimable_bytes > 0
                if over_limit and removable:
                    logger.debug('Docker images disk usage: %s (max %s)',
                                 size_str(total_bytes),
                                 size_str(self._max_images_bytes))
                    self._remove_stalest_image()
                else:
                    reducing = False
        except Exception:
            # Best effort: log the failure and retry on the next pass.
            traceback.print_exc()

        # Give the stop flag a chance before going to sleep.
        if self._should_stop_cleanup():
            break
        time.sleep(1)
# Example #2
    def _check_and_report_resource_utilization(self, report):
        """
        Sample the run's wall time, container stats, and disk usage; enforce
        the requested time/memory/disk limits (killing the run on violation);
        and, when report is True, push the collected metadata to the bundle
        service.
        """
        new_metadata = {}
        resources = self._resources

        # Wall clock time since the run started.
        new_metadata['time'] = time.time() - self._start_time
        if resources['request_time'] and new_metadata['time'] > resources['request_time']:
            self.kill('Time limit %s exceeded.' % duration_str(resources['request_time']))

        # Container stats contribute memory, time_user and time_system.
        new_metadata.update(self._docker.get_container_stats(self._container_id))
        has_memory = 'memory' in new_metadata
        if has_memory and new_metadata['memory'] > self._max_memory:
            self._max_memory = new_metadata['memory']
        new_metadata['memory_max'] = self._max_memory
        if (resources['request_memory'] and has_memory
                and new_metadata['memory'] > resources['request_memory']):
            self.kill('Memory limit %sb exceeded.' % size_str(resources['request_memory']))

        # Disk utilization is maintained elsewhere; read it under its lock.
        with self._disk_utilization_lock:
            new_metadata['data_size'] = self._disk_utilization
        if resources['request_disk'] and new_metadata['data_size'] > resources['request_disk']:
            self.kill('Disk limit %sb exceeded.' % size_str(resources['request_disk']))

        new_metadata['last_updated'] = int(time.time())

        if report:
            logger.debug('Reporting resource utilization for run with UUID %s', self._uuid)
            try:
                self._bundle_service.update_bundle_metadata(
                    self._worker.id, self._uuid, new_metadata)
            except BundleServiceException:
                traceback.print_exc()
# Example #3
    def download_image(self, docker_image, loop_callback):
        """
        Pull a Docker image, streaming human-readable progress strings to
        loop_callback(status).

        Raises DockerException when the pull stream reports an error.
        """
        # Default to the "latest" tag when none is given. Only inspect the
        # segment after the last '/': a registry host may itself contain a
        # colon (e.g. "myregistry:5000/ubuntu" has no tag), which the naive
        # split-on-":" count would mistake for a tag. Digest references
        # ("image@sha256:...") still contain ':' in the last segment and are
        # correctly left untouched.
        if ':' not in docker_image.rsplit('/', 1)[-1]:
            logger.debug(
                'Missing tag/digest on request docker image "%s", defaulting to latest',
                docker_image)
            docker_image = ':'.join([docker_image, 'latest'])

        logger.debug('Downloading Docker image %s', docker_image)
        for response in self.client.api.pull(docker_image,
                                             stream=True,
                                             decode=True):
            if 'error' in response:
                raise DockerException(response['error'])

            # Build "status (current / total)"; either part may be absent
            # from a given stream record.
            status = ''
            try:
                status = response['status']
            except KeyError:
                pass
            try:
                status += ' (%s / %s)' % (
                    size_str(response['progressDetail']['current']),
                    size_str(response['progressDetail']['total']))
            except KeyError:
                pass
            loop_callback(status)
# Example #4
 def status_str():
     """Describe download progress, with a percentage when the total is known."""
     if not total_bytes:
         return 'Downloaded %s' % (formatting.size_str(num_bytes))
     percent = 100.0 * num_bytes / total_bytes
     return 'Downloaded %s/%s (%d%%)' % (formatting.size_str(num_bytes),
                                         formatting.size_str(total_bytes),
                                         percent)
# Example #5
 def status_str():
     """Render a human-readable download-progress message."""
     size = formatting.size_str
     if total_bytes:
         fraction = 100.0 * num_bytes / total_bytes
         return 'Downloaded %s/%s (%d%%)' % (size(num_bytes), size(total_bytes), fraction)
     return 'Downloaded %s' % (size(num_bytes))
# Example #6
    def _do_cleanup(self):
        """
        Background loop that keeps the total size of cached dependencies
        under self._max_work_dir_size_bytes by repeatedly evicting the
        least-recently-used dependency that has no children.
        """
        while not self._should_stop_cleanup():
            while True:
                # If the total size of all dependencies exceeds
                # self._max_work_dir_size_bytes, remove the oldest unused
                # dependency. Otherwise, break out of the loop.
                total_size_bytes = 0
                first_used_time = float('inf')
                first_used_target = None
                self._lock.acquire()
                for target, dependency in self._dependencies.items():
                    # Skip dependencies still being downloaded: their size is
                    # not final and they must not be evicted.
                    if dependency.downloading:
                        continue

                    # We compute the size of dependencies here to keep the code
                    # that adds new bundles to the dependency manager simpler.
                    if dependency.size_bytes is None:
                        # NOTE(review): the lock is released around the
                        # (potentially slow) disk walk. Other threads may
                        # mutate dependency state in the meantime, so the
                        # totals computed here are best-effort — confirm this
                        # interleaving is intended.
                        self._lock.release()
                        size_bytes = get_path_size(
                            os.path.join(self._bundles_dir, dependency.path))
                        self._lock.acquire()
                        dependency.size_bytes = size_bytes
                        self._save_state()

                    total_size_bytes += dependency.size_bytes
                    # Track the least-recently-used dependency that has no
                    # children (children mean it is still in use).
                    if (not dependency.has_children()
                            and dependency.last_used < first_used_time):
                        first_used_time = dependency.last_used
                        first_used_target = target
                self._lock.release()

                if (total_size_bytes > self._max_work_dir_size_bytes
                        and first_used_target is not None):
                    logger.info(
                        'used ({}) exceeds capacity ({}), removing oldest bundle from cache'
                        .format(size_str(total_size_bytes),
                                size_str(self._max_work_dir_size_bytes)))
                    with self._lock:
                        dependency = self._dependencies[first_used_target]
                        if dependency.has_children():
                            # Since we released the lock there could be new
                            # children.
                            continue
                        del self._dependencies[first_used_target]
                        self._paths.remove(dependency.path)
                        self._save_state()
                        remove_path(
                            os.path.join(self._bundles_dir, dependency.path))
                else:
                    break

            # Sleep for _cleanup_sleep_secs seconds, allowing interruption
            # every second. (xrange: this module targets Python 2.)
            for _ in xrange(0, self._cleanup_sleep_secs):
                time.sleep(1)
                if self._should_stop_cleanup():
                    break
# Example #7
    def download_image(self, docker_image, loop_callback):
        """
        Pull docker_image through the Docker HTTP API, reporting progress via
        loop_callback(status_string).

        Raises DockerException if the pull request fails or the JSON stream
        reports an error.
        """
        # Default to the "latest" tag when none is given. Only inspect the
        # segment after the last '/': a registry host may itself contain a
        # colon (e.g. "myregistry:5000/ubuntu" has no tag), which the naive
        # split-on-":" count would mistake for a tag.
        if ':' not in docker_image.rsplit('/', 1)[-1]:
            logger.debug('Missing tag/digest on request docker image "%s", defaulting to latest', docker_image)
            docker_image = ':'.join([docker_image, 'latest'])

        logger.debug('Downloading Docker image %s', docker_image)
        with closing(self._create_connection()) as conn:
            conn.request('POST',
                         '/images/create?fromImage=%s' % docker_image)
            create_image_response = conn.getresponse()
            if create_image_response.status != 200:
                raise DockerException(create_image_response.read())

            # Wait for the download to finish. Docker sends a stream of JSON
            # objects. Since we don't know how long each one is we read a
            # character at a time until what we have so far parses as a valid
            # JSON object.
            while True:
                response = None
                line = ''
                while True:
                    ch = create_image_response.read(1)
                    if not ch:
                        break
                    line += ch
                    try:
                        response = json.loads(line)
                        logger.debug(line.strip())
                        break
                    except ValueError:
                        pass
                if not response:
                    break
                if 'error' in response:
                    raise DockerException(response['error'])

                # Build "status (current / total)"; either part may be
                # missing from a given stream record.
                status = ''
                try:
                    status = response['status']
                except KeyError:
                    pass
                try:
                    status += ' (%s / %s)' % (
                        size_str(response['progressDetail']['current']),
                        size_str(response['progressDetail']['total']))
                except KeyError:
                    pass
                loop_callback(status)
# Example #8
    def _check_and_report_resource_utilization(self, report):
        """
        Collect wall time, container stats and disk usage for the run,
        enforce the requested time and disk limits (killing the run on
        violation), and, when report is True, send the metadata to the
        bundle service.
        """
        new_metadata = {}
        resources = self._resources

        # Wall clock time since the run started.
        elapsed = time.time() - self._start_time
        new_metadata['time'] = elapsed
        if resources['request_time'] and elapsed > resources['request_time']:
            self.kill('Time limit %s exceeded.' % duration_str(resources['request_time']))

        # memory, time_user and time_system come from the container stats.
        stats = self._docker.get_container_stats(self._container_id)
        new_metadata.update(stats)
        if 'memory' in new_metadata and new_metadata['memory'] > self._max_memory:
            self._max_memory = new_metadata['memory']
        new_metadata['memory_max'] = self._max_memory

        # Disk utilization is maintained elsewhere; read it under its lock.
        with self._disk_utilization_lock:
            new_metadata['data_size'] = self._disk_utilization
        if resources['request_disk'] and new_metadata['data_size'] > resources['request_disk']:
            self.kill('Disk limit %sb exceeded.' % size_str(resources['request_disk']))

        new_metadata['last_updated'] = int(time.time())

        if not report:
            return
        logger.debug('Reporting resource utilization for run with UUID %s', self._uuid)
        try:
            self._bundle_service.update_bundle_metadata(self._worker.id, self._uuid, new_metadata)
        except BundleServiceException:
            traceback.print_exc()
# Example #9
def copy(source, dest, autoflush=True, print_status=None):
    """
    Read from the source file handle and write the data to the dest file handle.

    source: readable file-like object.
    dest: writable file-like object.
    autoflush: when True, flush dest after every chunk.
    print_status: optional label; when given, "<label>: <size>" progress is
        continuously rewritten on stderr, followed by a final "[done]" line.
        (Python 2 print statement: the trailing comma suppresses the newline.)
    """
    n = 0  # total bytes copied so far
    while True:
        buf = source.read(BUFFER_SIZE)
        if not buf:
            break
        dest.write(buf)
        n += len(buf)
        if autoflush:
            dest.flush()
        if print_status:
            # '\r' returns to the start of the line so the progress
            # message overwrites itself in place.
            print >>sys.stderr, "\r%s: %s" % (print_status, formatting.size_str(n)),
            sys.stderr.flush()
    if print_status:
        print >>sys.stderr, "\r%s: %s [done]" % (print_status, formatting.size_str(n))
# Example #10
def copy(source, dest, autoflush=True, print_status=False):
    '''
    Read from the source file handle and write the data to the dest file handle.

    source: readable file-like object.
    dest: writable file-like object.
    autoflush: when True, flush dest after every chunk.
    print_status: when truthy, "Copied <size>" progress is continuously
        rewritten on stderr, followed by a final "[done]" line. (Python 2
        print statement: the trailing comma suppresses the newline.)
    '''
    n = 0  # total bytes copied so far
    while True:
        buffer = source.read(BUFFER_SIZE)
        if not buffer:
            break
        dest.write(buffer)
        n += len(buffer)
        if autoflush:
            dest.flush()
        if print_status:
            # '\r' returns to the start of the line so the progress
            # message overwrites itself in place.
            print >>sys.stderr, "\rCopied %s" % formatting.size_str(n),
            sys.stderr.flush()
    if print_status:
        print >>sys.stderr, "\rCopied %s [done]" % formatting.size_str(n)
# Example #11
    def download_image(self, docker_image, loop_callback):
        """
        Pull docker_image through the Docker HTTP API, reporting progress via
        loop_callback(status_string).

        Raises DockerException if the pull request fails or the JSON stream
        reports an error. Unlike other variants, this one does not default a
        missing tag to "latest" — callers must pass a fully-tagged image.
        """
        logger.debug('Downloading Docker image %s', docker_image)
        with closing(self._create_connection()) as conn:
            conn.request('POST',
                         '/images/create?fromImage=%s' % docker_image)
            create_image_response = conn.getresponse()
            if create_image_response.status != 200:
                raise DockerException(create_image_response.read())

            # Wait for the download to finish. Docker sends a stream of JSON
            # objects. Since we don't know how long each one is we read a
            # character at a time until what we have so far parses as a valid
            # JSON object.
            while True:
                response = None
                line = ''
                while True:
                    ch = create_image_response.read(1)
                    # End of stream: stop accumulating.
                    if not ch:
                        break
                    line += ch
                    try:
                        # Attempt to parse what we have so far; ValueError
                        # means the JSON object is not yet complete.
                        response = json.loads(line)
                        logger.debug(line.strip())
                        break
                    except ValueError:
                        pass
                # No parsable object left means the stream is exhausted.
                if not response:
                    break
                if 'error' in response:
                    raise DockerException(response['error'])

                # Build "status (current / total)"; either part may be
                # missing from a given stream record.
                status = ''
                try:
                    status = response['status']
                except KeyError:
                    pass
                try:
                    status += ' (%s / %s)' % (
                        size_str(response['progressDetail']['current']),
                        size_str(response['progressDetail']['total']))
                except KeyError:
                    pass
                loop_callback(status)
# Example #12
 def update_status(bytes_uploaded):
     # Report archived-size upload progress through the enclosing updater.
     message = 'Uploading results: %s done (archived size)' % size_str(bytes_uploaded)
     updater(message)
# Example #13
 def update_status_and_check_killed(bytes_downloaded):
     # Push a progress update, then poll for an external kill request.
     progress = size_str(bytes_downloaded)
     updater('Downloading dependency %s/%s: %s done (archived size)' % (uuid, path, progress))
     self._check_killed()
# Example #14
 def update_status(bytes_uploaded):
     # Forward archived-size upload progress to the updater callback.
     done = size_str(bytes_uploaded)
     updater('Uploading results: %s done (archived size)' % done)
# Example #15
 def update_status_and_check_killed(bytes_downloaded):
     # Report progress for this dependency, then honor any kill request.
     text = 'Downloading dependency %s: %s done (archived size)' % (
         dep['child_path'], size_str(bytes_downloaded))
     updater(text)
     check_killed()
# Example #16
 def update_status_and_check_killed(bytes_downloaded):
     # Emit a progress update for this dependency, then poll the kill flag.
     size_text = size_str(bytes_downloaded)
     updater('Downloading dependency %s: %s done (archived size)' % (dep['child_path'], size_text))
     check_killed()