class ThreadPool(GroupMappingMixin):
    """
    A pool of native worker threads.

    This can be useful for CPU intensive functions, or those that
    otherwise will not cooperate with gevent. The best functions to execute
    in a thread pool are small functions with a single purpose; ideally they release
    the CPython GIL. Such functions are extension functions implemented in C.

    It implements the same operations as a :class:`gevent.pool.Pool`,
    but using threads instead of greenlets.

    .. note:: The method :meth:`apply_async` will always return a new
       greenlet, bypassing the threadpool entirely.

    Most users will not need to create instances of this class. Instead,
    use the threadpool already associated with gevent's hub::

        pool = gevent.get_hub().threadpool
        result = pool.spawn(lambda: "Some func").get()

    .. important:: It is only possible to use instances of this class from
       the thread running their hub. Typically that means from the thread that
       created them. Using the pattern shown above takes care of this.

       There is no gevent-provided way to have a single process-wide limit on the
       number of threads in various pools when doing that, however. The suggested
       way to use gevent and threadpools is to have a single gevent hub
       and its one threadpool (which is the default without doing any extra work).
       Only dispatch minimal blocking functions to the threadpool, functions that
       do not use the gevent hub.
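
       For example (a sketch; the un-monkey-patched ``socket.getaddrinfo``
       stands in for any small blocking call that releases the GIL and does
       not itself use the hub)::

           import socket
           import gevent

           pool = gevent.get_hub().threadpool
           # The blocking lookup runs in a worker thread while this
           # greenlet yields to the hub.
           addrinfo = pool.spawn(socket.getaddrinfo, 'example.com', 80).get()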

    The `len` of instances of this class is the number of enqueued
    (unfinished) tasks.

    .. caution:: Instances of this class are only true if they have
       unfinished tasks.
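
       For instance, an idle pool evaluates to ``False``::

           pool = gevent.get_hub().threadpool
           if not pool:
               print("no queued or running tasks")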

    .. versionchanged:: 1.5a3
       The undocumented ``apply_e`` function, deprecated since 1.1,
       was removed.
    """

    __slots__ = (
        'hub',
        '_maxsize',
        # A Greenlet that runs to adjust the number of worker
        # threads.
        'manager',
        # The PID of the process we were created in.
        # Used to help detect a fork and then re-create
        # internal state.
        'pid',
        'fork_watcher',
        # A semaphore initialized with ``maxsize`` counting the
        # number of available worker threads we have. As a
        # gevent.lock.Semaphore, this is only safe to use from a single
        # native thread.
        '_available_worker_threads_greenlet_sem',
        # A set of running or pending _WorkerGreenlet objects;
        # we rely on the GIL for thread safety.
        '_worker_greenlets',
        # The task queue is itself safe to use from multiple
        # native threads.
        'task_queue',
    )

    def __init__(self, maxsize, hub=None):
        if hub is None:
            hub = get_hub()
        self.hub = hub
        self.pid = os.getpid()
        self.manager = None
        self.task_queue = Queue()
        self.fork_watcher = None

        self._worker_greenlets = set()
        self._maxsize = 0
        # Note that by starting with 1, we actually allow
        # maxsize + 1 tasks in the queue.
        self._available_worker_threads_greenlet_sem = Semaphore(1, hub)
        self._set_maxsize(maxsize)
        self.fork_watcher = hub.loop.fork(ref=False)

    def _register_worker(self, worker):
        self._worker_greenlets.add(worker)

    def _unregister_worker(self, worker):
        self._worker_greenlets.discard(worker)

    def _set_maxsize(self, maxsize):
        if not isinstance(maxsize, integer_types):
            raise TypeError('maxsize must be integer: %r' % (maxsize, ))
        if maxsize < 0:
            raise ValueError('maxsize must not be negative: %r' % (maxsize, ))
        difference = maxsize - self._maxsize
        self._available_worker_threads_greenlet_sem.counter += difference
        self._maxsize = maxsize
        self.adjust()
        # Make sure any spawn() calls currently blocked on the semaphore
        # can unblock if maxsize increased.
        self._available_worker_threads_greenlet_sem._start_notify()

    def _get_maxsize(self):
        return self._maxsize

    maxsize = property(_get_maxsize,
                       _set_maxsize,
                       doc="""\
    The maximum allowed number of worker threads.

    This is also (approximately) a limit on the number of tasks that
    can be queued without blocking the submitting greenlet. If this many
    tasks are already running, then the next greenlet that submits a task
    will block waiting for a task to finish.
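
    A sketch of the blocking behaviour, assuming the pool was created with
    ``maxsize=1`` and ``work`` is a hypothetical long-running callable::

        r1 = pool.spawn(work)   # returns immediately
        r2 = pool.spawn(work)   # may also return immediately (hence "approximately")
        r3 = pool.spawn(work)   # blocks this greenlet until a task finishes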
    """)

    def __repr__(self, _format_hub=_format_hub):
        return '<%s at 0x%x tasks=%s size=%s maxsize=%s hub=%s>' % (
            self.__class__.__name__,
            id(self),
            len(self),
            self.size,
            self.maxsize,
            _format_hub(self.hub),
        )

    def __len__(self):
        # XXX just do unfinished_tasks property
        # Note that this becomes the boolean value of this class,
        # that's probably not what we want!
        return self.task_queue.unfinished_tasks

    def _get_size(self):
        return len(self._worker_greenlets)

    def _set_size(self, size):
        if size < 0:
            raise ValueError('Size of the pool cannot be negative: %r' %
                             (size, ))
        if size > self._maxsize:
            raise ValueError(
                'Size of the pool cannot be bigger than maxsize: %r > %r' %
                (size, self._maxsize))
        if self.manager:
            self.manager.kill()
        while len(self._worker_greenlets) < size:
            self._add_thread()
        delay = self.hub.loop.approx_timer_resolution
        while len(self._worker_greenlets) > size:
            while (len(self._worker_greenlets) - size
                   > self.task_queue.unfinished_tasks):
                self.task_queue.put(None)
            if getcurrent() is self.hub:
                break
            sleep(delay)
            delay = min(delay * 2, .05)
        if self._worker_greenlets:
            self.fork_watcher.start(self._on_fork)
        else:
            self.fork_watcher.stop()

    size = property(_get_size,
                    _set_size,
                    doc="""\
    The number of running pooled worker threads.

    Setting this attribute will add or remove running
    worker threads, up to `maxsize`.

    Initially there are no pooled running worker threads, and
    threads are created on demand to satisfy concurrent
    requests up to `maxsize` threads.
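
    For example (a sketch, assuming ``pool`` is an instance with a positive
    `maxsize`), workers can be started eagerly or asked to exit::

        pool.size = pool.maxsize   # pre-spawn every worker thread
        pool.size = 0              # ask all worker threads to exit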
    """)

    def _on_fork(self):
        # fork() only leaves one thread; also screws up locks;
        # let's re-create locks and threads, and do our best to
        # clean up any worker threads left behind.
        # NOTE: See comment in gevent.hub.reinit.
        pid = os.getpid()
        if pid != self.pid:
            # The OS threads have been destroyed, but the Python
            # objects may live on, creating refcount "leaks". Python 2
            # leaves dead frames (those that are for dead OS threads)
            # around; Python 3.8 does not.
            thread_ident_to_frame = dict(sys._current_frames())
            for worker in list(self._worker_greenlets):
                frame = thread_ident_to_frame.get(worker._thread_ident)
                clear_stack_frames(frame)
                worker.cleanup(worker._hub_of_worker)
                # We can't throw anything to the greenlet, nor can we
                # switch to it or set a parent. Those would all be cross-thread
                # operations, which aren't allowed.
                worker.__dict__.clear()

            # We've cleared f_locals and on Python 3.4, possibly the actual
            # array locals of the stack frame, but the task queue may still be
            # referenced if we didn't actually get all the locals. Shut it down
            # and clear it before we throw away our reference.
            self.task_queue.kill()
            self.__init__(self._maxsize)

    def join(self):
        """Waits until all outstanding tasks have been completed."""
        delay = max(0.0005, self.hub.loop.approx_timer_resolution)
        while self.task_queue.unfinished_tasks > 0:
            sleep(delay)
            delay = min(delay * 2, .05)

    def kill(self):
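        """
        Shrink the pool to zero worker threads (asking each worker to
        exit) and close the fork watcher.
        """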
        self.size = 0
        self.fork_watcher.close()

    def _adjust_step(self):
        # If it is both possible and necessary to add a thread, do it.
        while (len(self._worker_greenlets) < self._maxsize and
               self.task_queue.unfinished_tasks > len(self._worker_greenlets)):
            self._add_thread()
        # while the number of threads is more than maxsize, kill one
        # we do not check what's already in task_queue - it could be all Nones
        while (len(self._worker_greenlets) - self._maxsize
               > self.task_queue.unfinished_tasks):
            self.task_queue.put(None)
        if self._worker_greenlets:
            self.fork_watcher.start(self._on_fork)
        elif self.fork_watcher is not None:
            self.fork_watcher.stop()

    def _adjust_wait(self):
        delay = 0.0001
        while True:
            self._adjust_step()
            if len(self._worker_greenlets) <= self._maxsize:
                return
            sleep(delay)
            delay = min(delay * 2, .05)

    def adjust(self):
        self._adjust_step()
        if not self.manager and len(self._worker_greenlets) > self._maxsize:
            # We might need to feed more Nones into the pool to shut
            # down extra threads.
            self.manager = Greenlet.spawn(self._adjust_wait)

    def _add_thread(self):
        _WorkerGreenlet(self)

    def spawn(self, func, *args, **kwargs):
        """
        Add a new task to the threadpool that will run ``func(*args,
        **kwargs)``.

        Waits until a slot is available. Creates a new native thread
        if necessary.

        This must only be called from the native thread that owns this
        object's hub. This is because creating the necessary data
        structures to communicate back to this thread isn't thread
        safe, so the hub must not be running something else. Also,
        ensuring the pool size stays correct only works within a
        single thread.

        :return: A :class:`gevent.event.AsyncResult`.
        :raises InvalidThreadUseError: If called from a different thread.

        .. versionchanged:: 1.5
           Document the thread-safety requirements.
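
        A minimal usage sketch; ``zlib.compress`` stands in for any blocking
        call that releases the GIL and does not use the gevent hub::

            import zlib
            import gevent

            pool = gevent.get_hub().threadpool
            result = pool.spawn(zlib.compress, b'x' * (16 * 1024 * 1024), 9)
            data = result.get()   # this greenlet waits; other greenlets keep running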
        """
        if self.hub != get_hub():
            raise InvalidThreadUseError

        while 1:
            semaphore = self._available_worker_threads_greenlet_sem
            semaphore.acquire()
            if semaphore is self._available_worker_threads_greenlet_sem:
                # If we were asked to change size or re-init we could have changed
                # semaphore objects.
                break

        # The AsyncResult we return; it lets a greenlet in this thread wait
        # for the pool thread. It is signaled when the async watcher
        # is fired from the pool thread back into this thread.
        result = AsyncResult()
        task_queue = self.task_queue
        # Encapsulates the async watcher the worker thread uses to
        # call back into this thread. Immediately allocates and starts the
        # async watcher in this thread, because it uses this hub/loop,
        # which is not thread safe.
        thread_result = None
        try:
            thread_result = ThreadResult(result, self.hub, semaphore.release)
            task_queue.put((func, args, kwargs, thread_result))
            self.adjust()
        except:
            if thread_result is not None:
                thread_result.destroy_in_main_thread()
            semaphore.release()
            raise
        return result

    def _apply_immediately(self):
        # If we're being called from a different thread than the one that
        # created us, e.g., because a worker task is trying to use apply()
        # recursively, we have no choice but to run the task immediately;
        # if we try to AsyncResult.get() in the worker thread, it's likely to have
        # nothing to switch to and lead to a LoopExit.
        return get_hub() is not self.hub

    def _apply_async_cb_spawn(self, callback, result):
        callback(result)

    def _apply_async_use_greenlet(self):
        # Always go to Greenlet because our self.spawn uses threads
        return True