Code Example #1
File: pool.py Project: DigDug101/gevent
    def __init__(self,
                 func,
                 iterable,
                 spawn=None,
                 maxsize=None,
                 _zipped=False):
        """
        An iterator that yields map results as the corresponding greenlets finish.

        :keyword int maxsize: If given and not-None, specifies the maximum number of
            finished results that will be allowed to accumulate awaiting the reader;
            more than that number of results will cause map function greenlets to begin
            to block. This is most useful if there is a great disparity in the speed of
            the mapping code and the consumer, and the results consume a great deal of resources.
            Using a bound is more computationally expensive than not using a bound.

        .. versionchanged:: 1.1b3
            Added the *maxsize* parameter.
        """
        from gevent.queue import Queue
        Greenlet.__init__(self)
        if spawn is not None:
            self.spawn = spawn
        if _zipped:
            self._zipped = _zipped
        self.func = func
        self.iterable = iterable
        self.queue = Queue()
        if maxsize:
            # Bounding the queue is not enough if we want to keep from
            # accumulating objects; the result value will be around as
            # the greenlet's result, blocked on self.queue.put(), and
            # we'll go on to spawn another greenlet, which in turn can
            # create the result. So we need a semaphore to prevent a
            # greenlet from exiting while the queue is full so that we
            # don't spawn the next greenlet (assuming that self.spawn
            # is of course bounded). (Alternatively we could have the
            # greenlet itself do the insert into the pool, but that
            # takes some rework).
            #
            # Given the use of a semaphore at this level, sizing the queue becomes
            # redundant, and that lets us avoid having to use self.link() instead
            # of self.rawlink() to avoid having blocking methods called in the
            # hub greenlet.
            self._result_semaphore = Semaphore(maxsize)
        else:
            self._result_semaphore = DummySemaphore()
        self.count = 0
        self.finished = False
        # If the queue size is unbounded, then we want to call all
        # the links (_on_finish and _on_result) directly in the hub greenlet
        # for efficiency. However, if the queue is bounded, we can't do that if
        # the queue might block (because if there's no waiter the hub can switch to,
        # the queue simply raises Full). Therefore, in that case, we use
        # the safer, somewhat-slower (because it spawns a greenlet) link() methods.
        # This means that _on_finish and _on_result can be called and interleaved in any order
        # if the call to self.queue.put() blocks.
        # Note that right now we're not bounding the queue, instead using a semaphore.
        self.rawlink(self._on_finish)
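
This constructor backs gevent's imap_unordered. A minimal usage sketch of the *maxsize* bound it documents, assuming gevent 1.1 or later where Pool.imap_unordered accepts that keyword; fetch and the concrete numbers are illustrative:

import gevent
from gevent.pool import Pool

def fetch(n):
    gevent.sleep(0.01)          # stand-in for I/O-bound work
    return n * n

pool = Pool(10)
# maxsize=4 caps how many finished results may sit unread; once four are
# queued, further mapping greenlets block until the consumer calls next().
for result in pool.imap_unordered(fetch, range(100), maxsize=4):
    print(result)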
Code Example #2
File: app.py Project: Montana/dirt
 def _get_call_semaphore(self, call):
     if call.name.startswith("debug."):  # XXX A bit of a hack
         return DummySemaphore()
     if self._call_semaphore is None:
         if self.max_concurrent_calls is None:
             semaphore = DummySemaphore()
         else:
             semaphore = BoundedSemaphore(self.max_concurrent_calls)
         self._call_semaphore = semaphore
     return self._call_semaphore
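
The helper above shows the usual pattern: construct a real semaphore only when a limit is configured, and fall back to DummySemaphore otherwise so call sites never branch. A self-contained sketch of that pattern (make_limiter is a hypothetical name, not part of the dirt project):

from gevent.lock import BoundedSemaphore, DummySemaphore

def make_limiter(max_concurrent=None):
    # DummySemaphore implements acquire/release and the context-manager
    # protocol but never blocks, so "no limit" costs essentially nothing.
    if max_concurrent is None:
        return DummySemaphore()
    return BoundedSemaphore(max_concurrent)

limiter = make_limiter()        # unlimited: DummySemaphore
with limiter:                   # identical call site for either type
    pass                        # the rate-limited work goes here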
Code Example #3
File: pool.py Project: dsuch/gevent
    def __init__(self, func, iterable, spawn=None, maxsize=None, _zipped=False):
        """
        An iterator that yields map results as the corresponding greenlets finish.

        :keyword int maxsize: If given and not-None, specifies the maximum number of
            finished results that will be allowed to accumulate awaiting the reader;
            more than that number of results will cause map function greenlets to begin
            to block. This is most useful if there is a great disparity in the speed of
            the mapping code and the consumer, and the results consume a great deal of resources.
            Using a bound is more computationally expensive than not using a bound.

        .. versionchanged:: 1.1b3
            Added the *maxsize* parameter.
        """
        from gevent.queue import Queue
        Greenlet.__init__(self)
        if spawn is not None:
            self.spawn = spawn
        if _zipped:
            self._zipped = _zipped
        self.func = func
        self.iterable = iterable
        self.queue = Queue()
        if maxsize:
            # Bounding the queue is not enough if we want to keep from
            # accumulating objects; the result value will be around as
            # the greenlet's result, blocked on self.queue.put(), and
            # we'll go on to spawn another greenlet, which in turn can
            # create the result. So we need a semaphore to prevent a
            # greenlet from exiting while the queue is full so that we
            # don't spawn the next greenlet (assuming that self.spawn
            # is of course bounded). (Alternatively we could have the
            # greenlet itself do the insert into the pool, but that
            # takes some rework).
            #
            # Given the use of a semaphore at this level, sizing the queue becomes
            # redundant, and that lets us avoid having to use self.link() instead
            # of self.rawlink() to avoid having blocking methods called in the
            # hub greenlet.
            self._result_semaphore = Semaphore(maxsize)
        else:
            self._result_semaphore = DummySemaphore()
        self.count = 0
        self.finished = False
        # If the queue size is unbounded, then we want to call all
        # the links (_on_finish and _on_result) directly in the hub greenlet
        # for efficiency. However, if the queue is bounded, we can't do that if
        # the queue might block (because if there's no waiter the hub can switch to,
        # the queue simply raises Full). Therefore, in that case, we use
        # the safer, somewhat-slower (because it spawns a greenlet) link() methods.
        # This means that _on_finish and _on_result can be called and interleaved in any order
        # if the call to self.queue.put() blocks.
        # Note that right now we're not bounding the queue, instead using a semaphore.
        self.rawlink(self._on_finish)
Code Example #4
File: _fileobjectcommon.py Project: sidawater/gevent
    def __init__(self, *args, **kwargs):
        """
        :keyword bool lock: If True (the default) then all operations will
           be performed one-by-one. Note that this does not guarantee that, if using
           this file object from multiple threads/greenlets, operations will be performed
           in any particular order, only that no two operations will be attempted at the
           same time. You can also pass your own :class:`gevent.lock.Semaphore` to synchronize
           file operations with an external resource.
        :keyword bool closefd: If True (the default) then when this object is closed,
           the underlying object is closed as well. If *fobj* is a path, then
           *closefd* must be True.
        """
        lock = kwargs.pop('lock', True)
        threadpool = kwargs.pop('threadpool', None)
        descriptor = OpenDescriptor(*args, **kwargs)

        self.threadpool = threadpool or get_hub().threadpool
        self.lock = lock
        if self.lock is True:
            self.lock = Semaphore()
        elif not self.lock:
            self.lock = DummySemaphore()
        if not hasattr(self.lock, '__enter__'):
            raise TypeError('Expected a Semaphore or boolean, got %r' %
                            type(self.lock))

        self.__io_holder = [descriptor.open()]  # signal for _wrap_method
        super(FileObjectThread, self).__init__(self.__io_holder[0],
                                               descriptor.closefd)
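
A usage sketch for the *lock* keyword documented above, assuming a gevent version (as here) where FileObjectThread accepts a path; the file name and the shared Semaphore are illustrative:

from gevent.fileobject import FileObjectThread
from gevent.lock import Semaphore

shared_lock = Semaphore()       # could also guard some external resource
f = FileObjectThread('out.log', 'w', lock=shared_lock)
try:
    f.write('hello\n')          # runs in the threadpool, under the lock
finally:
    f.close()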
Code Example #5
    def __init__(self, fobj, mode=None, bufsize=-1, close=True, threadpool=None, lock=True):
        """
        :param fobj: The underlying file-like object to wrap, or an integer fileno
           that will be passed to :func:`os.fdopen` along with *mode* and *bufsize*.
        :keyword bool lock: If True (the default) then all operations will
           be performed one-by-one. Note that this does not guarantee that, if using
           this file object from multiple threads/greenlets, operations will be performed
           in any particular order, only that no two operations will be attempted at the
           same time. You can also pass your own :class:`gevent.lock.Semaphore` to synchronize
           file operations with an external resource.
        :keyword bool close: If True (the default) then when this object is closed,
           the underlying object is closed as well.
        """
        closefd = close
        self.threadpool = threadpool or get_hub().threadpool
        self.lock = lock
        if self.lock is True:
            self.lock = Semaphore()
        elif not self.lock:
            self.lock = DummySemaphore()
        if not hasattr(self.lock, '__enter__'):
            raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))
        if isinstance(fobj, integer_types):
            if not closefd:
                # we cannot do this, since the fdopen object will close the descriptor
                raise TypeError('FileObjectThread does not support close=False on an fd.')
            if mode is None:
                assert bufsize == -1, "If you use the default mode, you can't choose a bufsize"
                fobj = os.fdopen(fobj)
            else:
                fobj = os.fdopen(fobj, mode, bufsize)

        self.__io_holder = [fobj] # signal for _wrap_method
        super(FileObjectThread, self).__init__(fobj, closefd)
Code Example #6
File: pool.py Project: DigDug101/gevent
    def __init__(self, size=None, greenlet_class=None):
        """
        Create a new pool.

        A pool is like a group, but the maximum number of members
        is governed by the *size* parameter.

        :keyword int size: If given, this non-negative integer is the
            maximum count of active greenlets that will be allowed in
            this pool. A few values have special significance:

            * ``None`` (the default) places no limit on the number of
              greenlets. This is useful when you need to track, but not limit,
              greenlets, as with :class:`gevent.pywsgi.WSGIServer`
            * ``0`` creates a pool that can never have any active greenlets. Attempting
              to spawn in this pool will block forever. This is only useful
              if an application uses :meth:`wait_available` with a timeout and checks
              :meth:`free_count` before attempting to spawn.
        """
        if size is not None and size < 0:
            raise ValueError('size must not be negative: %r' % (size, ))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        if size is None:
            self._semaphore = DummySemaphore()
        else:
            self._semaphore = Semaphore(size)
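
A short sketch of how the two branches behave (worker and the counts are illustrative): with a bounded pool the Semaphore makes spawn block once *size* greenlets are active, while size=None installs a DummySemaphore and spawn never blocks:

import gevent
from gevent.pool import Pool

def worker(n):
    gevent.sleep(0.1)
    return n

pool = Pool(2)                  # Semaphore(2): at most two active greenlets
for i in range(6):
    pool.spawn(worker, i)       # blocks here whenever the pool is full
pool.join()

unbounded = Pool()              # size=None -> DummySemaphore, never blocks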
Code Example #7
 def __init__(self, fobj, *args, **kwargs):
     """
     :param fobj: The underlying file-like object to wrap, or an integer fileno
        that will be passed to :func:`os.fdopen` along with everything in *args*.
     :keyword bool lock: If True (the default) then all operations will
        be performed one-by-one. Note that this does not guarantee that, if using
        this file object from multiple threads/greenlets, operations will be performed
        in any particular order, only that no two operations will be attempted at the
        same time. You can also pass your own :class:`gevent.lock.Semaphore` to synchronize
        file operations with an external resource.
     :keyword bool close: If True (the default) then when this object is closed,
        the underlying object is closed as well.
     """
     self._close = kwargs.pop('close', True)
     self.threadpool = kwargs.pop('threadpool', None)
     self.lock = kwargs.pop('lock', True)
     if kwargs:
         raise TypeError('Unexpected arguments: %r' % kwargs.keys())
     if self.lock is True:
         self.lock = Semaphore()
     elif not self.lock:
         self.lock = DummySemaphore()
     if not hasattr(self.lock, '__enter__'):
         raise TypeError('Expected a Semaphore or boolean, got %r' %
                         type(self.lock))
     if isinstance(fobj, integer_types):
         if not self._close:
             # we cannot do this, since the fdopen object will close the descriptor
             raise TypeError(
                 'FileObjectThread does not support close=False')
         fobj = os.fdopen(fobj, *args)
     self.io = fobj
     if self.threadpool is None:
         self.threadpool = get_hub().threadpool
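
The integer-fd branch above adopts the descriptor with os.fdopen, which is why close=False is rejected: the fdopen'd object owns the fd and will close it. A small sketch exercising that branch with a pipe (the payload is illustrative):

import os
from gevent.fileobject import FileObjectThread

r, w = os.pipe()
reader = FileObjectThread(r, 'rb')   # becomes os.fdopen(r, 'rb') internally
os.write(w, b'ping\n')
os.close(w)                          # EOF for the reader
print(reader.read())                 # b'ping\n', read via the threadpool
reader.close()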
Code Example #8
def configure_socket_lock(max_connections=0):
    global _socket_lock
    if _socket_lock is not None:
        raise RuntimeError("socket_lock already configured!")
    if max_connections < 1:
        _socket_lock = DummySemaphore()
    else:
        _socket_lock = BoundedSemaphore(max_connections)
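
A hypothetical call-site sketch for a lock configured as above; open_connection and the connect callable are illustrative, not from the original module:

from gevent.lock import BoundedSemaphore

_socket_lock = BoundedSemaphore(8)   # as if configure_socket_lock(8) had run

def open_connection(connect):
    # Call sites always enter the lock; had max_connections been < 1, the
    # DummySemaphore would make this a no-op instead of capping at 8.
    with _socket_lock:
        return connect()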
Code Example #9
File: pool.py Project: op/pkg-python-gevent
 def __init__(self, size=None, greenlet_class=None):
     if size is not None and size < 0:
         raise ValueError('size must not be negative: %r' % (size, ))
     Group.__init__(self)
     self.size = size
     if greenlet_class is not None:
         self.greenlet_class = greenlet_class
     if size is None:
         self._semaphore = DummySemaphore()
     else:
         self._semaphore = Semaphore(size)
Code Example #10
 def __init__(self, fobj, *args, **kwargs):
     self._close = kwargs.pop('close', True)
     self.threadpool = kwargs.pop('threadpool', None)
     self.lock = kwargs.pop('lock', True)
     if kwargs:
         raise TypeError('Unexpected arguments: %r' % kwargs.keys())
     if self.lock is True:
         self.lock = Semaphore()
     elif not self.lock:
         self.lock = DummySemaphore()
     if not hasattr(self.lock, '__enter__'):
         raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))
     if isinstance(fobj, (int, long)):
         if not self._close:
             # we cannot do this, since the fdopen object will close the descriptor
             raise TypeError('FileObjectThread does not support close=False')
         fobj = os.fdopen(fobj, *args)
     self._fobj = fobj
     if self.threadpool is None:
         self.threadpool = get_hub().threadpool
Code Example #11
File: pool.py Project: dsuch/gevent
class IMapUnordered(Greenlet):
    """
    An iterator of map results.
    """

    _zipped = False

    def __init__(self, func, iterable, spawn=None, maxsize=None, _zipped=False):
        """
        An iterator that yields map results as the corresponding greenlets finish.

        :keyword int maxsize: If given and not-None, specifies the maximum number of
            finished results that will be allowed to accumulate awaiting the reader;
            more than that number of results will cause map function greenlets to begin
            to block. This is most useful if there is a great disparity in the speed of
            the mapping code and the consumer, and the results consume a great deal of resources.
            Using a bound is more computationally expensive than not using a bound.

        .. versionchanged:: 1.1b3
            Added the *maxsize* parameter.
        """
        from gevent.queue import Queue
        Greenlet.__init__(self)
        if spawn is not None:
            self.spawn = spawn
        if _zipped:
            self._zipped = _zipped
        self.func = func
        self.iterable = iterable
        self.queue = Queue()
        if maxsize:
            # Bounding the queue is not enough if we want to keep from
            # accumulating objects; the result value will be around as
            # the greenlet's result, blocked on self.queue.put(), and
            # we'll go on to spawn another greenlet, which in turn can
            # create the result. So we need a semaphore to prevent a
            # greenlet from exiting while the queue is full so that we
            # don't spawn the next greenlet (assuming that self.spawn
            # is of course bounded). (Alternatively we could have the
            # greenlet itself do the insert into the pool, but that
            # takes some rework).
            #
            # Given the use of a semaphore at this level, sizing the queue becomes
            # redundant, and that lets us avoid having to use self.link() instead
            # of self.rawlink() to avoid having blocking methods called in the
            # hub greenlet.
            self._result_semaphore = Semaphore(maxsize)
        else:
            self._result_semaphore = DummySemaphore()
        self.count = 0
        self.finished = False
        # If the queue size is unbounded, then we want to call all
        # the links (_on_finish and _on_result) directly in the hub greenlet
        # for efficiency. However, if the queue is bounded, we can't do that if
        # the queue might block (because if there's no waiter the hub can switch to,
        # the queue simply raises Full). Therefore, in that case, we use
        # the safer, somewhat-slower (because it spawns a greenlet) link() methods.
        # This means that _on_finish and _on_result can be called and interleaved in any order
        # if the call to self.queue.put() blocks.
        # Note that right now we're not bounding the queue, instead using a semaphore.
        self.rawlink(self._on_finish)

    def __iter__(self):
        return self

    def next(self):
        self._result_semaphore.release()
        value = self._inext()
        if isinstance(value, Failure):
            raise value.exc
        return value
    __next__ = next

    def _inext(self):
        return self.queue.get()

    def _ispawn(self, func, item):
        self._result_semaphore.acquire()
        self.count += 1
        g = self.spawn(func, item) if not self._zipped else self.spawn(func, *item)
        g.rawlink(self._on_result)
        return g

    def _run(self):
        try:
            func = self.func
            for item in self.iterable:
                self._ispawn(func, item)
        finally:
            self.__dict__.pop('spawn', None)
            self.__dict__.pop('func', None)
            self.__dict__.pop('iterable', None)

    def _on_result(self, greenlet):
        # This method can either be called in the hub greenlet (if the
        # queue is unbounded) or its own greenlet. If it's called in
        # its own greenlet, the calls to put() may block and switch
        # greenlets, which in turn could mutate our state. So any
        # state on this object that we need to look at, notably
        # self.count, we need to capture or mutate *before* we put.
        # (Note that right now we're not bounding the queue, but we may
        # choose to do so in the future so this implementation will be left in case.)
        self.count -= 1
        count = self.count
        finished = self.finished
        ready = self.ready()
        put_finished = False

        if ready and count <= 0 and not finished:
            finished = self.finished = True
            put_finished = True

        if greenlet.successful():
            self.queue.put(self._iqueue_value_for_success(greenlet))
        else:
            self.queue.put(self._iqueue_value_for_failure(greenlet))

        if put_finished:
            self.queue.put(self._iqueue_value_for_finished())

    def _on_finish(self, _self):
        if self.finished:
            return

        if not self.successful():
            self.finished = True
            self.queue.put(self._iqueue_value_for_self_failure())
            return

        if self.count <= 0:
            self.finished = True
            self.queue.put(self._iqueue_value_for_finished())

    def _iqueue_value_for_success(self, greenlet):
        return greenlet.value

    def _iqueue_value_for_failure(self, greenlet):
        return Failure(greenlet.exception, getattr(greenlet, '_raise_exception'))

    def _iqueue_value_for_finished(self):
        return Failure(StopIteration)

    def _iqueue_value_for_self_failure(self):
        return Failure(self.exception, self._raise_exception)
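
One consequence of the Failure wrapper used above: an exception raised in a worker greenlet is queued via _iqueue_value_for_failure and re-raised from next() in the consuming greenlet. A small sketch (maybe_fail and the values are illustrative):

import gevent
from gevent.pool import Pool

def maybe_fail(n):
    if n == 3:
        raise ValueError(n)
    gevent.sleep(0.01)
    return n

try:
    for value in Pool(4).imap_unordered(maybe_fail, range(6)):
        print(value)
except ValueError as exc:
    print('a worker raised:', exc)   # surfaced by next(), not lost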
Code Example #12
File: pool.py Project: DigDug101/gevent
class IMapUnordered(Greenlet):
    """
    An iterator of map results.
    """

    _zipped = False

    def __init__(self,
                 func,
                 iterable,
                 spawn=None,
                 maxsize=None,
                 _zipped=False):
        """
        An iterator that yields map results as the corresponding greenlets finish.

        :keyword int maxsize: If given and not-None, specifies the maximum number of
            finished results that will be allowed to accumulate awaiting the reader;
            more than that number of results will cause map function greenlets to begin
            to block. This is most useful if there is a great disparity in the speed of
            the mapping code and the consumer, and the results consume a great deal of resources.
            Using a bound is more computationally expensive than not using a bound.

        .. versionchanged:: 1.1b3
            Added the *maxsize* parameter.
        """
        from gevent.queue import Queue
        Greenlet.__init__(self)
        if spawn is not None:
            self.spawn = spawn
        if _zipped:
            self._zipped = _zipped
        self.func = func
        self.iterable = iterable
        self.queue = Queue()
        if maxsize:
            # Bounding the queue is not enough if we want to keep from
            # accumulating objects; the result value will be around as
            # the greenlet's result, blocked on self.queue.put(), and
            # we'll go on to spawn another greenlet, which in turn can
            # create the result. So we need a semaphore to prevent a
            # greenlet from exiting while the queue is full so that we
            # don't spawn the next greenlet (assuming that self.spawn
            # is of course bounded). (Alternatively we could have the
            # greenlet itself do the insert into the pool, but that
            # takes some rework).
            #
            # Given the use of a semaphore at this level, sizing the queue becomes
            # redundant, and that lets us avoid having to use self.link() instead
            # of self.rawlink() to avoid having blocking methods called in the
            # hub greenlet.
            self._result_semaphore = Semaphore(maxsize)
        else:
            self._result_semaphore = DummySemaphore()
        self.count = 0
        self.finished = False
        # If the queue size is unbounded, then we want to call all
        # the links (_on_finish and _on_result) directly in the hub greenlet
        # for efficiency. However, if the queue is bounded, we can't do that if
        # the queue might block (because if there's no waiter the hub can switch to,
        # the queue simply raises Full). Therefore, in that case, we use
        # the safer, somewhat-slower (because it spawns a greenlet) link() methods.
        # This means that _on_finish and _on_result can be called and interleaved in any order
        # if the call to self.queue.put() blocks.
        # Note that right now we're not bounding the queue, instead using a semaphore.
        self.rawlink(self._on_finish)

    def __iter__(self):
        return self

    def next(self):
        self._result_semaphore.release()
        value = self._inext()
        if isinstance(value, Failure):
            raise value.exc
        return value

    __next__ = next

    def _inext(self):
        return self.queue.get()

    def _ispawn(self, func, item):
        self._result_semaphore.acquire()
        self.count += 1
        g = self.spawn(func, item) if not self._zipped else self.spawn(
            func, *item)
        g.rawlink(self._on_result)
        return g

    def _run(self):
        try:
            func = self.func
            for item in self.iterable:
                self._ispawn(func, item)
        finally:
            self.__dict__.pop('spawn', None)
            self.__dict__.pop('func', None)
            self.__dict__.pop('iterable', None)

    def _on_result(self, greenlet):
        # This method can either be called in the hub greenlet (if the
        # queue is unbounded) or its own greenlet. If it's called in
        # its own greenlet, the calls to put() may block and switch
        # greenlets, which in turn could mutate our state. So any
        # state on this object that we need to look at, notably
        # self.count, we need to capture or mutate *before* we put.
        # (Note that right now we're not bounding the queue, but we may
        # choose to do so in the future so this implementation will be left in case.)
        self.count -= 1
        count = self.count
        finished = self.finished
        ready = self.ready()
        put_finished = False

        if ready and count <= 0 and not finished:
            finished = self.finished = True
            put_finished = True

        if greenlet.successful():
            self.queue.put(self._iqueue_value_for_success(greenlet))
        else:
            self.queue.put(self._iqueue_value_for_failure(greenlet))

        if put_finished:
            self.queue.put(self._iqueue_value_for_finished())

    def _on_finish(self, _self):
        if self.finished:
            return

        if not self.successful():
            self.finished = True
            self.queue.put(self._iqueue_value_for_self_failure())
            return

        if self.count <= 0:
            self.finished = True
            self.queue.put(self._iqueue_value_for_finished())

    def _iqueue_value_for_success(self, greenlet):
        return greenlet.value

    def _iqueue_value_for_failure(self, greenlet):
        return Failure(greenlet.exception, getattr(greenlet,
                                                   '_raise_exception'))

    def _iqueue_value_for_finished(self):
        return Failure(StopIteration)

    def _iqueue_value_for_self_failure(self):
        return Failure(self.exception, self._raise_exception)