Code example #1
    def imap_memory_check(self, concurrency):
        # checks that imap is strictly
        # ordered and consumes a constant amount of memory
        p = greenpool.GreenPool(concurrency)
        count = 1000
        it = p.imap(passthru, six.moves.range(count))
        latest = -1
        while True:
            try:
                i = six.next(it)
            except StopIteration:
                break

            if latest == -1:
                gc.collect()
                initial_obj_count = len(gc.get_objects())
            self.assert_(i > latest)
            latest = i
            if latest % 5 == 0:
                eventlet.sleep(0.001)
            if latest % 10 == 0:
                gc.collect()
                objs_created = len(gc.get_objects()) - initial_obj_count
                self.assert_(objs_created < 25 * concurrency, objs_created)
        # make sure we got to the end
        self.assertEqual(latest, count - 1)
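
The check above assumes a passthru helper that is not shown in the snippet. A minimal sketch of that helper, together with a standalone call demonstrating the ordering property the test asserts (passthru as a plain identity function is my assumption, not code from the original test module):

    # Sketch of the assumed helper plus a standalone ordering check.
    from eventlet import greenpool


    def passthru(arg):
        # Identity function: GreenPool.imap should hand back exactly what it
        # was given, one item at a time, in input order.
        return arg


    if __name__ == "__main__":
        pool = greenpool.GreenPool(10)
        # imap preserves input order even though items execute concurrently
        assert list(pool.imap(passthru, range(100))) == list(range(100))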
Code example #2
    def spawn_order_check(self, concurrency):
        # checks that piles are strictly ordered
        p = greenpool.GreenPile(concurrency)

        def makework(count, unique):
            for i in six.moves.range(count):
                token = (unique, i)
                p.spawn(pressure, token)

        iters = 1000
        eventlet.spawn(makework, iters, 1)
        eventlet.spawn(makework, iters, 2)
        eventlet.spawn(makework, iters, 3)
        p.spawn(pressure, (0, 0))
        latest = [-1] * 4
        received = 0
        it = iter(p)
        while True:
            try:
                i = six.next(it)
            except StressException as exc:
                i = exc.args[0]
            except StopIteration:
                break
            received += 1
            if received % 5 == 0:
                eventlet.sleep(0.0001)
            unique, order = i
            self.assert_(latest[unique] < order)
            latest[unique] = order
        for l in latest[1:]:
            self.assertEqual(l, iters - 1)
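
This check relies on a pressure worker and a StressException type that the snippet does not define. One plausible shape for them, as an illustrative assumption rather than the real test helpers, is a worker that sometimes reports its token back through an exception:

    # Assumed helpers for spawn_order_check (illustrative sketch only).
    import random

    import eventlet


    class StressException(Exception):
        # the consumer recovers the worker's token from args[0]
        pass


    def pressure(token):
        # Yield to the hub briefly, then either return the token or raise it
        # wrapped in StressException; either way the consumer sees the token.
        eventlet.sleep(random.random() / 1000)
        if random.random() < 0.1:
            raise StressException(token)
        return token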
Code example #3
    def test_imap_raises(self):
        # testing the case where the function raises an exception;
        # both that the caller sees that exception, and that the iterator
        # continues to be usable to get the rest of the items
        p = greenpool.GreenPool(4)

        def raiser(item):
            if item == 1 or item == 7:
                raise RuntimeError("intentional error")
            else:
                return item

        it = p.imap(raiser, range(10))
        results = []
        while True:
            try:
                results.append(six.next(it))
            except RuntimeError:
                results.append('r')
            except StopIteration:
                break
        self.assertEqual(results, [0, 'r', 2, 3, 4, 5, 6, 'r', 8, 9])
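
The same recovery pattern works outside a test harness; a short standalone sketch (the function name and failure rule are made up for illustration):

    # Standalone sketch: keep consuming GreenPool.imap after items raise.
    from eventlet import greenpool


    def fetch(n):
        if n % 3 == 0:
            raise ValueError(n)
        return n * n


    pool = greenpool.GreenPool(4)
    results = []
    it = pool.imap(fetch, range(10))
    while True:
        try:
            results.append(next(it))
        except ValueError:
            # the iterator stays usable after an item's function raises
            results.append(None)
        except StopIteration:
            break
    print(results)  # [None, 1, 4, None, 16, 25, None, 49, 64, None]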
Code example #4
    def generate_results(self, function, iterable, qsize=None):
        """For each tuple (sequence) in *iterable*, launch ``function(*tuple)``
        in its own coroutine -- like ``itertools.starmap()``, but in parallel.
        Yield each of the values returned by ``function()``, in the order
        they're completed rather than the order the coroutines were launched.

        Iteration stops when we've yielded results for each arguments tuple in
        *iterable*. Unlike :meth:`wait_all` and :meth:`process_all`, this
        function does not wait for any previously-submitted :meth:`execute` or
        :meth:`execute_async` calls.

        Results are temporarily buffered in a queue. If you pass *qsize=*, this
        value is used to limit the max size of the queue: an attempt to buffer
        too many results will suspend the completed :class:`CoroutinePool`
        coroutine until the requesting coroutine (the caller of
        :meth:`generate_results`) has retrieved one or more results by calling
        this generator-iterator's ``next()``.

        If any coroutine raises an uncaught exception, that exception will
        propagate to the requesting coroutine via the corresponding ``next()``
        call.

        What I particularly want these tests to illustrate is that using this
        generator function::

            for result in generate_results(function, iterable):
                # ... do something with result ...
                pass

        executes coroutines at least as aggressively as the classic eventlet
        idiom::

            events = [pool.execute(function, *args) for args in iterable]
            for event in events:
                result = event.wait()
                # ... do something with result ...

        even without a distinct event object for every arg tuple in *iterable*,
        and despite the funny flow control from interleaving launches of new
        coroutines with yields of completed coroutines' results.

        (The use case that makes this function preferable to the classic idiom
        above is when the *iterable*, which may itself be a generator, produces
        millions of items.)

        >>> from eventlet import coros
        >>> from eventlet.support import six
        >>> import string
        >>> pool = coros.CoroutinePool(max_size=5)
        >>> pausers = [coros.Event() for x in range(2)]
        >>> def longtask(evt, desc):
        ...     print("%s woke up with %s" % (desc, evt.wait()))
        ...
        >>> pool.launch_all(longtask, zip(pausers, "AB"))
        >>> def quicktask(desc):
        ...     print("returning %s" % desc)
        ...     return desc
        ...

        (Instead of using a ``for`` loop, step through :meth:`generate_results`
        items individually to illustrate timing)

        >>> step = iter(pool.generate_results(quicktask, string.ascii_lowercase))
        >>> print(six.next(step))
        returning a
        returning b
        returning c
        a
        >>> print(six.next(step))
        b
        >>> print(six.next(step))
        c
        >>> print(six.next(step))
        returning d
        returning e
        returning f
        d
        >>> pausers[0].send("A")
        >>> print(six.next(step))
        e
        >>> print(six.next(step))
        f
        >>> print(six.next(step))
        A woke up with A
        returning g
        returning h
        returning i
        g
        >>> print("".join([six.next(step) for x in range(3)]))
        returning j
        returning k
        returning l
        returning m
        hij
        >>> pausers[1].send("B")
        >>> print("".join([six.next(step) for x in range(4)]))
        B woke up with B
        returning n
        returning o
        returning p
        returning q
        klmn
        """
        # Get an iterator because of our funny nested loop below. Wrap the
        # iterable in enumerate() so we count items that come through.
        tuples = iter(enumerate(iterable))
        # If the iterable is empty, this whole function is a no-op, and we can
        # save ourselves some grief by just quitting out. In particular, once
        # we enter the outer loop below, we're going to wait on the queue --
        # but if we launched no coroutines with that queue as the destination,
        # we could end up waiting a very long time.
        try:
            index, args = six.next(tuples)
        except StopIteration:
            return
        # From this point forward, 'args' is the current arguments tuple and
        # 'index+1' counts how many such tuples we've seen.
        # This implementation relies on the fact that _execute() accepts an
        # event-like object, and -- unless it's None -- the completed
        # coroutine calls send(result). We slyly pass a queue rather than an
        # event -- the same queue instance for all coroutines. This is why our
        # queue interface intentionally resembles the event interface.
        q = coros.queue(max_size=qsize)
        # How many results have we yielded so far?
        finished = 0
        # This first loop is only until we've launched all the coroutines. Its
        # complexity is because if iterable contains more args tuples than the
        # size of our pool, attempting to _execute() the (poolsize+1)th
        # coroutine would suspend until something completes and send()s its
        # result to our queue. But to keep down queue overhead and to maximize
        # responsiveness to our caller, we'd rather suspend on reading the
        # queue. So we stuff the pool as full as we can, then wait for
        # something to finish, then stuff more coroutines into the pool.
        try:
            while True:
                # Before each yield, start as many new coroutines as we can fit.
                # (The self.free() test isn't 100% accurate: if we happen to be
                # executing in one of the pool's coroutines, we could _execute()
                # without waiting even if self.free() reports 0. See _execute().)
                # The point is that we don't want to wait in the _execute() call,
                # we want to wait in the q.wait() call.
                # IMPORTANT: at start, and whenever we've caught up with all
                # coroutines we've launched so far, we MUST iterate this inner
                # loop at least once, regardless of self.free() -- otherwise the
                # q.wait() call below will deadlock!
                # Recall that index is the index of the NEXT args tuple that we
                # haven't yet launched. Therefore it counts how many args tuples
                # we've launched so far.
                while self.free() > 0 or finished == index:
                    # Just like the implementation of execute_async(), save that
                    # we're passing our queue instead of None as the "event" to
                    # which to send() the result.
                    self._execute(q, function, args, {})
                    # We've consumed that args tuple, advance to next.
                    index, args = six.next(tuples)
                # Okay, we've filled up the pool again, yield a result -- which
                # will probably wait for a coroutine to complete. Although we do
                # have q.ready(), so we could iterate without waiting, we avoid
                # that because every yield could involve considerable real time.
                # We don't know how long it takes to return from yield, so every
                # time we do, take the opportunity to stuff more requests into the
                # pool before yielding again.
                yield q.wait()
                # Be sure to count results so we know when to stop!
                finished += 1
        except StopIteration:
            pass
        # Here we've exhausted the input iterable. index+1 is the total number
        # of coroutines we've launched. We probably haven't yielded that many
        # results yet. Wait for the rest of the results, yielding them as they
        # arrive.
        while finished < index + 1:
            yield q.wait()
            finished += 1
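
The long comment block above boils down to one trick: every coroutine sends its result into a single shared queue, and the generator suspends on reading that queue rather than on launching work. A compact restatement of that pattern with the current GreenPool/queue API follows; it is a sketch of the idea, not this module's code, and it omits the qsize bound, which needs the more careful flow control implemented above:

    # Queue-draining pattern restated with modern eventlet primitives.
    from eventlet import greenpool, queue


    def generate_results_sketch(function, iterable, concurrency=5):
        pool = greenpool.GreenPool(concurrency)
        results = queue.LightQueue()      # every worker sends into this queue

        def worker(args):
            results.put(function(*args))

        outstanding = 0
        for args in iterable:
            pool.spawn_n(worker, args)    # blocks only while the pool is full
            outstanding += 1
            while not results.empty():    # drain whatever has already finished
                yield results.get()
                outstanding -= 1
        while outstanding:                # wait out the stragglers
            yield results.get()
            outstanding -= 1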