Example #1
0
 def _get_keyset_for_wait_each(self, keys):
     """
     Helper for wait_each(), wait_each_success() and wait_each_exception():
     those methods wait on an explicitly-passed iterable of keys, or -- when
     the keys argument is omitted -- on every key this DAGPool knows about
     (both running greenthreads and already-posted values). Either way,
     return a set() of the relevant keys.
     """
     if keys is _MISSING:
         # keys arg omitted -- wait on all the keys we know about.
         # Iterating a dict yields its keys, so no iterkeys() needed.
         return set(self.coros) | set(self.values)
     return set(keys)
Example #2
0
 def _get_keyset_for_wait_each(self, keys):
     """
     wait_each(), wait_each_success() and wait_each_exception() wait for
     results from an explicit iterable of keys -- or, when the keys argument
     is omitted, from all known keys. This helper implements that
     distinction, returning a set() of the relevant keys.
     """
     if keys is not _MISSING:
         return set(keys)
     # keys arg omitted -- gather every key we know about, from both the
     # running-greenthread table and the posted-values table.
     known = set(self.coros)
     known.update(self.values)
     return known
Example #3
0
 def keys(self):
     """
     Return a snapshot tuple of keys for which we currently have values.
     """
     # Snapshot, not a live iterator: new values may be posted before the
     # caller finishes iterating. tuple(dict) copies the keys.
     return tuple(self.values)
Example #4
0
 def keys(self):
     """
     Return a snapshot tuple of keys for which we currently have values.
     """
     # Copy the keys up front rather than handing out an iterator:
     # self.values may grow while the caller is still iterating.
     return tuple(self.values.keys())
Example #5
0
def test_wait_each_all():
    """
    Exercise wait_each() with no keys argument: it should yield a
    (key, value) pair for every key in the pool, in the order the values
    are produced, without running ahead of the greenthreads that produce
    them.
    """
    # set up a simple linear dependency chain
    deps = dict(b="a", c="b", d="c", e="d")
    capture = Capture()
    # seed the pool so key "a" starts out with a posted value
    # (presumably (key, value) initial pairs -- see DAGPool constructor)
    pool = DAGPool([("a", "a")])
    # capture a different Event for each key
    events = dict((key, eventlet.event.Event()) for key in six.iterkeys(deps))
    # can't use spawn_many() because we need a different event for each
    for key, dep in six.iteritems(deps):
        pool.spawn(key, dep, observe, capture, events[key])
    keys = "abcde"                      # this specific order
    each = iter(pool.wait_each())
    for pos in range(len(keys)):
        # next value from wait_each()
        k, v = next(each)
        assert_equals(k, keys[pos])
        # advance every pool greenlet as far as it can go
        spin()
        # everything from keys[:pos+1] should have a value by now
        for k in keys[:pos + 1]:
            assert pool.get(k, _notthere) is not _notthere, \
                "greenlet {0} did not yet produce a value".format(k)
        # everything from keys[pos+1:] should not yet
        for k in keys[pos + 1:]:
            assert pool.get(k, _notthere) is _notthere, \
                "wait_each() delayed value for {0}".format(keys[pos])
        # let next greenthread complete
        if pos < len(keys) - 1:
            k = keys[pos + 1]
            events[k].send(k)
Example #6
0
def test_wait_each_all():
    """
    Exercise wait_each() with no keys argument: it should yield a
    (key, value) pair for every key in the pool, in the order the values
    are produced, without running ahead of the greenthreads that produce
    them.
    """
    # set up a simple linear dependency chain
    deps = dict(b="a", c="b", d="c", e="d")
    capture = Capture()
    # seed the pool so key "a" starts out with a posted value
    # (presumably (key, value) initial pairs -- see DAGPool constructor)
    pool = DAGPool([("a", "a")])
    # capture a different Event for each key
    events = dict((key, eventlet.event.Event()) for key in six.iterkeys(deps))
    # can't use spawn_many() because we need a different event for each
    for key, dep in six.iteritems(deps):
        pool.spawn(key, dep, observe, capture, events[key])
    keys = "abcde"  # this specific order
    each = iter(pool.wait_each())
    for pos in range(len(keys)):
        # next value from wait_each()
        k, v = next(each)
        assert_equals(k, keys[pos])
        # advance every pool greenlet as far as it can go
        spin()
        # everything from keys[:pos+1] should have a value by now
        for k in keys[:pos + 1]:
            assert pool.get(k, _notthere) is not _notthere, \
                "greenlet {0} did not yet produce a value".format(k)
        # everything from keys[pos+1:] should not yet
        for k in keys[pos + 1:]:
            assert pool.get(k, _notthere) is _notthere, \
                "wait_each() delayed value for {0}".format(keys[pos])
        # let next greenthread complete
        if pos < len(keys) - 1:
            k = keys[pos + 1]
            events[k].send(k)
Example #7
0
 def running_keys(self):
     """
     Return keys for running DAGPool greenthreads. This includes
     greenthreads blocked while iterating through their *results* iterable,
     that is, greenthreads waiting on values from other keys.
     """
     # Snapshot: self.coros may be modified before the caller finishes
     # iterating over the result. tuple(dict) copies the keys.
     return tuple(self.coros)
Example #8
0
 def running_keys(self):
     """
     Return keys for running DAGPool greenthreads. This includes
     greenthreads blocked while iterating through their *results* iterable,
     that is, greenthreads waiting on values from other keys.
     """
     # Copy the keys now rather than exposing a live view -- self.coros can
     # change while the caller iterates.
     return tuple(self.coros.keys())
Example #9
0
    def waiting_for(self, key=_MISSING):
        """
        waiting_for(key) returns a set() of the keys for which the DAGPool
        greenthread spawned with that *key* is still waiting. If you pass a
        *key* for which no greenthread was spawned, waiting_for() raises
        KeyError.

        waiting_for() without argument returns a dict. Its keys are the keys
        of DAGPool greenthreads still waiting on one or more values. In the
        returned dict, the value of each such key is the set of other keys for
        which that greenthread is still waiting.

        This method allows diagnosing a "hung" DAGPool. If certain
        greenthreads are making no progress, it's possible that they are
        waiting on keys for which there is no greenthread and no :meth:`post`
        data.

        :param key: optional key of a single greenthread to inspect; when
            omitted (the _MISSING sentinel), report on every running
            greenthread.
        :raises KeyError: if *key* is passed but no greenthread was ever
            spawned with that key and no value was ever posted for it.
        """
        # We may have greenthreads whose 'pending' entry indicates they're
        # waiting on some keys even though values have now been posted for
        # some or all of those keys, because those greenthreads have not yet
        # regained control since values were posted. So make a point of
        # excluding values that are now available.
        available = set(six.iterkeys(self.values))

        if key is not _MISSING:
            # waiting_for(key) is semantically different than waiting_for().
            # It's just that they both seem to want the same method name.
            coro = self.coros.get(key, _MISSING)
            if coro is _MISSING:
                # Hmm, no running greenthread with this key. But was there
                # EVER a greenthread with this key? If not, let KeyError
                # propagate.
                self.values[key]
                # Oh good, there's a value for this key. Either the
                # greenthread finished, or somebody posted a value. Just say
                # the greenthread isn't waiting for anything.
                return set()
            else:
                # coro is the _Coro for the running greenthread with the
                # specified key.
                return coro.pending - available

        # This is a waiting_for() call, i.e. a general query rather than for a
        # specific key.

        # Start by iterating over (key, coro) pairs in self.coros. Generate
        # (key, pending) pairs in which 'pending' is the set of keys on which
        # the greenthread believes it's waiting, minus the set of keys that
        # are now available. Filter out any pair in which 'pending' is empty,
        # that is, that greenthread will be unblocked next time it resumes.
        # Make a dict from those pairs.
        return dict(
            (key, pending)
            for key, pending in ((key, (coro.pending - available))
                                 for key, coro in six.iteritems(self.coros))
            if pending)
Example #10
0
    def waiting_for(self, key=_MISSING):
        """
        waiting_for(key) returns a set() of the keys for which the DAGPool
        greenthread spawned with that *key* is still waiting. If you pass a
        *key* for which no greenthread was spawned, waiting_for() raises
        KeyError.

        waiting_for() without argument returns a dict. Its keys are the keys
        of DAGPool greenthreads still waiting on one or more values. In the
        returned dict, the value of each such key is the set of other keys for
        which that greenthread is still waiting.

        This method allows diagnosing a "hung" DAGPool. If certain
        greenthreads are making no progress, it's possible that they are
        waiting on keys for which there is no greenthread and no :meth:`post`
        data.

        :param key: optional key of a single greenthread to inspect; when
            omitted (the _MISSING sentinel), report on every running
            greenthread.
        :raises KeyError: if *key* is passed but no greenthread was ever
            spawned with that key and no value was ever posted for it.
        """
        # We may have greenthreads whose 'pending' entry indicates they're
        # waiting on some keys even though values have now been posted for
        # some or all of those keys, because those greenthreads have not yet
        # regained control since values were posted. So make a point of
        # excluding values that are now available.
        available = set(six.iterkeys(self.values))

        if key is not _MISSING:
            # waiting_for(key) is semantically different than waiting_for().
            # It's just that they both seem to want the same method name.
            coro = self.coros.get(key, _MISSING)
            if coro is _MISSING:
                # Hmm, no running greenthread with this key. But was there
                # EVER a greenthread with this key? If not, let KeyError
                # propagate.
                self.values[key]
                # Oh good, there's a value for this key. Either the
                # greenthread finished, or somebody posted a value. Just say
                # the greenthread isn't waiting for anything.
                return set()
            else:
                # coro is the _Coro for the running greenthread with the
                # specified key.
                return coro.pending - available

        # This is a waiting_for() call, i.e. a general query rather than for a
        # specific key.

        # Start by iterating over (key, coro) pairs in self.coros. Generate
        # (key, pending) pairs in which 'pending' is the set of keys on which
        # the greenthread believes it's waiting, minus the set of keys that
        # are now available. Filter out any pair in which 'pending' is empty,
        # that is, that greenthread will be unblocked next time it resumes.
        # Make a dict from those pairs.
        return dict((key, pending)
                    for key, pending in ((key, (coro.pending - available))
                                         for key, coro in six.iteritems(self.coros))
                    if pending)
Example #11
0
def monkey_patch(**on):
    """Globally patches certain system modules to be greenthread-friendly.

    The keyword arguments afford some control over which modules are patched.
    If no keyword arguments are supplied, all possible modules are patched.
    If keywords are set to True, only the specified modules are patched.  E.g.,
    ``monkey_patch(socket=True, select=True)`` patches only the select and
    socket modules.  Most arguments patch the single module of the same name
    (os, time, select).  The exceptions are socket, which also patches the ssl
    module if present; and thread, which patches thread, threading, and Queue.

    It's safe to call monkey_patch multiple times.
    """
    accepted_args = set(('os', 'select', 'socket', 'thread', 'time', 'psycopg',
                         'MySQLdb', 'builtins'))
    # '__builtin__' (the Python 2 spelling) is accepted as an alias for
    # 'builtins', but passing both at once is ambiguous -- refuse it outright.
    assert not ('__builtin__' in on and 'builtins' in on)
    try:
        b = on.pop('__builtin__')
    except KeyError:
        pass
    else:
        on['builtins'] = b

    default_on = on.pop("all", None)

    # Reject unknown keyword arguments early with a clear message.
    for k in six.iterkeys(on):
        if k not in accepted_args:
            raise TypeError("monkey_patch() got an unexpected "
                            "keyword argument %r" % k)
    if default_on is None:
        # If the caller explicitly enabled any module, all unnamed modules
        # default to off; otherwise everything defaults to on.
        default_on = not (True in on.values())
    for modname in accepted_args:
        if modname == 'MySQLdb':
            # MySQLdb is only on when explicitly patched for the moment
            on.setdefault(modname, False)
        if modname == 'builtins':
            # builtins likewise requires an explicit opt-in
            on.setdefault(modname, False)
        on.setdefault(modname, default_on)

    # Gather green replacement modules for everything enabled and not yet
    # patched; the module-level already_patched dict makes repeat calls
    # idempotent.
    modules_to_patch = []
    for name, modules_function in [
        ('os', _green_os_modules),
        ('select', _green_select_modules),
        ('socket', _green_socket_modules),
        ('thread', _green_thread_modules),
        ('time', _green_time_modules),
        ('MySQLdb', _green_MySQLdb),
        ('builtins', _green_builtins),
    ]:
        if on[name] and not already_patched.get(name):
            modules_to_patch += modules_function()
            already_patched[name] = True

    if on['psycopg'] and not already_patched.get('psycopg'):
        try:
            from eventlet.support import psycopg2_patcher
            psycopg2_patcher.make_psycopg_green()
            already_patched['psycopg'] = True
        except ImportError:
            # note that if we get an importerror from trying to
            # monkeypatch psycopg, we will continually retry it
            # whenever monkey_patch is called; this should not be a
            # performance problem but it allows is_monkey_patched to
            # tell us whether or not we succeeded
            pass

    # Hold the import lock while mutating modules so a concurrent import
    # cannot observe a module in a half-patched state.
    imp.acquire_lock()
    try:
        for name, mod in modules_to_patch:
            orig_mod = sys.modules.get(name)
            if orig_mod is None:
                orig_mod = __import__(name)
            # Overwrite each attribute the green module declares patched...
            for attr_name in mod.__patched__:
                patched_attr = getattr(mod, attr_name, None)
                if patched_attr is not None:
                    setattr(orig_mod, attr_name, patched_attr)
            # ...and delete any attributes it declares removed.
            deleted = getattr(mod, '__deleted__', [])
            for attr_name in deleted:
                if hasattr(orig_mod, attr_name):
                    delattr(orig_mod, attr_name)
    finally:
        imp.release_lock()

    if sys.version_info >= (3, 3):
        import importlib._bootstrap
        thread = original('_thread')
        # importlib must use real thread locks, not eventlet.Semaphore
        importlib._bootstrap._thread = thread

        # Issue #185: Since Python 3.3, threading.RLock is implemented in C and
        # so call a C function to get the thread identifier, instead of calling
        # threading.get_ident(). Force the Python implementation of RLock which
        # calls threading.get_ident() and so is compatible with eventlet.
        import threading
        threading.RLock = threading._PyRLock
Example #12
0
def monkey_patch(**on):
    """Globally patches certain system modules to be greenthread-friendly.

    The keyword arguments afford some control over which modules are patched.
    If no keyword arguments are supplied, all possible modules are patched.
    If keywords are set to True, only the specified modules are patched.  E.g.,
    ``monkey_patch(socket=True, select=True)`` patches only the select and
    socket modules.  Most arguments patch the single module of the same name
    (os, time, select).  The exceptions are socket, which also patches the ssl
    module if present; and thread, which patches thread, threading, and Queue.

    It's safe to call monkey_patch multiple times.
    """
    accepted_args = set(('os', 'select', 'socket',
                         'thread', 'time', 'psycopg', 'MySQLdb', '__builtin__'))
    default_on = on.pop("all", None)
    # Reject unknown keyword arguments early with a clear message.
    for k in six.iterkeys(on):
        if k not in accepted_args:
            raise TypeError("monkey_patch() got an unexpected "
                            "keyword argument %r" % k)
    if default_on is None:
        # If the caller explicitly enabled any module, all unnamed modules
        # default to off; otherwise everything defaults to on.
        default_on = not (True in on.values())
    for modname in accepted_args:
        if modname == 'MySQLdb':
            # MySQLdb is only on when explicitly patched for the moment
            on.setdefault(modname, False)
        if modname == '__builtin__':
            # __builtin__ likewise requires an explicit opt-in
            on.setdefault(modname, False)
        on.setdefault(modname, default_on)

    # Gather green replacement modules for everything enabled and not yet
    # patched; the module-level already_patched dict makes repeat calls
    # idempotent.
    modules_to_patch = []
    if on['os'] and not already_patched.get('os'):
        modules_to_patch += _green_os_modules()
        already_patched['os'] = True
    if on['select'] and not already_patched.get('select'):
        modules_to_patch += _green_select_modules()
        already_patched['select'] = True
    if on['socket'] and not already_patched.get('socket'):
        modules_to_patch += _green_socket_modules()
        already_patched['socket'] = True
    if on['thread'] and not already_patched.get('thread'):
        modules_to_patch += _green_thread_modules()
        already_patched['thread'] = True
    if on['time'] and not already_patched.get('time'):
        modules_to_patch += _green_time_modules()
        already_patched['time'] = True
    if on.get('MySQLdb') and not already_patched.get('MySQLdb'):
        modules_to_patch += _green_MySQLdb()
        already_patched['MySQLdb'] = True
    if on.get('__builtin__') and not already_patched.get('__builtin__'):
        modules_to_patch += _green_builtins()
        already_patched['__builtin__'] = True
    if on['psycopg'] and not already_patched.get('psycopg'):
        try:
            from eventlet.support import psycopg2_patcher
            psycopg2_patcher.make_psycopg_green()
            already_patched['psycopg'] = True
        except ImportError:
            # note that if we get an importerror from trying to
            # monkeypatch psycopg, we will continually retry it
            # whenever monkey_patch is called; this should not be a
            # performance problem but it allows is_monkey_patched to
            # tell us whether or not we succeeded
            pass

    # Hold the import lock while mutating modules so a concurrent import
    # cannot observe a module in a half-patched state.
    imp.acquire_lock()
    try:
        for name, mod in modules_to_patch:
            orig_mod = sys.modules.get(name)
            if orig_mod is None:
                orig_mod = __import__(name)
            # Overwrite each attribute the green module declares patched.
            for attr_name in mod.__patched__:
                patched_attr = getattr(mod, attr_name, None)
                if patched_attr is not None:
                    setattr(orig_mod, attr_name, patched_attr)
    finally:
        imp.release_lock()
Example #13
0
def monkey_patch(**on):
    """Globally patches certain system modules to be greenthread-friendly.

    The keyword arguments afford some control over which modules are patched.
    If no keyword arguments are supplied, all possible modules are patched.
    If keywords are set to True, only the specified modules are patched.  E.g.,
    ``monkey_patch(socket=True, select=True)`` patches only the select and
    socket modules.  Most arguments patch the single module of the same name
    (os, time, select).  The exceptions are socket, which also patches the ssl
    module if present; and thread, which patches thread, threading, and Queue.

    It's safe to call monkey_patch multiple times.
    """
    accepted_args = set(('os', 'select', 'socket',
                         'thread', 'time', 'psycopg', 'MySQLdb',
                         'builtins'))
    # '__builtin__' (the Python 2 spelling) is accepted as an alias for
    # 'builtins', but passing both at once is ambiguous -- refuse it outright.
    assert not ('__builtin__' in on and 'builtins' in on)
    try:
        b = on.pop('__builtin__')
    except KeyError:
        pass
    else:
        on['builtins'] = b

    default_on = on.pop("all", None)

    # Reject unknown keyword arguments early with a clear message.
    for k in six.iterkeys(on):
        if k not in accepted_args:
            raise TypeError("monkey_patch() got an unexpected "
                            "keyword argument %r" % k)
    if default_on is None:
        # If the caller explicitly enabled any module, all unnamed modules
        # default to off; otherwise everything defaults to on.
        default_on = not (True in on.values())
    for modname in accepted_args:
        if modname == 'MySQLdb':
            # MySQLdb is only on when explicitly patched for the moment
            on.setdefault(modname, False)
        if modname == 'builtins':
            # builtins likewise requires an explicit opt-in
            on.setdefault(modname, False)
        on.setdefault(modname, default_on)

    # Gather green replacement modules for everything enabled and not yet
    # patched; the module-level already_patched dict makes repeat calls
    # idempotent.
    modules_to_patch = []
    if on['os'] and not already_patched.get('os'):
        modules_to_patch += _green_os_modules()
        already_patched['os'] = True
    if on['select'] and not already_patched.get('select'):
        modules_to_patch += _green_select_modules()
        already_patched['select'] = True
    if on['socket'] and not already_patched.get('socket'):
        modules_to_patch += _green_socket_modules()
        already_patched['socket'] = True
    if on['thread'] and not already_patched.get('thread'):
        modules_to_patch += _green_thread_modules()
        already_patched['thread'] = True
    if on['time'] and not already_patched.get('time'):
        modules_to_patch += _green_time_modules()
        already_patched['time'] = True
    if on.get('MySQLdb') and not already_patched.get('MySQLdb'):
        modules_to_patch += _green_MySQLdb()
        already_patched['MySQLdb'] = True
    if on.get('builtins') and not already_patched.get('builtins'):
        modules_to_patch += _green_builtins()
        already_patched['builtins'] = True
    if on['psycopg'] and not already_patched.get('psycopg'):
        try:
            from eventlet.support import psycopg2_patcher
            psycopg2_patcher.make_psycopg_green()
            already_patched['psycopg'] = True
        except ImportError:
            # note that if we get an importerror from trying to
            # monkeypatch psycopg, we will continually retry it
            # whenever monkey_patch is called; this should not be a
            # performance problem but it allows is_monkey_patched to
            # tell us whether or not we succeeded
            pass

    # Hold the import lock while mutating modules so a concurrent import
    # cannot observe a module in a half-patched state.
    imp.acquire_lock()
    try:
        for name, mod in modules_to_patch:
            orig_mod = sys.modules.get(name)
            if orig_mod is None:
                orig_mod = __import__(name)
            # Overwrite each attribute the green module declares patched...
            for attr_name in mod.__patched__:
                patched_attr = getattr(mod, attr_name, None)
                if patched_attr is not None:
                    setattr(orig_mod, attr_name, patched_attr)
            # ...and delete any attributes it declares removed.
            deleted = getattr(mod, '__deleted__', [])
            for attr_name in deleted:
                if hasattr(orig_mod, attr_name):
                    delattr(orig_mod, attr_name)
    finally:
        imp.release_lock()

    if sys.version_info >= (3, 3):
        import importlib._bootstrap
        thread = original('_thread')
        # importlib must use real thread locks, not eventlet.Semaphore
        importlib._bootstrap._thread = thread

        # Issue #185: Since Python 3.3, threading.RLock is implemented in C and
        # so call a C function to get the thread identifier, instead of calling
        # threading.get_ident(). Force the Python implementation of RLock which
        # calls threading.get_ident() and so is compatible with eventlet.
        import threading
        threading.RLock = threading._PyRLock
def monkey_patch(**on):
    """Globally patches certain system modules to be greenthread-friendly.

    The keyword arguments afford some control over which modules are patched.
    If no keyword arguments are supplied, all possible modules are patched.
    If keywords are set to True, only the specified modules are patched.  E.g.,
    ``monkey_patch(socket=True, select=True)`` patches only the select and
    socket modules.  Most arguments patch the single module of the same name
    (os, time, select).  The exceptions are socket, which also patches the ssl
    module if present; and thread, which patches thread, threading, and Queue.

    It's safe to call monkey_patch multiple times.
    """
    accepted_args = set(
        ('os', 'select', 'socket', 'thread', 'time', 'psycopg', 'MySQLdb'))
    default_on = on.pop("all", None)
    # Reject unknown keyword arguments early with a clear message.
    for k in six.iterkeys(on):
        if k not in accepted_args:
            # Adjacent string literals inside the parentheses are joined
            # implicitly; the former trailing backslash was redundant.
            raise TypeError("monkey_patch() got an unexpected "
                            "keyword argument %r" % k)
    if default_on is None:
        # If the caller explicitly enabled any module, all unnamed modules
        # default to off; otherwise everything defaults to on.
        default_on = not (True in on.values())
    for modname in accepted_args:
        if modname == 'MySQLdb':
            # MySQLdb is only on when explicitly patched for the moment
            on.setdefault(modname, False)
        on.setdefault(modname, default_on)

    # Gather green replacement modules for everything enabled and not yet
    # patched; the module-level already_patched dict makes repeat calls
    # idempotent.
    modules_to_patch = []
    if on['os'] and not already_patched.get('os'):
        modules_to_patch += _green_os_modules()
        already_patched['os'] = True
    if on['select'] and not already_patched.get('select'):
        modules_to_patch += _green_select_modules()
        already_patched['select'] = True
    if on['socket'] and not already_patched.get('socket'):
        modules_to_patch += _green_socket_modules()
        already_patched['socket'] = True
    if on['thread'] and not already_patched.get('thread'):
        modules_to_patch += _green_thread_modules()
        already_patched['thread'] = True
    if on['time'] and not already_patched.get('time'):
        modules_to_patch += _green_time_modules()
        already_patched['time'] = True
    if on.get('MySQLdb') and not already_patched.get('MySQLdb'):
        modules_to_patch += _green_MySQLdb()
        already_patched['MySQLdb'] = True
    if on['psycopg'] and not already_patched.get('psycopg'):
        try:
            from eventlet.support import psycopg2_patcher
            psycopg2_patcher.make_psycopg_green()
            already_patched['psycopg'] = True
        except ImportError:
            # note that if we get an importerror from trying to
            # monkeypatch psycopg, we will continually retry it
            # whenever monkey_patch is called; this should not be a
            # performance problem but it allows is_monkey_patched to
            # tell us whether or not we succeeded
            pass

    # Hold the import lock while mutating modules so a concurrent import
    # cannot observe a module in a half-patched state.
    imp.acquire_lock()
    try:
        for name, mod in modules_to_patch:
            orig_mod = sys.modules.get(name)
            if orig_mod is None:
                orig_mod = __import__(name)
            # Overwrite each attribute the green module declares patched.
            for attr_name in mod.__patched__:
                patched_attr = getattr(mod, attr_name, None)
                if patched_attr is not None:
                    setattr(orig_mod, attr_name, patched_attr)
    finally:
        imp.release_lock()