Example #1
0
    def TallyTimings(self):
        oldtimings = self.sleeping
        self.sleeping = {}

        # first, unwind the main "cur"
        self.cur = self.Unwind(self.cur, self.timings)

        # We must keep the timings dicts separate for each tasklet, since each
        # contains the 'ns' item, the recursion count of each function in that
        # tasklet.  This is used by Unwind().
        for tasklet, (cur, timings) in six.iteritems(oldtimings):
            self.Unwind(cur, timings)

            for k, v in six.iteritems(timings):
                if k not in self.timings:
                    self.timings[k] = v
                else:
                    # accumulate all to the self.timings
                    cc, ns, tt, ct, callers = self.timings[k]
                    # ns should be 0 after unwinding
                    cc += v[0]
                    tt += v[2]
                    ct += v[3]
                    for k1, v1 in six.iteritems(v[4]):
                        callers[k1] = callers.get(k1, 0) + v1
                    self.timings[k] = cc, ns, tt, ct, callers
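
The merge above folds per-tasklet timing tables into self.timings. Each value is a stdlib-profile style 5-tuple (cc, ns, tt, ct, callers). A minimal standalone sketch of that accumulation, using made-up keys and numbers:

def merge_timings(dst, src):
    # fold one (cc, ns, tt, ct, callers) table into another, as above
    for key, v in src.items():
        if key not in dst:
            dst[key] = v
        else:
            cc, ns, tt, ct, callers = dst[key]
            cc += v[0]  # call count
            tt += v[2]  # internal time
            ct += v[3]  # cumulative time
            for caller, n in v[4].items():
                callers[caller] = callers.get(caller, 0) + n
            dst[key] = cc, ns, tt, ct, callers

a = {('f.py', 1, 'f'): (2, 0, 0.5, 1.0, {('m.py', 9, 'main'): 2})}
b = {('f.py', 1, 'f'): (3, 0, 0.7, 1.5, {('m.py', 9, 'main'): 3})}
merge_timings(a, b)
print(a[('f.py', 1, 'f')])  # (5, 0, 1.2, 2.5, {('m.py', 9, 'main'): 5})
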
Example #2
0
    def _obsolete(self, fileno):
        """ We've received an indication that 'fileno' has been obsoleted.
            Any current listeners must be defanged, and notifications to
            their greenlets queued up to send.
        """
        found = False
        for evtype, bucket in six.iteritems(self.secondaries):
            if fileno in bucket:
                for listener in bucket[fileno]:
                    found = True
                    self.closed.append(listener)
                    listener.defang()
                del bucket[fileno]

        # For the primary listeners, we actually need to call remove,
        # which may modify the underlying OS polling objects.
        for evtype, bucket in six.iteritems(self.listeners):
            if fileno in bucket:
                listener = bucket[fileno]
                found = True
                self.closed.append(listener)
                self.remove(listener)
                listener.defang()

        return found
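
The method relies on the hub's two-tier registry: self.listeners maps each event type to one primary listener per fileno, while self.secondaries holds any extras. A hedged stdlib-only sketch of the same walk, with stand-in listener objects (FakeListener and the string event types are inventions for illustration):

class FakeListener(object):
    def __init__(self, fileno):
        self.fileno = fileno

    def defang(self):
        print('defanged fd %d' % self.fileno)

listeners = {'read': {7: FakeListener(7)}, 'write': {}}
secondaries = {'read': {7: [FakeListener(7), FakeListener(7)]}, 'write': {}}

def obsolete(fileno):
    found = False
    # secondaries: defang every extra listener and drop the whole list
    for evtype, bucket in secondaries.items():
        for listener in bucket.pop(fileno, ()):
            found = True
            listener.defang()
    # primaries: one listener per fd; the real code also calls hub.remove()
    for evtype, bucket in listeners.items():
        listener = bucket.pop(fileno, None)
        if listener is not None:
            found = True
            listener.defang()
    return found

print(obsolete(7))  # True, after defanging all three listeners
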
Example #3
0
def select(read_list, write_list, error_list, timeout=None):
    # error checking like this is required by the stdlib unit tests
    if timeout is not None:
        try:
            timeout = float(timeout)
        except ValueError:
            raise TypeError("Expected number for timeout")
    hub = get_hub()
    timers = []
    current = eventlet.getcurrent()
    assert hub.greenlet is not current, 'do not call blocking functions from the mainloop'
    ds = {}
    for r in read_list:
        ds[get_fileno(r)] = {'read': r}
    for w in write_list:
        ds.setdefault(get_fileno(w), {})['write'] = w
    for e in error_list:
        ds.setdefault(get_fileno(e), {})['error'] = e

    listeners = []

    def on_read(d):
        original = ds[get_fileno(d)]['read']
        current.switch(([original], [], []))

    def on_write(d):
        original = ds[get_fileno(d)]['write']
        current.switch(([], [original], []))

    def on_timeout2():
        current.switch(([], [], []))

    def on_timeout():
        # ensure that BaseHub.run() has a chance to call self.wait()
        # at least once before timed out.  otherwise the following code
        # can time out erroneously.
        #
        # s1, s2 = socket.socketpair()
        # print(select.select([], [s1], [], 0))
        timers.append(hub.schedule_call_global(0, on_timeout2))

    if timeout is not None:
        timers.append(hub.schedule_call_global(timeout, on_timeout))
    try:
        for k, v in six.iteritems(ds):
            if v.get('read'):
                listeners.append(
                    hub.add(hub.READ, k, on_read, current.throw, lambda: None))
            if v.get('write'):
                listeners.append(
                    hub.add(hub.WRITE, k, on_write, current.throw,
                            lambda: None))
        try:
            return hub.switch()
        finally:
            for l in listeners:
                hub.remove(l)
    finally:
        for t in timers:
            t.cancel()
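
This looks like eventlet's green select(): it parks the calling greenthread on the hub instead of blocking the OS thread. Note that whichever event fires first wins; the switch delivers a single ready object. A hedged usage sketch, assuming this function is exposed as eventlet.green.select.select:

import socket
from eventlet.green import select

s1, s2 = socket.socketpair()
s2.sendall(b'x')  # make s1 readable
print(select.select([s1], [], [], 1.0))  # ([<socket ... fd=...>], [], [])
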
Example #4
0
def test_wait_each_all():
    # set up a simple linear dependency chain
    deps = dict(b="a", c="b", d="c", e="d")
    capture = Capture()
    pool = DAGPool([("a", "a")])
    # capture a different Event for each key
    events = dict((key, eventlet.event.Event()) for key in six.iterkeys(deps))
    # can't use spawn_many() because we need a different event for each
    for key, dep in six.iteritems(deps):
        pool.spawn(key, dep, observe, capture, events[key])
    keys = "abcde"                      # this specific order
    each = iter(pool.wait_each())
    for pos in range(len(keys)):
        # next value from wait_each()
        k, v = next(each)
        assert_equals(k, keys[pos])
        # advance every pool greenlet as far as it can go
        spin()
        # everything from keys[:pos+1] should have a value by now
        for k in keys[:pos + 1]:
            assert pool.get(k, _notthere) is not _notthere, \
                "greenlet {0} did not yet produce a value".format(k)
        # everything from keys[pos+1:] should not yet
        for k in keys[pos + 1:]:
            assert pool.get(k, _notthere) is _notthere, \
                "wait_each() delayed value for {0}".format(keys[pos])
        # let next greenthread complete
        if pos < len(keys) - 1:
            k = keys[pos + 1]
            events[k].send(k)
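
For contrast with the instrumented test above, the same DAGPool machinery can be driven directly. A hedged sketch of a short dependency chain (assumes eventlet's DAGPool API as used above; each function receives its key and an iterable of (dep_key, dep_value) pairs):

from eventlet.dagpool import DAGPool

def plus_one(key, results):
    # iterating 'results' blocks until each dependency value is available
    return sum(value for _, value in results) + 1

pool = DAGPool([("a", 1)])           # preload a value for key "a"
pool.spawn("b", ("a",), plus_one)    # "b" waits on "a", then produces 2
pool.spawn("c", ("b",), plus_one)    # "c" waits on "b", then produces 3
for key, value in pool.wait_each("abc"):
    print(key, value)                # a 1 / b 2 / c 3
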
Example #5
0
 def items(self):
     """
     Return a snapshot tuple of currently-available (key, value) pairs.
     """
     # Don't assume our caller will finish iterating before new values are
     # posted.
     return tuple((key, self._value_or_raise(value))
                  for key, value in six.iteritems(self.values))
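
The snapshot matters because the caller may switch greenthreads mid-iteration, and another greenthread may post a new value; iterating a live dict view that grows underneath you raises RuntimeError. A stdlib-only illustration:

d = {'a': 1}
it = iter(d.items())
d['b'] = 2  # simulates a concurrent post()
try:
    next(it)
except RuntimeError as exc:
    print('live view broke:', exc)

snapshot = tuple(d.items())  # what items() above returns
d['c'] = 3                   # later mutation cannot affect the snapshot
print(snapshot)
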
Example #6
0
def iter_descendent_module_names(package):
    """Iterate over the modules descending from package, including package.

    Only considers modules contained in sys.modules.
    """
    prefix = package + '.'
    for name, module in six.iteritems(sys.modules):
        if name == package or name.startswith(prefix):
            yield name
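
A usage sketch (stdlib only, assuming the function above is in scope; the exact module list depends on what has been imported):

import json  # pulls json.decoder, json.encoder, ... into sys.modules

print(sorted(iter_descendent_module_names('json')))
# e.g. ['json', 'json.decoder', 'json.encoder', 'json.scanner']
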
Example #7
0
    def waiting_for(self, key=_MISSING):
        """
        waiting_for(key) returns a set() of the keys for which the DAGPool
        greenthread spawned with that *key* is still waiting. If you pass a
        *key* for which no greenthread was spawned, waiting_for() raises
        KeyError.

        waiting_for() without argument returns a dict. Its keys are the keys
        of DAGPool greenthreads still waiting on one or more values. In the
        returned dict, the value of each such key is the set of other keys for
        which that greenthread is still waiting.

        This method allows diagnosing a "hung" DAGPool. If certain
        greenthreads are making no progress, it's possible that they are
        waiting on keys for which there is no greenthread and no :meth:`post` data.
        """
        # We may have greenthreads whose 'pending' entry indicates they're
        # waiting on some keys even though values have now been posted for
        # some or all of those keys, because those greenthreads have not yet
        # regained control since values were posted. So make a point of
        # excluding values that are now available.
        available = set(six.iterkeys(self.values))

        if key is not _MISSING:
            # waiting_for(key) is semantically different from waiting_for().
            # It's just that they both seem to want the same method name.
            coro = self.coros.get(key, _MISSING)
            if coro is _MISSING:
                # Hmm, no running greenthread with this key. But was there
                # EVER a greenthread with this key? If not, let KeyError
                # propagate.
                self.values[key]
                # Oh good, there's a value for this key. Either the
                # greenthread finished, or somebody posted a value. Just say
                # the greenthread isn't waiting for anything.
                return set()
            else:
                # coro is the _Coro for the running greenthread with the
                # specified key.
                return coro.pending - available

        # This is a waiting_for() call, i.e. a general query rather than for a
        # specific key.

        # Start by iterating over (key, coro) pairs in self.coros. Generate
        # (key, pending) pairs in which 'pending' is the set of keys on which
        # the greenthread believes it's waiting, minus the set of keys that
        # are now available. Filter out any pair in which 'pending' is empty,
        # that is, that greenthread will be unblocked next time it resumes.
        # Make a dict from those pairs.
        return dict(
            (key, pending)
            for key, pending in ((key, (coro.pending - available))
                                 for key, coro in six.iteritems(self.coros))
            if pending)
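
A hedged diagnostic sketch (assumes eventlet's DAGPool; "b" waits on a key that nothing will ever produce, which is exactly the hung case the docstring describes):

import eventlet
from eventlet.dagpool import DAGPool

pool = DAGPool()
pool.spawn("b", ("never",), lambda key, results: list(results))
eventlet.sleep(0)             # let "b" start and block on its dependency
print(pool.waiting_for("b"))  # {'never'}
print(pool.waiting_for())     # {'b': {'never'}}
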
Example #8
0
 def restore(self):
     """Restores the modules that the saver knows about into
     sys.modules.
     """
     try:
         for modname, mod in six.iteritems(self._saved):
             if mod is not None:
                 sys.modules[modname] = mod
             else:
                 try:
                     del sys.modules[modname]
                 except KeyError:
                     pass
     finally:
         imp.release_lock()
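
imp.release_lock() pairs with an imp.acquire_lock() presumably taken when the saver captured its snapshot; a None entry records "this name was absent". A stdlib-only sketch of the same save/restore idea, without the saver class:

import sys

def snapshot_modules(names):
    # None marks a module that was not present at snapshot time
    return dict((name, sys.modules.get(name)) for name in names)

def restore_modules(saved):
    for modname, mod in saved.items():
        if mod is not None:
            sys.modules[modname] = mod
        else:
            sys.modules.pop(modname, None)

saved = snapshot_modules(['json', 'not_a_real_module'])
sys.modules['not_a_real_module'] = object()  # simulate a stray entry
restore_modules(saved)
assert 'not_a_real_module' not in sys.modules
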
Example #9
0
 def test_psycopg_patched(self):
     if 'PSYCOPG_TEST_DSN' not in os.environ:
         # construct a non-json dsn for the subprocess
         psycopg_auth = get_database_auth()['psycopg2']
         if isinstance(psycopg_auth, str):
             dsn = psycopg_auth
         else:
             dsn = " ".join(["%s=%s" % (k, v) for k, v in six.iteritems(psycopg_auth)])
         os.environ['PSYCOPG_TEST_DSN'] = dsn
     self.write_to_tempfile("psycopg_patcher", psycopg_test_file)
     output, lines = self.launch_subprocess('psycopg_patcher.py')
     if lines[0].startswith('Psycopg not monkeypatched'):
         print("Can't test psycopg2 patching; it's not installed.")
         return
     # if there's anything wrong with the test program it'll have a stack trace
     assert lines[0].startswith('done'), output
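
The DSN assembly is a plain key=value join over the auth dict; a tiny sketch with made-up credentials (sorted only to keep the output deterministic):

psycopg_auth = {'host': 'localhost', 'user': 'test', 'dbname': 'testdb'}
dsn = " ".join("%s=%s" % kv for kv in sorted(psycopg_auth.items()))
print(dsn)  # dbname=testdb host=localhost user=test
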
Example #10
0
 def getaliases(self, hostname):
     """Return a list of all the aliases of a given cname"""
     # Due to the way we store aliases this is a bit inefficient; it
     # clearly was an afterthought.  But this is only used by
     # gethostbyname_ex so it's probably fine.
     aliases = []
     if hostname in self._aliases:
         cannon = self._aliases[hostname]
     else:
         cannon = hostname
     aliases.append(cannon)
     for alias, cname in six.iteritems(self._aliases):
         if cannon == cname:
             aliases.append(alias)
     aliases.remove(hostname)
     return aliases
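
A hedged standalone sketch of the same lookup, with a made-up _aliases mapping (alias -> canonical name):

_aliases = {'www': 'example.com', 'web': 'example.com'}

def getaliases(hostname):
    cannon = _aliases.get(hostname, hostname)
    aliases = [cannon]
    aliases.extend(alias for alias, cname in _aliases.items()
                   if cname == cannon)
    aliases.remove(hostname)  # a name is not its own alias
    return aliases

print(getaliases('www'))          # ['example.com', 'web']
print(getaliases('example.com'))  # ['www', 'web']
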
Example #11
0
def measure_best(repeat, iters,
                 common_setup='pass',
                 common_cleanup=lambda: None,
                 *funcs):
    # common_setup is passed to timeit as a setup statement; common_cleanup
    # must be a callable and is invoked after each timed run.
    funcs = list(funcs)
    results = dict([(f, []) for f in funcs])

    for i in six.moves.range(repeat):
        random.shuffle(funcs)
        for func in funcs:
            gc.collect()
            t = timeit.Timer(func, setup=common_setup)
            results[func].append(t.timeit(iters))
            common_cleanup()

    best_results = {}
    for func, times in six.iteritems(results):
        best_results[func] = min(times)
    return best_results
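
A usage sketch (assumes measure_best as defined above; absolute times are machine-dependent):

def list_append():
    xs = []
    for i in range(1000):
        xs.append(i)

def list_comp():
    [i for i in range(1000)]

best = measure_best(5, 1000, 'pass', lambda: None, list_append, list_comp)
for func, secs in sorted(best.items(), key=lambda kv: kv[1]):
    print(func.__name__, secs)
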
Example #12
0
    def spawn_many(self, depends, function, *args, **kwds):
        """
        spawn_many() accepts a single *function* whose parameters are the same
        as for :meth:`spawn`.

        The difference is that spawn_many() accepts a dependency dict
        *depends*. A new greenthread is spawned for each key in the dict. That
        dict key's value should be an iterable of other keys on which this
        greenthread depends.

        If the *depends* dict contains any key already passed to :meth:`spawn`
        or :meth:`post`, spawn_many() raises :class:`Collision`. It is
        indeterminate how many of the other keys in *depends* will have
        successfully spawned greenthreads.
        """
        # Iterate over 'depends' items, relying on self.spawn() not to
        # context-switch so no one can modify 'depends' along the way.
        for key, deps in six.iteritems(depends):
            self.spawn(key, deps, function, *args, **kwds)
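
A hedged usage sketch (assumes eventlet's DAGPool; one shared function serves every spawned key):

from eventlet.dagpool import DAGPool

def node(key, results):
    # 'results' iterates this key's (dep_key, dep_value) pairs
    return sum(value for _, value in results) + 1

pool = DAGPool([("a", 1)])
pool.spawn_many({"b": ("a",), "c": ("a", "b")}, node)
print(pool.waitall())  # expect {'a': 1, 'b': 2, 'c': 4}
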
Example #13
0
    def __init__(self, preload={}):
        """
        DAGPool can be prepopulated with an initial dict or iterable of (key,
        value) pairs. These (key, value) pairs are of course immediately
        available for any greenthread that depends on any of those keys.
        """
        try:
            # If a dict is passed, copy it. Don't risk a subsequent
            # modification to passed dict affecting our internal state.
            iteritems = six.iteritems(preload)
        except AttributeError:
            # Not a dict, just an iterable of (key, value) pairs
            iteritems = preload

        # Load the initial dict
        self.values = dict(iteritems)

        # track greenthreads
        self.coros = {}

        # The key to blocking greenthreads is the Event.
        self.event = Event()
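
The try/except is duck-typing: six.iteritems() looks up an items/iteritems attribute, so a plain iterable of pairs raises AttributeError and falls through untouched. The same pattern in a stdlib-only sketch:

def as_pairs(preload):
    try:
        return list(preload.items())  # dict-like: copy via items()
    except AttributeError:
        return list(preload)          # already an iterable of pairs

print(dict(as_pairs({'a': 1})))    # {'a': 1}
print(dict(as_pairs([('a', 1)])))  # {'a': 1}
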
Example #14
0
 def _start_send_exception(self):
     links_items = list(six.iteritems(self._exception_links))
     hubs.get_hub().schedule_call_global(0, self._do_send, links_items,
                                         self._exception_links)
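
Wrapping six.iteritems() in list() takes a snapshot before the hub call: _do_send runs later, and links registered or cancelled in the meantime must not perturb (or invalidate) the iteration. A minimal illustration of snapshot-then-mutate:

links = {'cb1': 'greenlet-1'}
snapshot = list(links.items())  # what _start_send_exception captures
links['cb2'] = 'greenlet-2'     # a link added before the deferred call runs
print(snapshot)                 # still [('cb1', 'greenlet-1')]
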
Example #15
0
def select(read_list, write_list, error_list, timeout=None):
    # error checking like this is required by the stdlib unit tests
    if timeout is not None:
        try:
            timeout = float(timeout)
        except ValueError:
            raise TypeError("Expected number for timeout")
    hub = get_hub()
    timers = []
    current = getcurrent()
    assert hub.greenlet is not current, 'do not call blocking functions from the mainloop'
    ds = {}
    for r in read_list:
        ds[get_fileno(r)] = {'read': r}
    for w in write_list:
        ds.setdefault(get_fileno(w), {})['write'] = w
    for e in error_list:
        ds.setdefault(get_fileno(e), {})['error'] = e

    listeners = []

    def on_read(d):
        original = ds[get_fileno(d)]['read']
        current.switch(([original], [], []))

    def on_write(d):
        original = ds[get_fileno(d)]['write']
        current.switch(([], [original], []))

    def on_error(d, _err=None):
        original = ds[get_fileno(d)]['error']
        current.switch(([], [], [original]))

    def on_timeout2():
        current.switch(([], [], []))

    def on_timeout():
        # ensure that BaseHub.run() has a chance to call self.wait()
        # at least once before timed out.  otherwise the following code
        # can time out erroneously.
        #
        # s1, s2 = socket.socketpair()
        # print(select.select([], [s1], [], 0))
        timers.append(hub.schedule_call_global(0, on_timeout2))

    if timeout is not None:
        timers.append(hub.schedule_call_global(timeout, on_timeout))
    try:
        for k, v in six.iteritems(ds):
            if v.get('read'):
                listeners.append(hub.add(hub.READ, k, on_read, on_error, lambda x: None))
            if v.get('write'):
                listeners.append(hub.add(hub.WRITE, k, on_write, on_error, lambda x: None))
        try:
            return hub.switch()
        finally:
            for l in listeners:
                hub.remove(l)
    finally:
        for t in timers:
            t.cancel()
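
The effect of the two-stage timeout is visible with timeout=0, exactly as the comment in on_timeout() describes: the fds still get polled once before an empty result could be returned. A hedged sanity check (assumes a POSIX socketpair and the select() above in scope):

import socket

s1, s2 = socket.socketpair()
# a fresh socket is writable, so even a zero timeout reports it
print(select([], [s1], [], 0))  # ([], [<socket ... fd=...>], [])
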
Example #16
0
 def test_module_sandboxing(self, restore_sys_modules, module_to_green):
     ModuleInfo = collections.namedtuple(
         'ModuleInfo', ('names', 'required'))
     core_modules = tuple(ModuleInfo(*i) for i in (
         ('os', True),
         (('Queue', 'queue'), True),
         ('select', True),
         ('selectors', False),
         ('socket', True),
         ('ssl', False),
         ('subprocess', True),
         ('time', True),
         (('thread', '_thread'), True),
         ('threading', True),
     ))
     affected_modules = set()
     for info in core_modules:
         names = info.names
         if isinstance(names, six.string_types):
             names = [names]
         for name in names:
             try:
                 importlib.import_module(name)
             except ImportError:
                 pass
             else:
                 affected_modules.add(name)
                 break
         else:
             if info.required:
                 raise RuntimeError(
                     'Unable to locate any module matching names: %s' %
                     ', '.join(names))
     root_module = module_to_green.split('.', 1)[0]
     descendent_modules = set(
         utils.iter_descendent_module_names(root_module))
     affected_modules.update(
         m for m in descendent_modules if sys.modules[m] is not None)
     affected_modules = list(affected_modules)
     # Eventlet's patcher won't patch a module if a patched version
     # already exists in sys.modules, so all patched modules must be
     # deregistered in order to test the normal patching behavior.
     patched_modules = set(
         m for m in sys.modules if m.startswith('__patched_module_'))
     utils.delete_sys_modules(
         descendent_modules |
         set(affected_modules) |
         set(utils.iter_descendent_module_names('eventlet')) |
         set(utils.iter_descendent_module_names('greenlet')) |
         set([green.__name__]) |
         patched_modules
     )
     real_modules = [
         importlib.import_module(m) for m in affected_modules]
     refs_by_module_name = {}
     for module in real_modules:
         module_refs = refs_by_module_name[module.__name__] = {}
         for attr, value in inspect.getmembers(module):
             if value in real_modules:
                 module_refs[attr] = value
     importlib.import_module(green.__name__)
     current_modules = [
         importlib.import_module(m) for m in affected_modules]
     assert [id(m) for m in current_modules] == \
            [id(m) for m in real_modules]
     for module_name, refs in six.iteritems(refs_by_module_name):
         for attr, expected_value in six.iteritems(refs):
             actual_value = getattr(sys.modules[module_name], attr)
             assert id(expected_value) == id(actual_value), \
                 '%s.%s changed after import' % (module_name, attr)
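
The refs_by_module_name bookkeeping records, for each real module, which attributes point at other modules under test, so the final loop can prove the green import did not rebind them. The snapshot idea in a standalone stdlib sketch:

import inspect
import json
import json.decoder

modules = [json, json.decoder]
refs = {}
for module in modules:
    module_refs = refs[module.__name__] = {}
    for attr, value in inspect.getmembers(module):
        if value in modules:
            module_refs[attr] = value

print(refs['json'])  # {'decoder': <module 'json.decoder' ...>}
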
Example #17
0
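(This snippet begins mid-way through Profile.Unwind(); the complete method appears in the next example.)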
                pfn = None

            if pfn in callers:
                callers[pfn] = callers[pfn] + 1  # hack: gather more
            elif pfn:
                callers[pfn] = 1

            timings[rfn] = cc, ns - 1, tt + rit, ct, callers

            ppt, pit, pet, pfn, pframe, pcur = rcur
            rcur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
            cur = rcur
        return cur

# Add automatic tasklet detection to the callbacks.
Profile.dispatch = {key: Profile.ContextWrap(val) for key, val in six.iteritems(Profile.dispatch)}


# run statements shamelessly stolen from profile.py
def run(statement, filename=None, sort=-1):
    """Run statement under profiler optionally saving results in filename

    This function takes a single argument that can be passed to the
    "exec" statement, and an optional file name.  In all cases this
    routine attempts to "exec" its first argument and gather profiling
    statistics from the execution. If no file name is present, then this
    function automatically prints a simple profiling report, sorted by the
    standard name string (file/line/function-name) that is presented in
    each line.
    """
    prof = Profile()
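    # NOTE: the snippet ends here; going by the stdlib profile.run() that
    # the comment above says this was taken from, the body presumably
    # continues as follows (a hedged reconstruction, not verbatim source):
    try:
        prof = prof.run(statement)
    except SystemExit:
        pass
    if filename is not None:
        prof.dump_stats(filename)
    else:
        return prof.print_stats(sort)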
Example #18
0
class Profile(profile_orig.Profile):
    base = profile_orig.Profile

    def __init__(self, timer=None, bias=None):
        self.current_tasklet = greenthread.getcurrent()
        self.thread_id = thread.get_ident()
        self.base.__init__(self, timer, bias)
        self.sleeping = {}

    def __call__(self, *args):
        """make callable, allowing an instance to be the profiler"""
        self.dispatcher(*args)

    def _setup(self):
        self._has_setup = True
        self.cur = None
        self.timings = {}
        self.current_tasklet = greenthread.getcurrent()
        self.thread_id = thread.get_ident()
        self.simulate_call("profiler")

    def start(self, name="start"):
        if getattr(self, "running", False):
            return
        self._setup()
        self.simulate_call("start")
        self.running = True
        sys.setprofile(self.dispatcher)

    def stop(self):
        sys.setprofile(None)
        self.running = False
        self.TallyTimings()

    # special cases for the original run commands, making sure to
    # clear the timer context.
    def runctx(self, cmd, globals, locals):
        if not getattr(self, "_has_setup", False):
            self._setup()
        try:
            return profile_orig.Profile.runctx(self, cmd, globals, locals)
        finally:
            self.TallyTimings()

    def runcall(self, func, *args, **kw):
        if not getattr(self, "_has_setup", False):
            self._setup()
        try:
            return profile_orig.Profile.runcall(self, func, *args, **kw)
        finally:
            self.TallyTimings()

    def trace_dispatch_return_extend_back(self, frame, t):
        """A hack function to override error checking in the parent class.
        It allows invalid returns (where frames weren't previously entered
        into the profiler), which can happen for all the tasklets that
        suddenly start to get monitored.  This means that the time will
        eventually be attributed to a call high in the chain when there is
        a tasklet switch.
        """
        if isinstance(self.cur[-2], Profile.fake_frame):
            return False  # ignore bogus returns
        return self.trace_dispatch_return(frame, t)

    def trace_dispatch_c_return_extend_back(self, frame, t):
        # same for C returns
        if isinstance(self.cur[-2], Profile.fake_frame):
            return False  # ignore bogus returns
        return self.trace_dispatch_return(frame, t)

    # Add "return safety" to the dispatchers
    dispatch = dict(profile_orig.Profile.dispatch)
    dispatch.update({
        "return": trace_dispatch_return_extend_back,
        "c_return": trace_dispatch_c_return_extend_back,
    })

    def SwitchTasklet(self, t0, t1, t):
        # tally the time spent in the old tasklet
        pt, it, et, fn, frame, rcur = self.cur
        cur = (pt, it + t, et, fn, frame, rcur)

        # we are switching to a new tasklet, store the old
        self.sleeping[t0] = cur, self.timings
        self.current_tasklet = t1

        # find the new one
        try:
            self.cur, self.timings = self.sleeping.pop(t1)
        except KeyError:
            self.cur, self.timings = None, {}
            self.simulate_call("profiler")
            self.simulate_call("new_tasklet")

    def ContextWrap(f):
        @functools.wraps(f)
        def ContextWrapper(self, arg, t):
            current = greenthread.getcurrent()
            if current != self.current_tasklet:
                self.SwitchTasklet(self.current_tasklet, current, t)
                t = 0.0  # the time was billed to the previous tasklet
            return f(self, arg, t)

        return ContextWrapper

    # Add automatic tasklet detection to the callbacks.
    dispatch = dict([(key, ContextWrap(val))
                     for key, val in six.iteritems(dispatch)])

    def TallyTimings(self):
        oldtimings = self.sleeping
        self.sleeping = {}

        # first, unwind the main "cur"
        self.cur = self.Unwind(self.cur, self.timings)

        # We must keep the timings dicts separate for each tasklet, since each
        # contains the 'ns' item, the recursion count of each function in that
        # tasklet.  This is used by Unwind().
        for tasklet, (cur, timings) in six.iteritems(oldtimings):
            self.Unwind(cur, timings)

            for k, v in six.iteritems(timings):
                if k not in self.timings:
                    self.timings[k] = v
                else:
                    # accumulate all to the self.timings
                    cc, ns, tt, ct, callers = self.timings[k]
                    # ns should be 0 after unwinding
                    cc += v[0]
                    tt += v[2]
                    ct += v[3]
                    for k1, v1 in six.iteritems(v[4]):
                        callers[k1] = callers.get(k1, 0) + v1
                    self.timings[k] = cc, ns, tt, ct, callers

    def Unwind(self, cur, timings):
        """Unwind a 'cur' frame and tally the results.

        See profile.trace_dispatch_return() for details; also see
        simulate_cmd_complete().
        """
        while cur[-1]:
            rpt, rit, ret, rfn, frame, rcur = cur
            frame_total = rit + ret

            if rfn in timings:
                cc, ns, tt, ct, callers = timings[rfn]
            else:
                cc, ns, tt, ct, callers = 0, 0, 0, 0, {}

            if not ns:
                ct = ct + frame_total
                cc = cc + 1

            if rcur:
                ppt, pit, pet, pfn, pframe, pcur = rcur
            else:
                pfn = None

            if pfn in callers:
                callers[pfn] = callers[pfn] + 1  # hack: gather more
            elif pfn:
                callers[pfn] = 1

            timings[rfn] = cc, ns - 1, tt + rit, ct, callers

            ppt, pit, pet, pfn, pframe, pcur = rcur
            rcur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
            cur = rcur
        return cur
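
A hedged usage sketch: the class is driven like the stdlib profiler but keeps per-tasklet books across greenthread switches (assumes eventlet provides it as eventlet.green.profile.Profile):

import eventlet
from eventlet.green.profile import Profile

def fan_out():
    pile = eventlet.GreenPile()
    for n in (1, 2, 3):
        pile.spawn(lambda x: x * x, n)
    return list(pile)

p = Profile()
print(p.runcall(fan_out))  # [1, 4, 9]
p.print_stats()            # per-function stats, tasklet switches included
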