Example #1
from weakref import WeakSet

class BaseSubject(object):
    """ Object holding all the observers, aliased to dirigent.subject
    """
    def __init__(self, init=None):
        if init:
            self.observers = WeakSet(init)
        else:
            self.observers = WeakSet()

    def register(self, func):
        """ Registers a callable.
            Can be used as a decorator.
        """
        self.observers.add(func)
        return func

    def unregister(self, func):
        """ Unregisters a callable.
        """
        if func in self.observers:
            self.observers.remove(func)
            return func
        return False

    def notify(self, *args, **kwargs):
        """ Notifies all registered observers of an event.
        """
        return [observer(*args, **kwargs) for observer in self.observers]

    def __iter__(self):
        return (observer for observer in self.observers)

    __call__ = notify
    on = bind = register
    off = unbind = unregister
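A minimal usage sketch (assuming the BaseSubject class above, with WeakSet imported from weakref): because observers are held weakly, only callables that are strongly referenced elsewhere survive registration; a bare lambda passed to register() would be garbage-collected almost immediately.

subject = BaseSubject()

@subject.register                 # decorator form; returns func unchanged
def on_event(msg):
    return "got {}".format(msg)

print(subject.notify("ping"))     # ['got ping']
subject.unregister(on_event)
print(subject.notify("ping"))     # []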
Example #2
import inspect
from weakref import WeakKeyDictionary, WeakSet

class Signal(object):
    def __init__(self):
        self._functions = WeakSet()
        self._methods = WeakKeyDictionary()

    def __call__(self, *args, **kargs):
        for f in self._functions:
            f(*args, **kargs)

        for obj, functions in self._methods.items():
            for f in functions:
                f(obj, *args, **kargs)

    def connect(self, slot):
        if inspect.ismethod(slot):
            if slot.__self__ not in self._methods:
                self._methods[slot.__self__] = set()
            self._methods[slot.__self__].add(slot.__func__)
        else:
            self._functions.add(slot)

    def disconnect(self, slot):
        if inspect.ismethod(slot):
            if slot.__self__ in self._methods:
                self._methods[slot.__self__].remove(slot.__func__)
        else:
            if slot in self._functions:
                self._functions.remove(slot)
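The function/method split matters because obj.method builds a fresh bound-method object on every attribute access, so storing bound methods in a WeakSet would drop them immediately. Keying a WeakKeyDictionary on the instance and keeping the underlying __func__ in a plain set ties the handler's lifetime to the receiver instead. A usage sketch, assuming the Signal class above:

class Receiver(object):
    def on_fire(self, value):
        print("method saw", value)

def log(value):
    print("function saw", value)

sig = Signal()
r = Receiver()
sig.connect(r.on_fire)    # stored under _methods[r] as Receiver.on_fire
sig.connect(log)          # plain function goes into _functions
sig("hello")              # dispatches to both handlers
del r                     # the _methods entry vanishes with the instance
sig("again")              # only log() remains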
Example #3
class ConnectionFactory(ReconnectingClientFactory):
    """Creates `.Connection` instances."""

    protocol = Connection
    log = Logger()

    def __init__(self):
        #: The `ConnectionSettings` object associated with this factory.
        self.settings = ConnectionSettings()
        #: A `WeakSet` containing associated `Connection` objects.
        self.protocols = WeakSet()

    def startedConnecting(self, connector):
        self.log.info("Attempting to connect to server")

    def buildProtocol(self, addr):
        protocol = ReconnectingClientFactory.buildProtocol(self, addr)
        protocol.settings = self.settings
        # Set various properties defined by Twisted's IRCClient.
        protocol.nickname = self.settings.nickname or protocol.nickname
        protocol.password = self.settings.password or protocol.password
        protocol.realname = self.settings.realname or protocol.realname
        protocol.username = self.settings.username or protocol.username
        protocol.userinfo = self.settings.userinfo or protocol.userinfo
        self.protocols.add(protocol)
        return protocol

    def reload_settings(self, dct):
        """Update this connection's settings using *dct*, then call
        `after_reload` on each of this factory's active connections."""
        self.log.info("Reloading settings")
        self.settings.replace(dct)
        for protocol in self.protocols:
            protocol.after_reload()
Example #4
 def take_damage(self, amount):
     self.damage += amount
     if self.damage >= self.health and not self.has_exploded:
         # create an explosion
         self.has_exploded = True
         Explosion(self._body.position, BLOCK_SIZE,
                   velocity=self._body.velocity)
         # remove joints
         for block in self._adjacent_blocks:
             toremove = self._joints & block._joints
             for joint in toremove:
                 block._joints.remove(joint)
         SPACE.remove(*self._joints)
         self._joints = WeakSet()
         # remove ties to the construction
         for block in self._adjacent_blocks:
             block._adjacent_blocks.remove(self)
         self._adjacent_blocks = WeakSet()
     elif self.damage >= self.health * 2:
         Explosion(self._body.position, BLOCK_SIZE,
                   velocity=self._body.velocity)
         for i in range(random.randint(1,5)):
             Resource(self)
         SPACE.remove(self._body, self._shape)
         SPACE.blocks.remove(self)
         if self in SPACE.controllable_blocks:
             SPACE.controllable_blocks.remove(self)
         if self in SPACE.controller_blocks:
             SPACE.controller_blocks.remove(self)
Example #5
class LazyInvalidation(object):
  def __enter__(self):
    assert threading.current_thread() == MAIN_THREAD
    assert not evaluation_stack
    self._watchMap = WeakKeyDictionary()
    self._watchable_objects = WeakSet()
    global invalidation_strategy
    invalidation_strategy._invalidate_all()
    invalidation_strategy = self

  def _watch_object(self, object):
    if object.watcher is not None and object.watcher not in self._watchMap:
      self._watchMap[object.watcher] = WeakWatchIntermediary(object, object.watcher)

  def _add_dependency(self, object):
    if evaluation_stack:
      evaluation_stack[-1].deps.append(object)

  def _unwatch_object(self, object):
    object.invalidate()
    self._watchable_objects.discard(object)

  def __exit__(self, type, value, traceback):
    global invalidation_strategy
    invalidation_strategy = LazyConstants()
    for intermediary in self._watchMap.values():
      intermediary.release()
    self._watchMap.clear()

  def _invalidate_all(self):
    raise TypeError('Cannot nest lazy_invalidation contexts')
Example #6
 def _setup(self):
     self.children = WeakSet()
     self.daemon_children = WeakSet()
     self.exception = None
     self.tb_list = None
     self.stack_list = None
     self.except_func = None
     self.finally_func = None
Example #7
 def test_init(self):
     s = WeakSet()
     s.__init__(self.items)
     self.assertEqual(s, self.s)
     s.__init__(self.items2)
     self.assertEqual(s, WeakSet(self.items2))
     self.assertRaises(TypeError, s.__init__, s, 2)
     self.assertRaises(TypeError, s.__init__, 1)
Example #8
 def reset(self):
     for call in self.calls:
         if call.active():
             call.cancel()
     for loop in self.loops:
         if loop.running:
             loop.stop()
     self.calls = WeakSet()
     self.loops = WeakSet()
Example #9
 def setUp(self):
     # need to keep references to them
     self.items = [ustr(c) for c in ("a", "b", "c")]
     self.items2 = [ustr(c) for c in ("x", "y", "z")]
     self.letters = [ustr(c) for c in string.ascii_letters]
     self.s = WeakSet(self.items)
     self.d = dict.fromkeys(self.items)
     self.obj = ustr("F")
     self.fs = WeakSet([self.obj])
Example #10
 def setUp(self):
     # need to keep references to them
     self.items = [SomeClass(c) for c in ('a', 'b', 'c')]
     self.items2 = [SomeClass(c) for c in ('x', 'y', 'z')]
     self.letters = [SomeClass(c) for c in string.ascii_letters]
     self.s = WeakSet(self.items)
     self.d = dict.fromkeys(self.items)
     self.obj = SomeClass('F')
     self.fs = WeakSet([self.obj])
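The "need to keep references to them" comment is the crux of all of these fixtures: a WeakSet only retains members while something else holds a strong reference to them. A short sketch of the behavior under test:

import gc
from weakref import WeakSet

class SomeClass(object):
    def __init__(self, name):
        self.name = name

items = [SomeClass(c) for c in "abc"]
ws = WeakSet(items)
assert len(ws) == 3
del items[0]      # drop the only strong reference to one member
gc.collect()      # immediate on CPython; needed on PyPy/Jython
assert len(ws) == 2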
Example #11
    def __init__(self, **kwargs):
        self.__functions = WeakSet()
        self.__methods = WeakKeyDictionary()
        self.__signals = WeakSet()
        self.__type = kwargs.get("type", Signal.Auto)

        self.__emitting = False
        self.__connect_queue = []
        self.__disconnect_queue = []
Example #12
 def subscribe(self, event, listener_object):
     event_handler_name = "on_%s_handler" % event
     if not hasattr(listener_object, event_handler_name):
         raise AttributeError("Listener object has no '%s' event handler." % event)
     try:
         event_listeners = self._listeners[event]
     except KeyError:
         event_listeners = WeakSet()
         self._listeners[event] = event_listeners
     event_listeners.add(listener_object)
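A usage sketch, assuming subscribe() above belongs to a dispatcher class (here called EventBus, a hypothetical name) whose _listeners dict maps event names to WeakSets of listener objects:

class Listener(object):
    def on_save_handler(self, doc):
        print("saving", doc)

bus = EventBus()                   # hypothetical owner of _listeners
listener = Listener()              # must stay referenced to stay subscribed
bus.subscribe("save", listener)    # ok: Listener defines on_save_handler
bus.subscribe("load", listener)    # raises AttributeError: no on_load_handler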
Example #13
class Channel:

    """A communication channel."""

    def __init__(self, template="{msg}", members=None, logged=False):
        """Create a new channel.

        :param str template: A formatting string to use as a message template
        :param members: Optional, a list of sessions to fill members with;
                        if callable, it should return a list of sessions
                        on-demand in place of member tracking
        :param bool logged: Whether to log messages to the console or not
        :returns None:

        """
        self.template = template
        self.logged = logged
        if callable(members):
            self.members = members
        else:
            self.members = WeakSet()
            if members:
                for session in members:
                    self.members.add(session)

    def send(self, data, *more, sep=" ", context=None, members=None):
        """Send a message to a channel.

        `data` and all members of `more` will be converted to strings
        and joined together by `sep` via the joins function.

        :param any data: An initial chunk of data
        :param any more: Optional, any additional data to send
        :param str sep: Optional, a separator to join the resulting output by
        :param dict context: Optional, additional context to be passed to
                             the template formatter
        :param members: Optional, a list of sessions to use in place of the
                        channel's own list; if callable, it should return a list
                        of sessions to use
        :returns None:

        """
        if not members:
            members = self.members
        if callable(members):
            members = members()
        message = joins(data, *more, sep=sep)
        context = context or {}
        message = self.template.format(msg=message, **context)
        if self.logged:
            log.info(strip_caret_codes(message))
        for session in members:
            session.send(message)
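A usage sketch, assuming the Channel class above and that the module's joins() helper behaves roughly like sep.join(map(str, ...)); Session is a stand-in for the session objects the channel expects:

class Session(object):
    def __init__(self, name):
        self.name = name
    def send(self, message):
        print(self.name, "<-", message)

chat = Channel(template="[chat] {msg}")
alice, bob = Session("alice"), Session("bob")
chat.members.add(alice)            # members is a WeakSet, so sessions
chat.members.add(bob)              # drop out once they are garbage
chat.send("hello", "world")        # both print: [chat] hello world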
Example #14
 def test_len(self):
     obj = Object()
     obj2 = Object()
     ws = WeakSet([obj])
     self.assertIn(obj, ws)
     self.assertEqual(len(ws), 1)
     ws.add(obj2)
     self.assertEqual(len(ws), 2)
     self.assertIn(obj2, ws)
     del obj
     self.assertEqual(len(ws), 1)
     self.assertIn(obj2, ws)
Example #15
class Subject(object):
    def __init__(self, name):
        self.name = name
        self._observers = WeakSet()

    def register_observer(self, observer):
        self._observers.add(observer)
        print('observer {0} now listening on {1}'.format(observer.name, self.name))

    def notify_observers(self, msg):
        print('{0} notifying observers about {1}'.format(self.__class__.__name__, msg))
        for observer in self._observers:
            observer.notify(self, msg)
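A usage sketch for the Subject above; the observer contract is duck-typed (anything with a notify(subject, msg) method works), and the observer must stay strongly referenced somewhere or the WeakSet will let it go:

class Observer(object):
    def __init__(self, name):
        self.name = name
    def notify(self, subject, msg):
        print("{0} saw {1} from {2}".format(self.name, msg, subject.name))

feed = Subject("stock-feed")
logger = Observer("logger")        # keep this reference alive
feed.register_observer(logger)
feed.notify_observers("tick")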
Example #16
class LazyResult(object):
  inited = False
  deps = None  # Stores hard references to upstream dependencies for invalidation purposes

  def __init__(self, watcher=None):
    self.watcher = watcher

  def invalidate(self):
    if not hasattr(self, '_value'):
      return
    if threading.current_thread() != MAIN_THREAD or evaluation_stack:
      invalidation_queue.append(self)
      invalidation_event.set()
      return
    del self._value
    self.deps = None
    try:
      refs = tuple(self._refs)
    except AttributeError:
      return
    self._refs.clear()
    for ref in refs:
      ref.invalidate()

  def set(self, value):
    assert not hasattr(self, '_value')
    self._value = (value, None)

  def get(self, f, *args):
    assert threading.current_thread() == MAIN_THREAD
    if not self.inited:
      invalidation_strategy._watch_object(self)
      self.inited = True
    if evaluation_stack:
      if not hasattr(self, '_refs'):
        self._refs = WeakSet()
      self._refs.add(evaluation_stack[-1])
    try:
      value, e = self._value
    except AttributeError:
      with LazyEvaluationContext(self):
        try:
          value = f(*args)
          self._value = (value, None)
          return value
        except Exception as e:
          self._value = (None, e)
          raise
    if e:
      raise e
    return value
Example #17
    def test_weak_destroy_and_mutate_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        items = [SomeClass(c) for c in string.ascii_letters]
        s = WeakSet(items)
        @contextlib.contextmanager
        def testcontext():
            try:
                it = iter(s)
                next(it)
                # Schedule an item for removal and recreate it
                u = SomeClass(str(items.pop()))
                test_support.gc_collect()      # just in case
                yield u
            finally:
                it = None           # should commit all removals

        test_support.gc_collect()

        with testcontext() as u:
            self.assertNotIn(u, s)
        with testcontext() as u:
            self.assertRaises(KeyError, s.remove, u)
        self.assertNotIn(u, s)
        with testcontext() as u:
            s.add(u)
        self.assertIn(u, s)
        t = s.copy()
        with testcontext() as u:
            s.update(t)
        self.assertEqual(len(s), len(t))
        with testcontext() as u:
            s.clear()
        self.assertEqual(len(s), 0)
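What the test guards can be shown in a few lines: while an iterator is live, a WeakSet defers removal of dead members (via an internal _IterationGuard) instead of mutating the set under the iterator, and the pending removals are committed once the last iterator goes away. A sketch, assuming CPython's prompt reference counting:

import gc
from weakref import WeakSet

class SomeClass(object):
    def __init__(self, name):
        self.name = name

items = [SomeClass(c) for c in "abc"]
s = WeakSet(items)
it = iter(s)
next(it)          # iteration in progress: removals are now deferred
items.pop()       # kill a strong reference mid-iteration
gc.collect()      # the dead entry is queued, not removed -> no crash
it = None         # dropping the iterator commits pending removals
print(len(s))     # 2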
Example #18
class Environments(object):
    """ A common object for all environments in a request. """
    def __init__(self):
        self.envs = WeakSet()           # weak set of environments
        self.todo = {}                  # recomputations {field: [records]}
        self.mode = False               # flag for draft/onchange

    def add(self, env):
        """ Add the environment `env`. """
        self.envs.add(env)

    def __iter__(self):
        """ Iterate over environments. """
        return iter(self.envs)
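A tiny sketch of the pattern (Env stands in for the real environment objects): the registry can iterate all live environments without keeping any of them alive by itself:

class Env(object):
    pass

envs = Environments()
e = Env()
envs.add(e)
print(len(list(envs)))      # 1
del e                       # the entry disappears with the object
print(len(list(envs)))      # 0 on CPython, immediately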
Example #19
 def test_intersection(self):
     s = WeakSet(self.letters)
     i = s.intersection(self.items2)
     for c in self.letters:
         self.assertEqual(c in i, c in self.items2 and c in self.letters)
     self.assertEqual(s, WeakSet(self.letters))
     self.assertEqual(type(i), WeakSet)
     for C in set, frozenset, dict.fromkeys, list, tuple:
         x = WeakSet([])
         self.assertEqual(i.intersection(C(self.items)), x)
     self.assertEqual(len(i), len(self.items2))
     self.items2.pop()
     test_support.gc_collect()
     self.assertEqual(len(list(i)), len(list(self.items2)))
Example #20
import inspect
import warnings
from weakref import WeakKeyDictionary, WeakSet

class Signal(object):
    def __init__(self):
        self._functions = WeakSet()
        self._methods = WeakKeyDictionary()

    def __call__(self, *args, **kargs):
        # Call handler functions
        to_be_removed = []
        for func in self._functions.copy():
            try:
                func(*args, **kargs)
            except RuntimeError:
                warnings.warn('Signals func->RuntimeError: func "{}" will be removed.'.format(func))
                to_be_removed.append(func)

        for remove in to_be_removed:
            self._functions.discard(remove)

        # Call handler methods
        to_be_removed = []
        emitters = self._methods.copy()
        for obj, funcs in emitters.items():
            msg_debug('obj is type "{}"'.format(type(obj)))  # msg_debug: project-local debug helper
            for func in funcs.copy():
                try:
                    func(obj, *args, **kargs)
                except RuntimeError:
                    warnings.warn('Signals methods->RuntimeError, obj.func "{}.{}" will be removed'.format(obj, func))
                    to_be_removed.append((obj, func))

        for obj, func in to_be_removed:
            self._methods[obj].discard(func)

    def connect(self, slot):
        if inspect.ismethod(slot):
            if slot.__self__ not in self._methods:
                self._methods[slot.__self__] = set()

            self._methods[slot.__self__].add(slot.__func__)

        else:
            self._functions.add(slot)

    def disconnect(self, slot):
        if inspect.ismethod(slot):
            if slot.__self__ in self._methods:
                self._methods[slot.__self__].remove(slot.__func__)
        else:
            if slot in self._functions:
                self._functions.remove(slot)

    def clear(self):
        self._functions.clear()
        self._methods.clear()
Example #21
class ObservableEvent(object):
    def __init__(self, weakref=False):
        self.listeners = WeakSet() if weakref else set()

    def __iadd__(self, ob):
        self.listeners.add(ob)
        return self

    def __isub__(self, ob):
        self.listeners.discard(ob)
        return self

    def notify(self, *a, **k):
        for ob in list(self.listeners):
            ob(*a, **k)
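notify() iterates over a list() snapshot, so listeners may subscribe or unsubscribe during dispatch without corrupting the iteration; passing weakref=True swaps the backing set for a WeakSet so dead listeners are dropped automatically. A usage sketch:

evt = ObservableEvent()            # strong references by default

def on_change(value):
    print("changed:", value)

evt += on_change                   # __iadd__ registers the listener
evt.notify(42)                     # changed: 42
evt -= on_change                   # __isub__ removes it
evt.notify(43)                     # no output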
Example #22
 def setUp(self):
     # need to keep references to them
     self.items = [ustr(c) for c in ('a', 'b', 'c')]
     self.items2 = [ustr(c) for c in ('x', 'y', 'z')]
     self.ab_items = [ustr(c) for c in 'ab']
     self.abcde_items = [ustr(c) for c in 'abcde']
     self.def_items = [ustr(c) for c in 'def']
     self.ab_weakset = WeakSet(self.ab_items)
     self.abcde_weakset = WeakSet(self.abcde_items)
     self.def_weakset = WeakSet(self.def_items)
     self.letters = [ustr(c) for c in string.ascii_letters]
     self.s = WeakSet(self.items)
     self.d = dict.fromkeys(self.items)
     self.obj = ustr('F')
     self.fs = WeakSet([self.obj])
Example #23
    def __init__(self, parent, isGroup, order):
        """
        Constructs a resource node.

        @param parent: Node|None
            The parent node of this node, can be None if is a root node.
        @param isGroup: boolean
            True if the node represents a group of models, False otherwise.
        @param order: integer
            The order index of the node; it is used to correctly order the children so that
            path matching searches them in the proper order.
        @ivar root: Node
            The root node.
        @ivar get: Invoker
            The invoker that provides the data elements fetch, populated by assemblers.
        @ivar insert: Invoker
            The invoker that provides the insert of elements, populated by assemblers.
        @ivar update: Invoker
            The invoker that provides the update of elements, populated by assemblers.
        @ivar delete: Invoker
            The invoker that provides the deletion of elements, populated by assemblers.
        """
        assert parent is None or isinstance(parent, Node), "Invalid parent %s, can be None" % parent
        assert isinstance(isGroup, bool), "Invalid is group flag %s" % isGroup
        assert isinstance(order, int), "Invalid order %s" % order
        self.parent = parent
        if parent is None:
            self.root = self
        else:
            self.root = parent.root
        self.isGroup = isGroup
        self.order = order

        self._listeners = WeakSet()
        self._structure = WeakSet()

        self.get = None
        self.insert = None
        self.update = None
        self.delete = None

        self._children = []
        if parent is not None:
            assert isinstance(parent, Node), "Invalid parent node %s" % parent
            assert self not in parent._children, "Already contains child node %s" % self
            parent._children.append(self)
            parent._children.sort(key=lambda node: node.order)
            parent._onChildAdded(self)
Example #24
class Channel:

    """A communication channel."""

    def __init__(self, header="", msg_color="^~", members=None):
        """Create a new channel.

        :param str header: A block of text to prepend to all messages
        :param str msg_color: A color code to use for messages
        :param members: Optional, a list of sessions to prefill members with;
                        if callable, it should return a list of sessions
                        on-demand in place of member tracking
        :returns None:

        """
        self.header = header
        self.msg_color = msg_color
        if callable(members):
            self.members = members
        else:
            self.members = WeakSet()
            if members:
                for session in members:
                    self.members.add(session)

    def send(self, data, *more, sep=" ", members=None):
        """Send a message to a channel.

        `data` and all members of `more` will be converted to strings
        and joined together by `sep` via the joins function.

        :param any data: An initial chunk of data
        :param any more: Optional, any additional data to send
        :param str sep: Optional, a separator to join the resulting output by

        :param members: Optional, a list of sessions to use in place of the
                        channel's own list; if callable, it should return a list
                        of sessions to use
        :returns None:

        """
        if not members:
            members = self.members
        if callable(members):
            members = members()
        msg = joins(data, *more, sep=sep)
        for session in members:
            session.send(self.header, " ", self.msg_color, msg, "^~", sep="")
Example #25
    def __init__(self):
        self._loop_lock = Lock()
        self._started = False
        self._shutdown = False

        self._conns_lock = Lock()
        self._conns = WeakSet()
Example #26
 def __init__(self, connection, poll_gap=0.1, include_idle=False):
     self._connection = connection
     self._handlers = []
     self._handler_instances = WeakSet()
     self._poll_gap = poll_gap
     self._include_idle = include_idle
     self._track_players = {}
Example #27
    def __init__(self, dep_cls, *init_args, **init_kwargs):
        self.dep_cls = dep_cls
        self.args = init_args
        self.kwargs = init_kwargs

        # keep a weak reference to every created instance
        self.instances = WeakSet()
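This is the track-without-pinning idiom: the factory can later iterate every instance it produced (for reconfiguration, shutdown, statistics) while still letting unused instances be garbage-collected. A sketch, assuming this __init__ belongs to a provider class, here called DependencyProvider (a hypothetical name):

class Client(object):
    def __init__(self, url):
        self.url = url

provider = DependencyProvider(Client, "amqp://localhost")  # hypothetical class
client = provider.dep_cls(*provider.args, **provider.kwargs)
provider.instances.add(client)     # tracked, but not kept alive
print(len(provider.instances))     # 1 while `client` is referenced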
Example #28
File: broker.py Project: p2k/CPL
 def addRangeObserver(self, rangeObserver):
     assert IRangeObserver.providedBy(rangeObserver)
     
     if self.__robservers is None:
         self.__robservers = WeakSet()
     
     self.__robservers.add(rangeObserver)
Example #29
    def __init__(self, rpc, name, target=None):
        self.rpc = rpc
        self.name = '%s.%s' % (__name__, name)

        self.stats = {
            'requests': 0,
            'already_locked': 0,
            'try_failed': 0,
            'acquired': 0,
            'released': 0,
            'timeout': 0,
            'unexpected': 0,
            'failed': 0,
            'failed_timeout': 0,
            'exceptions': 0,
        }

        self.lock = Semaphore()
        self.locks = WeakValueDictionary()
        self.waiting = WeakSet()

        self._get_lock = rpc.add(self._get_lock, '%s.get_lock' % self.name)
        self._get_lock_stream_sync = lambda *args: self._get_lock.stream_sync(target, *args)

        self._is_locked = rpc.add(self._is_locked, '%s.is_locked' % self.name)
        self._is_locked_execute = lambda *args: self._is_locked.execute(target, *args)
Example #30
    def exit(self, exc: Optional[Exception] = None):
        old_task = self._task
        try:
            if self.exited:
                return
            self._task = None
            if exc:
                logger.debug("%s is exiting because %s", self, exc)
            else:
                logger.debug("%s is exiting normally", self)
            for link in self._links:
                link.exit(exc)

            for monitor in self._monitors:
                method = getattr(monitor, 'on_monitored_exit', None)
                if method:
                    method(self, exc)
                else:
                    logger.warning("%s was monitoring an agent %s, but doesn't implement on_monitored_exit",
                                   monitor, self)
        finally:
            if old_task is not None:
                old_task.cancel()
                self._links = WeakSet()
                self._monitors = WeakSet()
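The monitor lookup is duck-typed: exit() probes each monitor with getattr for an on_monitored_exit method and logs a warning when it is missing. A minimal conforming monitor might look like this (the class name is illustrative):

class CrashLogger(object):
    def on_monitored_exit(self, agent, exc):
        if exc:
            print("agent crashed:", agent, exc)
        else:
            print("agent exited cleanly:", agent)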
Example #31
 def __init__(self):
     self.envs = WeakSet()  # weak set of environments
     self.cache = Cache()  # cache for all records
     self.todo = {}  # recomputations {field: [records]}
     self.mode = False  # flag for draft/onchange
     self.recompute = True
Example #32
class _state(param.Parameterized):
    """
    Holds global state associated with running apps, allowing running
    apps to indicate their state to a user.
    """

    base_url = param.String(default='/', readonly=True, doc="""
       Base URL for all server paths.""")

    busy = param.Boolean(default=False, readonly=True, doc="""
       Whether the application is currently busy processing a user
       callback.""")

    cache = param.Dict(default={}, doc="""
       Global location you can use to cache large datasets or expensive computation results
       across multiple client sessions for a given server.""")

    encryption = param.Parameter(default=None, doc="""
       Object with encrypt and decrypt methods to support encryption
       of secret variables including OAuth information.""")

    session_info = param.Dict(default={'total': 0, 'live': 0,
                                       'sessions': OrderedDict()}, doc="""
       Tracks information and statistics about user sessions.""")

    webdriver = param.Parameter(default=None, doc="""
      Selenium webdriver used to export bokeh models to pngs.""")

    _curdoc = param.ClassSelector(class_=Document, doc="""
        The bokeh Document for which a server event is currently being
        processed.""")

    # Whether to hold comm events
    _hold = False

    # Used to ensure that events are not scheduled from the wrong thread
    _thread_id = None

    _comm_manager = _CommManager

    # Locations
    _location = None # Global location, e.g. for notebook context
    _locations = WeakKeyDictionary() # Server locations indexed by document

    # An index of all currently active views
    _views = {}

    # For templates to keep reference to their main root
    _fake_roots = []

    # An index of all currently active servers
    _servers = {}

    # Jupyter display handles
    _handles = {}

    # Dictionary of callbacks to be triggered on app load
    _onload = WeakKeyDictionary()

    # Stores a set of locked Websockets, reset after every change event
    _locks = WeakSet()

    # Indicators listening to the busy state
    _indicators = []

    # Endpoints
    _rest_endpoints = {}

    def __repr__(self):
        server_info = []
        for server, panel, docs in self._servers.values():
            server_info.append(
                "{}:{:d} - {!r}".format(server.address or "localhost", server.port, panel)
            )
        if not server_info:
            return "state(servers=[])"
        return "state(servers=[\n  {}\n])".format(",\n  ".join(server_info))

    def _unblocked(self, doc):
        thread = threading.current_thread()
        thread_id = thread.ident if thread else None
        return doc is self.curdoc and self._thread_id == thread_id

    @param.depends('busy', watch=True)
    def _update_busy(self):
        for indicator in self._indicators:
            indicator.value = self.busy

    def _init_session(self, event):
        if not self.curdoc.session_context:
            return
        session_id = self.curdoc.session_context.id
        self.session_info['live'] += 1
        session_info = self.session_info['sessions'][session_id]
        session_info.update({
            'rendered': dt.datetime.now().timestamp(),
        })

    def _get_callback(self, endpoint):
        _updating = {}
        def link(*events):
            event = events[0]
            obj = event.cls if event.obj is None else event.obj
            parameterizeds = self._rest_endpoints[endpoint][0]
            if obj not in parameterizeds:
                return
            updating = _updating.get(id(obj), [])
            values = {event.name: event.new for event in events
                      if event.name not in updating}
            if not values:
                return
            _updating[id(obj)] = list(values)
            for parameterized in parameterizeds:
                if parameterized in _updating:
                    continue
                try:
                    parameterized.param.set_param(**values)
                except Exception:
                    raise
                finally:
                    if id(obj) in _updating:
                        not_updated = [p for p in _updating[id(obj)] if p not in values]
                        _updating[id(obj)] = not_updated
        return link

    def _on_load(self, event):
        for cb in self._onload.pop(self.curdoc, []):
            cb()

    #----------------------------------------------------------------
    # Public Methods
    #----------------------------------------------------------------

    def as_cached(self, key, fn, **kwargs):
        """
        Caches the return value of a function, memoizing on the given
        key and supplied keyword arguments.

        Note: Keyword arguments must be hashable.

        Arguments
        ---------
        key: (str)
          The key to cache the return value under.
        fn: (callable)
          The function or callable whose return value will be cached.
        **kwargs: dict
          Additional keyword arguments to supply to the function,
          which will be memoized over as well.

        Returns
        -------
        Returns the value returned by the function or, if already
        cached, the value stored in the cache.
        """
        key = (key,)+tuple((k, v) for k, v in sorted(kwargs.items()))
        if key in self.cache:
            ret = self.cache.get(key)
        else:
            ret = self.cache[key] = fn(**kwargs)
        return ret

    def add_periodic_callback(self, callback, period=500, count=None,
                              timeout=None, start=True):
        """
        Schedules a periodic callback to be run at an interval set by
        the period. Returns a PeriodicCallback object with the option
        to stop and start the callback.

        Arguments
        ---------
        callback: callable
          Callable function to be executed at periodic interval.
        period: int
          Interval in milliseconds at which callback will be executed.
        count: int
          Maximum number of times callback will be invoked.
        timeout: int
          Timeout in seconds when the callback should be stopped.
        start: boolean (default=True)
          Whether to start callback immediately.

        Returns
        -------
        Return a PeriodicCallback object with start and stop methods.
        """
        from .callbacks import PeriodicCallback

        cb = PeriodicCallback(callback=callback, period=period,
                              count=count, timeout=timeout)
        if start:
            cb.start()
        return cb

    def kill_all_servers(self):
        """Stop all servers and clear them from the current state."""
        for server_id in self._servers:
            try:
                self._servers[server_id][0].stop()
            except AssertionError:  # can't stop a server twice
                pass
        self._servers = {}

    def onload(self, callback):
        """
        Callback that is triggered when a session has been served.
        """
        if self.curdoc is None:
            callback()
            return
        if self.curdoc not in self._onload:
            self._onload[self.curdoc] = []
            self.curdoc.on_event('document_ready', self._on_load)
        self._onload[self.curdoc].append(callback)

    def publish(self, endpoint, parameterized, parameters=None):
        """
        Publish parameters on a Parameterized object as a REST API.

        Arguments
        ---------
        endpoint: str
          The endpoint at which to serve the REST API.
        parameterized: param.Parameterized
          The Parameterized object to publish parameters from.
        parameters: list(str) or None
          A subset of parameters on the Parameterized to publish.
        """
        if parameters is None:
            parameters = list(parameterized.param)
        if endpoint.startswith('/'):
            endpoint = endpoint[1:]
        if endpoint in self._rest_endpoints:
            parameterizeds, old_parameters, cb = self._rest_endpoints[endpoint]
            if set(parameters) != set(old_parameters):
                raise ValueError("Param REST API output parameters must match across sessions.")
            values = {k: v for k, v in parameterizeds[0].param.get_param_values() if k in parameters}
            parameterized.param.set_param(**values)
            parameterizeds.append(parameterized)
        else:
            cb = self._get_callback(endpoint)
            self._rest_endpoints[endpoint] = ([parameterized], parameters, cb)
        parameterized.param.watch(cb, parameters)

    def sync_busy(self, indicator):
        """
        Syncs the busy state with an indicator with a boolean value
        parameter.

        Arguments
        ---------
        indicator: An BooleanIndicator to sync with the busy property
        """
        if not isinstance(indicator.param.value, param.Boolean):
            raise ValueError("Busy indicator must have a value parameter"
                             "of Boolean type.")
        self._indicators.append(indicator)

    #----------------------------------------------------------------
    # Public Properties
    #----------------------------------------------------------------

    @property
    def access_token(self):
        from ..config import config
        access_token = self.cookies.get('access_token')
        if access_token is None:
            return None
        access_token = decode_signed_value(config.cookie_secret, 'access_token', access_token)
        if self.encryption is None:
            return access_token.decode('utf-8')
        return self.encryption.decrypt(access_token).decode('utf-8')

    @property
    def app_url(self):
        if not self.curdoc:
            return
        app_url = self.curdoc.session_context.server_context.application_context.url
        app_url = app_url[1:] if app_url.startswith('/') else app_url
        return urljoin(self.base_url, app_url)

    @property
    def curdoc(self):
        if self._curdoc:
            return self._curdoc
        elif _curdoc().session_context:
            return _curdoc()

    @curdoc.setter
    def curdoc(self, doc):
        self._curdoc = doc

    @property
    def cookies(self):
        return self.curdoc.session_context.request.cookies if self.curdoc else {}

    @property
    def headers(self):
        return self.curdoc.session_context.request.headers if self.curdoc else {}

    @property
    def location(self):
        if self.curdoc and self.curdoc not in self._locations:
            from .location import Location
            self._locations[self.curdoc] = loc = Location()
            return loc
        elif self.curdoc is None:
            return self._location
        else:
            return self._locations.get(self.curdoc) if self.curdoc else None

    @property
    def session_args(self):
        return self.curdoc.session_context.request.arguments if self.curdoc else {}

    @property
    def user(self):
        from ..config import config
        user = self.cookies.get('user')
        if user is None or config.cookie_secret is None:
            return None
        return decode_signed_value(config.cookie_secret, 'user', user).decode('utf-8')

    @property
    def user_info(self):
        from ..config import config
        id_token = self.cookies.get('id_token')
        if id_token is None or config.cookie_secret is None:
            return None
        id_token = decode_signed_value(config.cookie_secret, 'id_token', id_token)
        if self.encryption is None:
            id_token = id_token
        else:
            id_token = self.encryption.decrypt(id_token)
        if b"." in id_token:
            signing_input, _ = id_token.rsplit(b".", 1)
            _, payload_segment = signing_input.split(b".", 1)
        else:
            payload_segment = id_token
        return json.loads(base64url_decode(payload_segment).decode('utf-8'))
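Of the pieces above, as_cached is the one most applications touch directly; a sketch of using it to share an expensive result across sessions (load_table is a hypothetical loader):

import panel as pn

def load_table(name):
    # expensive query or computation would go here
    return name.upper()            # placeholder result

# The first call computes and stores the value; later sessions using the
# same key and kwargs get the cached result back.
result = pn.state.as_cached("sales", load_table, name="sales")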
Example #33
class ProcessPool(ProcessPoolExecutor, NewExecutorPoolMixin):
    """Simple ProcessPool covered ProcessPoolExecutor.
    ::

        from torequests.main import ProcessPool
        import time

        pool = ProcessPool()


        def use_submit(i):
            time.sleep(i)
            result = 'use_submit: %s' % i
            print(result)
            return result


        def main():
            tasks = [pool.submit(use_submit, i) for i in (2, 1, 0)]
            # pool.x can be ignored
            pool.x
            results = [i.x for i in tasks]
            print(results)


        if __name__ == '__main__':
            main()

        # ['use_submit: 2', 'use_submit: 1', 'use_submit: 0']
        # use_submit: 0
        # use_submit: 1
        # use_submit: 2
    """
    def __init__(self,
                 n=None,
                 timeout=None,
                 default_callback=None,
                 catch_exception=True,
                 *args,
                 **kwargs):
        n = n or kwargs.pop("max_workers", None)
        if PY2 and n is None:
            # on python2, max_workers must not be None
            n = self._get_cpu_count() or 1
        super(ProcessPool, self).__init__(n, *args, **kwargs)
        self._timeout = timeout
        self.default_callback = default_callback
        self._all_futures = WeakSet()
        self.catch_exception = catch_exception

    def submit(self, func, *args, **kwargs):
        """Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`"""

        with self._shutdown_lock:
            if PY3 and self._broken:
                raise BrokenProcessPool(
                    "A child process terminated "
                    "abruptly, the process pool is not usable anymore")
            if self._shutdown_thread:
                raise RuntimeError(
                    "cannot schedule new futures after shutdown")
            callback = kwargs.pop("callback", self.default_callback)
            future = NewFuture(
                self._timeout,
                args,
                kwargs,
                callback=callback,
                catch_exception=self.catch_exception,
            )
            w = _WorkItem(future, func, args, kwargs)
            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            self._result_queue.put(None)
            self._start_queue_management_thread()
            if PY2:
                self._adjust_process_count()
            self._all_futures.add(future)
            return future

    def async_func(self, *args):
        """Decorator mode not support for ProcessPool for _pickle.PicklingError."""
        raise NotImplementedError
Example #34
class Consumer(Service, ConsumerT):
    """Base Consumer."""

    app: AppT

    logger = logger

    #: Tuple of exception types that may be raised when the
    #: underlying consumer driver is stopped.
    consumer_stopped_errors: ClassVar[Tuple[Type[BaseException], ...]] = ()

    # Mapping of TP to list of gap in offsets.
    _gap: MutableMapping[TP, List[int]]

    # Mapping of TP to list of acked offsets.
    _acked: MutableMapping[TP, List[int]]

    #: Fast lookup to see if tp+offset was acked.
    _acked_index: MutableMapping[TP, Set[int]]

    #: Keeps track of the currently read offset in each TP
    _read_offset: MutableMapping[TP, Optional[int]]

    #: Keeps track of the currently committed offset in each TP.
    _committed_offset: MutableMapping[TP, Optional[int]]

    #: The consumer.wait_empty() method will set this to be notified
    #: when something acks a message.
    _waiting_for_ack: Optional[asyncio.Future] = None

    #: Used by .commit to ensure only one thread is committing at a time.
    #: Other threads starting to commit while a commit is already active
    #: will wait for the original request to finish, and do nothing.
    _commit_fut: Optional[asyncio.Future] = None

    #: Set of unacked messages: that is messages that we started processing
    #: and that we MUST attempt to complete processing of, before
    #: shutting down or resuming a rebalance.
    _unacked_messages: MutableSet[Message]

    #: Time of when the consumer was started.
    _time_start: float

    # How often to poll and track log end offsets.
    _end_offset_monitor_interval: float

    _commit_every: Optional[int]
    _n_acked: int = 0

    _active_partitions: Optional[Set[TP]]
    _paused_partitions: Set[TP]
    _buffered_partitions: Set[TP]

    flow_active: bool = True
    can_resume_flow: Event

    def __init__(
        self,
        transport: TransportT,
        callback: ConsumerCallback,
        on_partitions_revoked: PartitionsRevokedCallback,
        on_partitions_assigned: PartitionsAssignedCallback,
        *,
        commit_interval: float = None,
        commit_livelock_soft_timeout: float = None,
        loop: asyncio.AbstractEventLoop = None,
        **kwargs: Any,
    ) -> None:
        assert callback is not None
        self.transport = transport
        self.app = self.transport.app
        self.in_transaction = self.app.in_transaction
        self.callback = callback
        self._on_message_in = self.app.sensors.on_message_in
        self._on_partitions_revoked = on_partitions_revoked
        self._on_partitions_assigned = on_partitions_assigned
        self._commit_every = self.app.conf.broker_commit_every
        self.scheduler = self.app.conf.ConsumerScheduler()  # type: ignore
        self.commit_interval = commit_interval or self.app.conf.broker_commit_interval
        self.commit_livelock_soft_timeout = (
            commit_livelock_soft_timeout
            or self.app.conf.broker_commit_livelock_soft_timeout
        )
        self._gap = defaultdict(list)
        self._acked = defaultdict(list)
        self._acked_index = defaultdict(set)
        self._read_offset = defaultdict(lambda: None)
        self._committed_offset = defaultdict(lambda: None)
        self._unacked_messages = WeakSet()
        self._buffered_partitions = set()
        self._waiting_for_ack = None
        self._time_start = monotonic()
        self._end_offset_monitor_interval = self.commit_interval * 2
        self.randomly_assigned_topics = set()
        self.can_resume_flow = Event()
        self._reset_state()
        super().__init__(loop=loop or self.transport.loop, **kwargs)
        self.transactions = self.transport.create_transaction_manager(
            consumer=self,
            producer=self.app.producer,
            beacon=self.beacon,
            loop=self.loop,
        )

    def on_init_dependencies(self) -> Iterable[ServiceT]:
        """Return list of services this consumer depends on."""
        # We start the TransactionManager only if
        # processing_guarantee='exactly_once'
        if self.in_transaction:
            return [self.transactions]
        return []

    def _reset_state(self) -> None:
        self._active_partitions = None
        self._paused_partitions = set()
        self._buffered_partitions = set()
        self.can_resume_flow.clear()
        self.flow_active = True
        self._time_start = monotonic()

    async def on_restart(self) -> None:
        """Call when the consumer is restarted."""
        self._reset_state()
        self.on_init()

    def _get_active_partitions(self) -> Set[TP]:
        tps = self._active_partitions
        if tps is None:
            return self._set_active_tps(self.assignment())
        assert all(isinstance(x, TP) for x in tps)
        return tps

    def _set_active_tps(self, tps: Set[TP]) -> Set[TP]:
        xtps = self._active_partitions = ensure_TPset(tps)  # copy
        xtps.difference_update(self._paused_partitions)
        return xtps

    def on_buffer_full(self, tp: TP) -> None:
        active_partitions = self._get_active_partitions()
        active_partitions.discard(tp)
        self._buffered_partitions.add(tp)

    def on_buffer_drop(self, tp: TP) -> None:
        buffered_partitions = self._buffered_partitions
        if tp in buffered_partitions:
            active_partitions = self._get_active_partitions()
            active_partitions.add(tp)
            buffered_partitions.discard(tp)

    @abc.abstractmethod
    async def _commit(self, offsets: Mapping[TP, int]) -> bool:  # pragma: no cover
        ...

    async def perform_seek(self) -> None:
        """Seek all partitions to their current committed position."""
        read_offset = self._read_offset
        _committed_offsets = await self.seek_to_committed()
        read_offset.update(
            {
                tp: offset if offset is not None and offset >= 0 else None
                for tp, offset in _committed_offsets.items()
            }
        )
        committed_offsets = {
            ensure_TP(tp): offset if offset else None
            for tp, offset in _committed_offsets.items()
            if offset is not None
        }
        self._committed_offset.update(committed_offsets)

    @abc.abstractmethod
    async def seek_to_committed(self) -> Mapping[TP, int]:
        """Seek all partitions to their committed offsets."""
        ...

    async def seek(self, partition: TP, offset: int) -> None:
        """Seek partition to specific offset."""
        self.log.dev("SEEK %r -> %r", partition, offset)
        # reset livelock detection
        await self._seek(partition, offset)
        # set new read offset so we will reread messages
        self._read_offset[ensure_TP(partition)] = offset if offset else None

    @abc.abstractmethod
    async def _seek(self, partition: TP, offset: int) -> None:
        ...

    def stop_flow(self) -> None:
        """Block consumer from processing any more messages."""
        self.flow_active = False
        self.can_resume_flow.clear()

    def resume_flow(self) -> None:
        """Allow consumer to process messages."""
        self.flow_active = True
        self.can_resume_flow.set()

    def pause_partitions(self, tps: Iterable[TP]) -> None:
        """Pause fetching from partitions."""
        tpset = ensure_TPset(tps)
        self._get_active_partitions().difference_update(tpset)
        self._paused_partitions.update(tpset)

    def resume_partitions(self, tps: Iterable[TP]) -> None:
        """Resume fetching from partitions."""
        tpset = ensure_TPset(tps)
        self._get_active_partitions().update(tps)
        self._paused_partitions.difference_update(tpset)

    @abc.abstractmethod
    def _new_topicpartition(self, topic: str, partition: int) -> TP:  # pragma: no cover
        ...

    def _is_changelog_tp(self, tp: TP) -> bool:
        return tp.topic in self.app.tables.changelog_topics

    @Service.transitions_to(CONSUMER_PARTITIONS_REVOKED)
    async def on_partitions_revoked(self, revoked: Set[TP]) -> None:
        """Call during rebalancing when partitions are being revoked."""
        # NOTE:
        # The ConsumerRebalanceListener is responsible for calling
        # app.on_rebalance_start(), and this must have happened
        # before we get to this point (see aiokafka implementation).
        span = self.app._start_span_from_rebalancing("on_partitions_revoked")
        T = traced_from_parent_span(span)
        with span:
            # see comment in on_partitions_assigned
            # remove revoked partitions from active + paused tps.
            if self._active_partitions is not None:
                self._active_partitions.difference_update(revoked)
            self._paused_partitions.difference_update(revoked)
            await T(self._on_partitions_revoked, partitions=revoked)(revoked)

    @Service.transitions_to(CONSUMER_PARTITIONS_ASSIGNED)
    async def on_partitions_assigned(self, assigned: Set[TP]) -> None:
        """Call during rebalancing when partitions are being assigned."""
        span = self.app._start_span_from_rebalancing("on_partitions_assigned")
        T = traced_from_parent_span(span)
        with span:
            # remove recently revoked tps from set of paused tps.
            self._paused_partitions.intersection_update(assigned)
            # cache set of assigned partitions
            self._set_active_tps(assigned)
            # start callback chain of assigned callbacks.
            #   need to copy set at this point, since we cannot have
            #   the callbacks mutate our active list.
            await T(self._on_partitions_assigned, partitions=assigned)(assigned)
        self.app.on_rebalance_return()

    @abc.abstractmethod
    async def _getmany(
        self, active_partitions: Optional[Set[TP]], timeout: float
    ) -> RecordMap:
        ...

    async def getmany(self, timeout: float) -> AsyncIterator[Tuple[TP, Message]]:
        """Fetch batch of messages from server."""
        # `records` contains a mapping from TP to a list of messages.
        # if there are two agents, consuming from topics t1 and t2,
        # normal order of iteration would be to process each
        # tp in the dict:
        #    for tp, messages in records.items():
        #        for message in messages:
        #           yield tp, message
        #
        # The problem with this, is if we have prefetched 16k records
        # for one partition, the other partitions won't even start processing
        # before those 16k records are completed.
        #
        # So we try round-robin between the tps instead:
        #
        #    iterators: Dict[TP, Iterator] = {
        #        tp: iter(messages)
        #        for tp, messages in records.items()
        #    }
        #    while iterators:
        #        for tp, messages in iterators.items():
        #            yield tp, next(messages)
        #            # remove from iterators if empty.
        #
        # The problem with this implementation is that
        # the records mapping is ordered by TP, so records.keys()
        # will look like this:
        #
        #  TP(topic='bar', partition=0)
        #  TP(topic='bar', partition=1)
        #  TP(topic='bar', partition=2)
        #  TP(topic='bar', partition=3)
        #  TP(topic='foo', partition=0)
        #  TP(topic='foo', partition=1)
        #  TP(topic='foo', partition=2)
        #  TP(topic='foo', partition=3)
        #
        # If there are 100 partitions for each topic,
        # it will process 100 items in the first topic, then 100 items
        # in the other topic, but even worse if partition counts
        # vary greatly, t1 has 1000 partitions and t2
        # has 1 partition, then t2 will end up being starved most of the time.
        #
        # We solve this by going round-robin through each topic.
        records, active_partitions = await self._wait_next_records(timeout)
        if records is None or self.should_stop:
            return

        records_it = self.scheduler.iterate(records)
        to_message = self._to_message  # localize
        if self.flow_active:
            for tp, record in records_it:
                if not self.flow_active:
                    break
                if active_partitions is None or tp in active_partitions:
                    highwater_mark = self.highwater(tp)
                    self.app.monitor.track_tp_end_offset(tp, highwater_mark)
                    # convert timestamp to seconds from int milliseconds.
                    yield tp, to_message(tp, record)

    async def _wait_next_records(
        self, timeout: float
    ) -> Tuple[Optional[RecordMap], Optional[Set[TP]]]:
        if not self.flow_active:
            await self.wait(self.can_resume_flow)
        # Implementation for the Fetcher service.

        is_client_only = self.app.client_only

        active_partitions: Optional[Set[TP]]
        if is_client_only:
            active_partitions = None
        else:
            active_partitions = self._get_active_partitions()

        records: RecordMap = {}
        if is_client_only or active_partitions:
            # Fetch records only if active partitions to avoid the risk of
            # fetching all partitions in the beginning when none of the
            # partitions is paused/resumed.
            records = await self._getmany(
                active_partitions=active_partitions,
                timeout=timeout,
            )
        else:
            # We should still release to the event loop
            await self.sleep(1)
        return records, active_partitions

    @abc.abstractmethod
    def _to_message(self, tp: TP, record: Any) -> ConsumerMessage:
        ...

    def track_message(self, message: Message) -> None:
        """Track message and mark it as pending ack."""
        # add to set of pending messages that must be acked for graceful
        # shutdown.  This is called by transport.Conductor,
        # before delivering messages to streams.
        self._unacked_messages.add(message)
        # call sensors
        self._on_message_in(message.tp, message.offset, message)

    def ack(self, message: Message) -> bool:
        """Mark message as being acknowledged by stream."""
        if not message.acked:
            message.acked = True
            tp = message.tp
            offset = message.offset
            if self.app.topics.acks_enabled_for(message.topic):
                committed = self._committed_offset[tp]
                try:
                    if committed is None or offset > committed:
                        acked_index = self._acked_index[tp]
                        if offset not in acked_index:
                            self._unacked_messages.discard(message)
                            acked_index.add(offset)
                            acked_for_tp = self._acked[tp]
                            acked_for_tp.append(offset)
                            self._n_acked += 1
                            return True
                finally:
                    notify(self._waiting_for_ack)
        return False

    async def _wait_for_ack(self, timeout: float) -> None:
        # arm future so that `ack()` can wake us up
        self._waiting_for_ack = asyncio.Future(loop=self.loop)
        try:
            # wait for `ack()` to wake us up
            await asyncio.wait_for(self._waiting_for_ack, loop=self.loop, timeout=1)
        except (asyncio.TimeoutError, asyncio.CancelledError):  # pragma: no cover
            pass
        finally:
            self._waiting_for_ack = None

    @Service.transitions_to(CONSUMER_WAIT_EMPTY)
    async def wait_empty(self) -> None:
        """Wait for all messages that started processing to be acked."""
        wait_count = 0
        T = traced_from_parent_span()
        while not self.should_stop and self._unacked_messages:
            wait_count += 1
            if not wait_count % 10:  # pragma: no cover
                remaining = [(m.refcount, m) for m in self._unacked_messages]
                self.log.warning("wait_empty: Waiting for tasks %r", remaining)
                self.log.info(
                    "Agent tracebacks:\n%s",
                    self.app.agents.human_tracebacks(),
                )
            self.log.dev("STILL WAITING FOR ALL STREAMS TO FINISH")
            self.log.dev("WAITING FOR %r EVENTS", len(self._unacked_messages))
            gc.collect()
            await T(self.commit)()
            if not self._unacked_messages:
                break
            await T(self._wait_for_ack)(timeout=1)
            self._clean_unacked_messages()

        self.log.dev("COMMITTING AGAIN AFTER STREAMS DONE")
        await T(self.commit_and_end_transactions)()

    def _clean_unacked_messages(self) -> None:
        # remove actually acked messages from weakset.
        self._unacked_messages -= {
            message for message in self._unacked_messages if message.acked
        }

    async def commit_and_end_transactions(self) -> None:
        """Commit all safe offsets and end transaction."""
        await self.commit(start_new_transaction=False)

    async def on_stop(self) -> None:
        """Call when consumer is stopping."""
        if self.app.conf.stream_wait_empty:
            await self.wait_empty()
        else:
            await self.commit_and_end_transactions()

    @Service.task
    async def _commit_handler(self) -> None:
        interval = self.commit_interval

        await self.sleep(interval)
        async for sleep_time in self.itertimer(interval, name="commit"):
            await self.commit()

    @Service.task
    async def _commit_livelock_detector(self) -> None:  # pragma: no cover
        interval: float = self.commit_interval * 2.5
        await self.sleep(interval)
        async for sleep_time in self.itertimer(interval, name="livelock"):
            if not self.app.rebalancing:
                await self.verify_all_partitions_active()

    async def verify_all_partitions_active(self) -> None:
        now = monotonic()
        for tp in self.assignment():
            await self.sleep(0)
            if not self.should_stop:
                self.verify_event_path(now, tp)

    def verify_event_path(self, now: float, tp: TP) -> None:
        ...

    def verify_recovery_event_path(self, now: float, tp: TP) -> None:
        ...

    async def commit(
        self, topics: TPorTopicSet = None, start_new_transaction: bool = True
    ) -> bool:
        """Maybe commit the offset for all or specific topics.

        Arguments:
            topics: Set containing topics and/or TopicPartitions to commit.
        """
        if self.app.client_only:
            # client only cannot commit as consumer does not have group_id
            return False
        if await self.maybe_wait_for_commit_to_finish():
            # original commit finished, return False as we did not commit
            return False

        self._commit_fut = asyncio.Future(loop=self.loop)
        try:
            return await self.force_commit(
                topics,
                start_new_transaction=start_new_transaction,
            )
        finally:
            # set commit_fut to None so that next call will commit.
            fut, self._commit_fut = self._commit_fut, None
            # notify followers that the commit is done.
            notify(fut)

    async def maybe_wait_for_commit_to_finish(self) -> bool:
        """Wait for any existing commit operation to finish."""
        # Only one coroutine allowed to commit at a time,
        # and other coroutines should wait for the original commit to finish
        # then do nothing.
        if self._commit_fut is not None:
            # something is already committing so wait for that future.
            try:
                await self._commit_fut
            except asyncio.CancelledError:
                # if future is cancelled we have to start new commit
                pass
            else:
                return True
        return False

    @Service.transitions_to(CONSUMER_COMMITTING)
    async def force_commit(
        self, topics: TPorTopicSet = None, start_new_transaction: bool = True
    ) -> bool:
        """Force offset commit."""
        sensor_state = self.app.sensors.on_commit_initiated(self)

        # Go over the ack list in each topic/partition
        commit_tps = list(self._filter_tps_with_pending_acks(topics))
        did_commit = await self._commit_tps(
            commit_tps, start_new_transaction=start_new_transaction
        )

        self.app.sensors.on_commit_completed(self, sensor_state)
        return did_commit

    async def _commit_tps(
        self, tps: Iterable[TP], start_new_transaction: bool
    ) -> bool:
        commit_offsets = self._filter_committable_offsets(tps)
        if commit_offsets:
            try:
                # send all messages attached to the new offset
                await self._handle_attached(commit_offsets)
            except ProducerSendError as exc:
                await self.crash(exc)
            else:
                return await self._commit_offsets(
                    commit_offsets, start_new_transaction=start_new_transaction
                )
        return False

    def _filter_committable_offsets(self, tps: Iterable[TP]) -> Dict[TP, int]:
        commit_offsets = {}
        for tp in tps:
            # Find the latest offset we can commit in this tp
            offset = self._new_offset(tp)
            # check if we can commit to this offset
            if offset is not None and self._should_commit(tp, offset):
                commit_offsets[tp] = offset
        return commit_offsets

    async def _handle_attached(self, commit_offsets: Mapping[TP, int]) -> None:
        for tp, offset in commit_offsets.items():
            app = cast(_App, self.app)
            attachments = app._attachments
            producer = app.producer
            # Start publishing the messages and return a list of pending
            # futures.
            pending = await attachments.publish_for_tp_offset(tp, offset)
            # then we wait for either
            #  1) all the attached messages to be published, or
            #  2) the producer crashing
            #
            # If the producer crashes we will not be able to send any messages
            # and it only crashes when there's an irrecoverable error.
            #
            # If we cannot commit it means the events will be processed again,
            # so conforms to at-least-once semantics.
            if pending:
                await cast(Service, producer).wait_many(pending)

    async def _commit_offsets(
        self, offsets: Mapping[TP, int], start_new_transaction: bool = True
    ) -> bool:
        table = terminal.logtable(
            [(str(tp), str(offset)) for tp, offset in offsets.items()],
            title="Commit Offsets",
            headers=["TP", "Offset"],
        )
        self.log.dev("COMMITTING OFFSETS:\n%s", table)
        assignment = self.assignment()
        committable_offsets: Dict[TP, int] = {}
        revoked: Dict[TP, int] = {}
        for tp, offset in offsets.items():
            if tp in assignment:
                committable_offsets[tp] = offset
            else:
                revoked[tp] = offset
        if revoked:
            self.log.info(
                "Discarded commit for revoked partitions that "
                "will be eventually processed again: %r",
                revoked,
            )
        if not committable_offsets:
            return False
        with flight_recorder(self.log, timeout=300.0) as on_timeout:
            did_commit = False
            on_timeout.info("+consumer.commit()")
            if self.in_transaction:
                did_commit = await self.transactions.commit(
                    committable_offsets,
                    start_new_transaction=start_new_transaction,
                )
            else:
                did_commit = await self._commit(committable_offsets)
            on_timeout.info("-consumer.commit()")
            if did_commit:
                on_timeout.info("+tables.on_commit")
                self.app.tables.on_commit(committable_offsets)
                on_timeout.info("-tables.on_commit")
        self._committed_offset.update(committable_offsets)
        self.app.monitor.on_tp_commit(committable_offsets)
        return did_commit

    def _filter_tps_with_pending_acks(
        self, topics: TPorTopicSet = None
    ) -> Iterator[TP]:
        return (
            tp
            for tp in self._acked
            if topics is None or tp in topics or tp.topic in topics
        )

    def _should_commit(self, tp: TP, offset: int) -> bool:
        committed = self._committed_offset[tp]
        return committed is None or (bool(offset) and offset > committed)

    def _new_offset(self, tp: TP) -> Optional[int]:
        # get the new offset for this tp, by going through
        # its list of acked messages.
        acked = self._acked[tp]

        # We iterate over it until we find a gap
        # then return the offset before that.
        # For example if acked[tp] is:
        #   1 2 3 4 5 6 7 8 9
        # the return value will be: 9
        # If acked[tp] is:
        #  34 35 36 40 41 42 43 44
        #          ^--- gap
        # the return value will be: 36
        if acked:
            max_offset = max(acked)
            gap_for_tp = self._gap[tp]
            if gap_for_tp:
                gap_index = next(
                    (i for i, x in enumerate(gap_for_tp) if x > max_offset),
                    len(gap_for_tp),
                )
                gaps = gap_for_tp[:gap_index]
                acked.extend(gaps)
                gap_for_tp[:gap_index] = []
            acked.sort()
            # Note: acked is always kept sorted.
            # find first list of consecutive numbers
            batch = next(consecutive_numbers(acked))
            # remove all but the last batch offset from the list, so the
            # committed point is retained for the next round of gap detection.
            acked[: len(batch) - 1] = []
            self._acked_index[tp].difference_update(batch)
            # return the highest commit offset
            return batch[-1]
        return None

    async def on_task_error(self, exc: BaseException) -> None:
        """Call when processing a message failed."""
        await self.commit()

    def _add_gap(self, tp: TP, offset_from: int, offset_to: int) -> None:
        committed = self._committed_offset[tp]
        gap_for_tp = self._gap[tp]
        for offset in range(offset_from, offset_to):
            if committed is None or offset > committed:
                gap_for_tp.append(offset)

    async def _drain_messages(self, fetcher: ServiceT) -> None:  # pragma: no cover
        # This is the background thread started by Fetcher, used to
        # constantly read messages using Consumer.getmany.
        # It takes Fetcher as argument, because we must be able to
        # stop it using `await Fetcher.stop()`.
        callback = self.callback
        getmany = self.getmany
        consumer_should_stop = cast(Service, self)._stopped.is_set
        fetcher_should_stop = cast(Service, fetcher)._stopped.is_set

        get_read_offset = self._read_offset.__getitem__
        set_read_offset = self._read_offset.__setitem__
        flag_consumer_fetching = CONSUMER_FETCHING
        set_flag = self.diag.set_flag
        unset_flag = self.diag.unset_flag
        commit_every = self._commit_every
        acks_enabled_for = self.app.topics.acks_enabled_for

        yield_every = 100
        num_since_yield = 0
        sleep = asyncio.sleep

        try:
            while not (consumer_should_stop() or fetcher_should_stop()):
                set_flag(flag_consumer_fetching)
                ait = cast(AsyncIterator, getmany(timeout=1.0))

                # Sleeping because sometimes getmany is called in a loop
                # never releasing to the event loop
                await self.sleep(0)
                if not self.should_stop:
                    async for tp, message in ait:
                        num_since_yield += 1
                        if num_since_yield > yield_every:
                            await sleep(0)
                            num_since_yield = 0

                        offset = message.offset
                        r_offset = get_read_offset(tp)
                        if r_offset is None or offset > r_offset:
                            gap = offset - (r_offset or 0)
                            # We have a gap in incoming messages
                            if gap > 1 and r_offset:
                                acks_enabled = acks_enabled_for(message.topic)
                                if acks_enabled:
                                    self._add_gap(tp, r_offset + 1, offset)
                            if commit_every is not None:
                                if self._n_acked >= commit_every:
                                    self._n_acked = 0
                                    await self.commit()
                            await callback(message)
                            set_read_offset(tp, offset)
                        else:
                            self.log.dev(
                                "DROPPED MESSAGE ROFF %r: k=%r v=%r",
                                offset,
                                message.key,
                                message.value,
                            )
                    unset_flag(flag_consumer_fetching)

        except self.consumer_stopped_errors:
            if self.transport.app.should_stop:
                # we're already stopping so ignore
                self.log.info("Broker stopped consumer, shutting down...")
                return
            raise
        except asyncio.CancelledError:
            if self.transport.app.should_stop:
                # we're already stopping so ignore
                self.log.info("Consumer shutting down for user cancel.")
                return
            raise
        except Exception as exc:
            self.log.exception("Drain messages raised: %r", exc)
            raise
        finally:
            unset_flag(flag_consumer_fetching)

    def close(self) -> None:
        """Close consumer for graceful shutdown."""
        ...

    @property
    def unacked(self) -> Set[Message]:
        """Return the set of currently unacknowledged messages."""
        return cast(Set[Message], self._unacked_messages)
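
The gap handling in `_new_offset` above leans on a `consecutive_numbers` helper imported elsewhere in the module; a minimal sketch of what such a helper could look like (an assumption for illustration, not the library's actual code), replaying the gap example from the comments:

from itertools import groupby
from typing import Iterable, Iterator, List

def consecutive_numbers(it: Iterable[int]) -> Iterator[List[int]]:
    # Within a run of consecutive values, n - index is constant,
    # so grouping on that key splits the input at every gap.
    for _, group in groupby(enumerate(it), lambda pair: pair[1] - pair[0]):
        yield [n for _, n in group]

# The gap example from the comments: committing stops just before the gap.
acked = [34, 35, 36, 40, 41, 42, 43, 44]
batch = next(consecutive_numbers(acked))
assert batch == [34, 35, 36]
assert batch[-1] == 36  # the highest safely committable offset
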
Esempio n. 35
0
 def union(self, other):
     return WeakSet(set(self).union(other))
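
A quick demonstration of the semantics of the `union` override above: the result is a fresh WeakSet, and dead referents drop out of it like any other WeakSet (the generic `Item` class is assumed purely for illustration):

import gc
from weakref import WeakSet

class Item:
    def __init__(self, name):
        self.name = name

a, b = Item('a'), Item('b')
s1, s2 = WeakSet([a]), WeakSet([b])
u = WeakSet(set(s1).union(s2))   # same construction as the method above
assert len(u) == 2
del b
gc.collect()
assert len(u) == 1               # dead referents disappear from the union too
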
Esempio n. 36
0
 def test_isdisjoint(self):
     self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
     self.assertTrue(not self.s.isdisjoint(WeakSet(self.letters)))
Esempio n. 37
0
    def __init__(self, name_or_fd: Optional[Union[int, str]] = None) -> None:
        """Constructor for the Display object"""
        super().__init__(None)

        self._children: WeakSet = WeakSet()
        self._name_or_fd = name_or_fd
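
The `_children` WeakSet above implements a common parent/child registry pattern; a small self-contained sketch (generic names, not the actual Display API) of why a weak registry is used:

import gc
from weakref import WeakSet

class Parent:
    def __init__(self):
        self._children: WeakSet = WeakSet()

    def adopt(self, child):
        self._children.add(child)

class Child:
    pass

p = Parent()
c = Child()
p.adopt(c)
assert len(p._children) == 1
del c
gc.collect()
assert len(p._children) == 0  # the parent never kept the child alive
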
Esempio n. 38
0
class TestWeakSet(unittest.TestCase):
    def setUp(self):
        # need to keep references to them
        self.items = [SomeClass(c) for c in ('a', 'b', 'c')]
        self.items2 = [SomeClass(c) for c in ('x', 'y', 'z')]
        self.letters = [SomeClass(c) for c in string.ascii_letters]
        self.ab_items = [SomeClass(c) for c in 'ab']
        self.abcde_items = [SomeClass(c) for c in 'abcde']
        self.def_items = [SomeClass(c) for c in 'def']
        self.ab_weakset = WeakSet(self.ab_items)
        self.abcde_weakset = WeakSet(self.abcde_items)
        self.def_weakset = WeakSet(self.def_items)
        self.s = WeakSet(self.items)
        self.d = dict.fromkeys(self.items)
        self.obj = SomeClass('F')
        self.fs = WeakSet([self.obj])

    def test_methods(self):
        weaksetmethods = dir(WeakSet)
        for method in dir(set):
            if method == 'test_c_api' or method.startswith('_'):
                continue
            self.assertIn(method, weaksetmethods,
                          "WeakSet missing method " + method)

    def test_new_or_init(self):
        self.assertRaises(TypeError, WeakSet, [], 2)

    def test_len(self):
        self.assertEqual(len(self.s), len(self.d))
        self.assertEqual(len(self.fs), 1)
        del self.obj
        self.assertEqual(len(self.fs), 0)

    def test_contains(self):
        for c in self.letters:
            self.assertEqual(c in self.s, c in self.d)
        # 1 is not weakref'able, but that TypeError is caught by __contains__
        self.assertNotIn(1, self.s)
        self.assertIn(self.obj, self.fs)
        del self.obj
        self.assertNotIn(SomeClass('F'), self.fs)

    def test_union(self):
        u = self.s.union(self.items2)
        for c in self.letters:
            self.assertEqual(c in u, c in self.d or c in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(u), WeakSet)
        self.assertRaises(TypeError, self.s.union, [[]])
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet(self.items + self.items2)
            c = C(self.items2)
            self.assertEqual(self.s.union(c), x)
            del c
        self.assertEqual(len(u), len(self.items) + len(self.items2))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(u), len(self.items) + len(self.items2))

    def test_or(self):
        i = self.s.union(self.items2)
        self.assertEqual(self.s | set(self.items2), i)
        self.assertEqual(self.s | frozenset(self.items2), i)

    def test_intersection(self):
        s = WeakSet(self.letters)
        i = s.intersection(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.items2 and c in self.letters)
        self.assertEqual(s, WeakSet(self.letters))
        self.assertEqual(type(i), WeakSet)
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet([])
            self.assertEqual(i.intersection(C(self.items)), x)
        self.assertEqual(len(i), len(self.items2))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(i), len(self.items2))

    def test_isdisjoint(self):
        self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
        self.assertTrue(not self.s.isdisjoint(WeakSet(self.letters)))

    def test_and(self):
        i = self.s.intersection(self.items2)
        self.assertEqual(self.s & set(self.items2), i)
        self.assertEqual(self.s & frozenset(self.items2), i)

    def test_difference(self):
        i = self.s.difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c not in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.difference, [[]])

    def test_sub(self):
        i = self.s.difference(self.items2)
        self.assertEqual(self.s - set(self.items2), i)
        self.assertEqual(self.s - frozenset(self.items2), i)

    def test_symmetric_difference(self):
        i = self.s.symmetric_difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, (c in self.d) ^ (c in self.items2))
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
        self.assertEqual(len(i), len(self.items) + len(self.items2))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(i), len(self.items) + len(self.items2))

    def test_xor(self):
        i = self.s.symmetric_difference(self.items2)
        self.assertEqual(self.s ^ set(self.items2), i)
        self.assertEqual(self.s ^ frozenset(self.items2), i)

    def test_sub_and_super(self):
        self.assertTrue(self.ab_weakset <= self.abcde_weakset)
        self.assertTrue(self.abcde_weakset <= self.abcde_weakset)
        self.assertTrue(self.abcde_weakset >= self.ab_weakset)
        self.assertFalse(self.abcde_weakset <= self.def_weakset)
        self.assertFalse(self.abcde_weakset >= self.def_weakset)
        self.assertTrue(set('a').issubset('abc'))
        self.assertTrue(set('abc').issuperset('a'))
        self.assertFalse(set('a').issubset('cbs'))
        self.assertFalse(set('cbs').issuperset('a'))

    def test_lt(self):
        self.assertTrue(self.ab_weakset < self.abcde_weakset)
        self.assertFalse(self.abcde_weakset < self.def_weakset)
        self.assertFalse(self.ab_weakset < self.ab_weakset)
        self.assertFalse(WeakSet() < WeakSet())

    def test_gt(self):
        self.assertTrue(self.abcde_weakset > self.ab_weakset)
        self.assertFalse(self.abcde_weakset > self.def_weakset)
        self.assertFalse(self.ab_weakset > self.ab_weakset)
        self.assertFalse(WeakSet() > WeakSet())

    def test_gc(self):
        # Create a nest of cycles to exercise overall ref count check
        s = WeakSet(Foo() for i in range(1000))
        for elem in s:
            elem.cycle = s
            elem.sub = elem
            elem.set = WeakSet([elem])

    def test_subclass_with_custom_hash(self):
        # Bug #1257731
        class H(WeakSet):
            def __hash__(self):
                return int(id(self) & 0x7fffffff)

        s = H()
        f = set()
        f.add(s)
        self.assertIn(s, f)
        f.remove(s)
        f.add(s)
        f.discard(s)

    def test_init(self):
        s = WeakSet()
        s.__init__(self.items)
        self.assertEqual(s, self.s)
        s.__init__(self.items2)
        self.assertEqual(s, WeakSet(self.items2))
        self.assertRaises(TypeError, s.__init__, s, 2)
        self.assertRaises(TypeError, s.__init__, 1)

    def test_constructor_identity(self):
        s = WeakSet(self.items)
        t = WeakSet(s)
        self.assertNotEqual(id(s), id(t))

    def test_hash(self):
        self.assertRaises(TypeError, hash, self.s)

    def test_clear(self):
        self.s.clear()
        self.assertEqual(self.s, WeakSet([]))
        self.assertEqual(len(self.s), 0)

    def test_copy(self):
        dup = self.s.copy()
        self.assertEqual(self.s, dup)
        self.assertNotEqual(id(self.s), id(dup))

    def test_add(self):
        x = SomeClass('Q')
        self.s.add(x)
        self.assertIn(x, self.s)
        dup = self.s.copy()
        self.s.add(x)
        self.assertEqual(self.s, dup)
        self.assertRaises(TypeError, self.s.add, [])
        self.fs.add(Foo())
        self.assertTrue(len(self.fs) == 1)
        self.fs.add(self.obj)
        self.assertTrue(len(self.fs) == 1)

    def test_remove(self):
        x = SomeClass('a')
        self.s.remove(x)
        self.assertNotIn(x, self.s)
        self.assertRaises(KeyError, self.s.remove, x)
        self.assertRaises(TypeError, self.s.remove, [])

    def test_discard(self):
        a, q = SomeClass('a'), SomeClass('Q')
        self.s.discard(a)
        self.assertNotIn(a, self.s)
        self.s.discard(q)
        self.assertRaises(TypeError, self.s.discard, [])

    def test_pop(self):
        for i in range(len(self.s)):
            elem = self.s.pop()
            self.assertNotIn(elem, self.s)
        self.assertRaises(KeyError, self.s.pop)

    def test_update(self):
        retval = self.s.update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)
        self.assertRaises(TypeError, self.s.update, [[]])

    def test_update_set(self):
        self.s.update(set(self.items2))
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_ior(self):
        self.s |= set(self.items2)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_intersection_update(self):
        retval = self.s.intersection_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.intersection_update, [[]])

    def test_iand(self):
        self.s &= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_difference_update(self):
        retval = self.s.difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.difference_update, [[]])
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_isub(self):
        self.s -= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_symmetric_difference_update(self):
        retval = self.s.symmetric_difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_ixor(self):
        self.s ^= set(self.items2)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_inplace_on_self(self):
        t = self.s.copy()
        t |= t
        self.assertEqual(t, self.s)
        t &= t
        self.assertEqual(t, self.s)
        t -= t
        self.assertEqual(t, WeakSet())
        t = self.s.copy()
        t ^= t
        self.assertEqual(t, WeakSet())

    def test_eq(self):
        # issue 5964
        self.assertTrue(self.s == self.s)
        self.assertTrue(self.s == WeakSet(self.items))
        self.assertFalse(self.s == set(self.items))
        self.assertFalse(self.s == list(self.items))
        self.assertFalse(self.s == tuple(self.items))
        self.assertFalse(self.s == 1)

    def test_ne(self):
        self.assertTrue(self.s != set(self.items))
        s1 = WeakSet()
        s2 = WeakSet()
        self.assertFalse(s1 != s2)

    def test_weak_destroy_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        # Create new items to be sure no-one else holds a reference
        items = [SomeClass(c) for c in ('a', 'b', 'c')]
        s = WeakSet(items)
        it = iter(s)
        next(it)  # Trigger internal iteration
        # Destroy an item
        del items[-1]
        gc.collect()  # just in case
        # We have removed either the first consumed item, or another one
        self.assertIn(len(list(it)), [len(items), len(items) - 1])
        del it
        # The removal has been committed
        self.assertEqual(len(s), len(items))

    def test_weak_destroy_and_mutate_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        items = [SomeClass(c) for c in string.ascii_letters]
        s = WeakSet(items)

        @contextlib.contextmanager
        def testcontext():
            try:
                it = iter(s)
                next(it)
                # Schedule an item for removal and recreate it
                u = SomeClass(str(items.pop()))
                gc.collect()  # just in case
                yield u
            finally:
                it = None  # should commit all removals

        with testcontext() as u:
            self.assertNotIn(u, s)
        with testcontext() as u:
            self.assertRaises(KeyError, s.remove, u)
        self.assertNotIn(u, s)
        with testcontext() as u:
            s.add(u)
        self.assertIn(u, s)
        t = s.copy()
        with testcontext() as u:
            s.update(t)
        self.assertEqual(len(s), len(t))
        with testcontext() as u:
            s.clear()
        self.assertEqual(len(s), 0)

    def test_len_cycles(self):
        N = 20
        items = [RefCycle() for i in range(N)]
        s = WeakSet(items)
        del items
        it = iter(s)
        try:
            next(it)
        except StopIteration:
            pass
        gc.collect()
        n1 = len(s)
        del it
        gc.collect()
        n2 = len(s)
        # one item may be kept alive inside the iterator
        self.assertIn(n1, (0, 1))
        self.assertEqual(n2, 0)

    def test_len_race(self):
        # Extended sanity checks for len() in the face of cyclic collection
        self.addCleanup(gc.set_threshold, *gc.get_threshold())
        for th in range(1, 100):
            N = 20
            gc.collect(0)
            gc.set_threshold(th, th, th)
            items = [RefCycle() for i in range(N)]
            s = WeakSet(items)
            del items
            # All items will be collected at next garbage collection pass
            it = iter(s)
            try:
                next(it)
            except StopIteration:
                pass
            n1 = len(s)
            del it
            n2 = len(s)
            self.assertGreaterEqual(n1, 0)
            self.assertLessEqual(n1, N)
            self.assertGreaterEqual(n2, 0)
            self.assertLessEqual(n2, n1)
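
The `# need to keep references to them` comment in setUp is the crux of these tests; a standalone illustration of what happens without those strong references:

import gc
from weakref import WeakSet

class SomeClass:
    def __init__(self, name):
        self.name = name

items = [SomeClass(c) for c in 'abc']
s = WeakSet(items)
assert len(s) == 3
items.clear()          # drop the only strong references
gc.collect()
assert len(s) == 0     # the WeakSet emptied itself
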
Esempio n. 39
0
 def __init__(self):
     super(DiscordHandler, self).__init__()
     self.bot = Client()
     self.message_handlers = WeakSet()
     self.channels = WeakValueDictionary()
Esempio n. 40
0
 def test_ne(self):
     self.assertTrue(self.s != set(self.items))
     s1 = WeakSet()
     s2 = WeakSet()
     self.assertFalse(s1 != s2)
Esempio n. 41
0
 def test_clear(self):
     self.s.clear()
     self.assertEqual(self.s, WeakSet([]))
     self.assertEqual(len(self.s), 0)
Esempio n. 42
0
 def __init__(self, context):
     self.__context = context
     self.__statements = WeakSet()
Esempio n. 43
0
 def test_gt(self):
     self.assertTrue(self.abcde_weakset > self.ab_weakset)
     self.assertFalse(self.abcde_weakset > self.def_weakset)
     self.assertFalse(self.ab_weakset > self.ab_weakset)
     self.assertFalse(WeakSet() > WeakSet())
Esempio n. 44
0
 def test_lt(self):
     self.assertTrue(self.ab_weakset < self.abcde_weakset)
     self.assertFalse(self.abcde_weakset < self.def_weakset)
     self.assertFalse(self.ab_weakset < self.ab_weakset)
     self.assertFalse(WeakSet() < WeakSet())
Esempio n. 45
0
class IOPubThread(object):
    """An object for sending IOPub messages in a background thread

    Prevents a blocking main thread from delaying output from threads.

    IOPubThread(pub_socket).background_socket is a Socket-API-providing object
    whose IO is always run in a thread.
    """

    def __init__(self, socket, pipe=False):
        """Create IOPub thread

        Parameters
        ----------
        socket : zmq.PUB Socket
            the socket on which messages will be sent.
        pipe : bool
            Whether this process should listen for IOPub messages
            piped from subprocesses.
        """
        self.socket = socket
        self.background_socket = BackgroundSocket(self)
        self._master_pid = os.getpid()
        self._pipe_flag = pipe
        self.io_loop = IOLoop(make_current=False)
        if pipe:
            self._setup_pipe_in()
        self._local = threading.local()
        self._events = deque()
        self._event_pipes = WeakSet()
        self._setup_event_pipe()
        self.thread = threading.Thread(target=self._thread_main)
        self.thread.daemon = True
        self.thread.pydev_do_not_trace = True
        self.thread.is_pydev_daemon_thread = True

    def _thread_main(self):
        """The inner loop that's actually run in a thread"""
        self.io_loop.make_current()
        self.io_loop.start()
        self.io_loop.close(all_fds=True)

    def _setup_event_pipe(self):
        """Create the PULL socket listening for events that should fire in this thread."""
        ctx = self.socket.context
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        _uuid = b2a_hex(os.urandom(16)).decode('ascii')
        iface = self._event_interface = 'inproc://%s' % _uuid
        pipe_in.bind(iface)
        self._event_puller = ZMQStream(pipe_in, self.io_loop)
        self._event_puller.on_recv(self._handle_event)

    @property
    def _event_pipe(self):
        """thread-local event pipe for signaling events that should be processed in the thread"""
        try:
            event_pipe = self._local.event_pipe
        except AttributeError:
            # new thread, new event pipe
            ctx = self.socket.context
            event_pipe = ctx.socket(zmq.PUSH)
            event_pipe.linger = 0
            event_pipe.connect(self._event_interface)
            self._local.event_pipe = event_pipe
            # WeakSet so that event pipes will be closed by garbage collection
            # when their threads are terminated
            self._event_pipes.add(event_pipe)
        return event_pipe

    def _handle_event(self, msg):
        """Handle an event on the event pipe

        Content of the message is ignored.

        Whenever *an* event arrives on the event stream,
        *all* waiting events are processed in order.
        """
        # freeze event count so new writes don't extend the queue
        # while we are processing
        n_events = len(self._events)
        for i in range(n_events):
            event_f = self._events.popleft()
            event_f()

    def _setup_pipe_in(self):
        """setup listening pipe for IOPub from forked subprocesses"""
        ctx = self.socket.context

        # use UUID to authenticate pipe messages
        self._pipe_uuid = os.urandom(16)

        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        try:
            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
                "\nsubprocess output will be unavailable."
            )
            self._pipe_flag = False
            pipe_in.close()
            return
        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
        self._pipe_in.on_recv(self._handle_pipe_msg)

    def _handle_pipe_msg(self, msg):
        """handle a pipe message from a subprocess"""
        if not self._pipe_flag or not self._is_master_process():
            return
        if msg[0] != self._pipe_uuid:
            print("Bad pipe message: %s", msg, file=sys.__stderr__)
            return
        self.send_multipart(msg[1:])

    def _setup_pipe_out(self):
        # must be new context after fork
        ctx = zmq.Context()
        pipe_out = ctx.socket(zmq.PUSH)
        pipe_out.linger = 3000  # 3s timeout for pipe_out sends before discarding the message
        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
        return ctx, pipe_out

    def _is_master_process(self):
        return os.getpid() == self._master_pid

    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            return CHILD

    def start(self):
        """Start the IOPub thread"""
        self.thread.start()
        # make sure we don't prevent process exit
        # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
        atexit.register(self.stop)

    def stop(self):
        """Stop the IOPub thread"""
        if not self.thread.is_alive():
            return
        self.io_loop.add_callback(self.io_loop.stop)
        self.thread.join()
        # close *all* event pipes, created in any thread
        # event pipes can only be used from other threads while self.thread.is_alive()
        # so after thread.join, this should be safe
        for event_pipe in self._event_pipes:
            event_pipe.close()

    def close(self):
        if self.closed:
            return
        self.socket.close()
        self.socket = None

    @property
    def closed(self):
        return self.socket is None

    def schedule(self, f):
        """Schedule a function to be called in our IO thread.

        If the thread is not running, call immediately.
        """
        if self.thread.is_alive():
            self._events.append(f)
            # wake event thread (message content is ignored)
            self._event_pipe.send(b'')
        else:
            f()

    def send_multipart(self, *args, **kwargs):
        """send_multipart schedules actual zmq send in my thread.

        If my thread isn't running (e.g. forked process), send immediately.
        """
        self.schedule(lambda : self._really_send(*args, **kwargs))

    def _really_send(self, msg, *args, **kwargs):
        """The callback that actually sends messages"""
        mp_mode = self._check_mp_mode()

        if mp_mode != CHILD:
            # we are master, do a regular send
            self.socket.send_multipart(msg, *args, **kwargs)
        else:
            # we are a child, pipe to master
            # new context/socket for every pipe-out
            # since forks don't teardown politely, use ctx.term to ensure send has completed
            ctx, pipe_out = self._setup_pipe_out()
            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
            pipe_out.close()
            ctx.term()
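
The event-pipe machinery above boils down to a PULL socket owned by the worker thread and per-thread PUSH sockets over inproc; a stripped-down sketch of that wakeup pattern (illustrative names, not the ipykernel API):

import threading
import zmq

ctx = zmq.Context()
pull = ctx.socket(zmq.PULL)
pull.linger = 0
pull.bind("inproc://events")           # bind before any PUSH connects

def worker():
    pull.recv()                        # blocks until some thread pings us
    print("woken up, processing queued events")

t = threading.Thread(target=worker)
t.start()

push = ctx.socket(zmq.PUSH)            # inproc requires the same context
push.linger = 0
push.connect("inproc://events")
push.send(b"")                         # message content is ignored
t.join()
push.close()
pull.close()
ctx.term()
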
Esempio n. 46
0
class RemoteServiceServer(RemoteServiceBase):
    """The server side of a RPC communication.

    Considers all messages coming from the other end as requests for
    RPCs executions. Will perform them and send results as responses.

    After having created an instance and initialized it with a socket
    the reader loop should be started by calling run.

    """
    def __init__(self, local_service, remote_address):
        """Create a responder for the given service.

        local_service (Service): the object whose methods should be
            called via RPC.

        For other arguments see RemoteServiceBase.

        """
        super(RemoteServiceServer, self).__init__(remote_address)
        self.local_service = local_service

        self.pending_incoming_requests_threads = WeakSet()

    def finalize(self, reason=""):
        """See RemoteServiceBase.finalize."""
        super(RemoteServiceServer, self).finalize(reason)

        for thread in self.pending_incoming_requests_threads:
            thread.kill(RPCError(reason), block=False)

        self.pending_incoming_requests_threads.clear()

    def handle(self, socket_):
        self.initialize(socket_, self.remote_address)
        gevent.spawn(self.run)

    def run(self):
        """Start listening for requests, and go on forever.

        Read messages from the socket and issue greenlets to parse
        them, execute methods and send the response to the client.
        This method won't return as long as there's something to read,
        it's therefore advisable to spawn a greenlet to call it.

        """
        while True:
            try:
                data = self._read()
            except IOError:
                break

            if len(data) == 0:
                self.finalize("Connection closed.")
                break

            gevent.spawn(self.process_data, data)

    def process_data(self, data):
        """Handle the message.

        JSON-decode it and forward it to process_incoming_request
        (unconditionally!).

        data (bytes): the message read from the socket.

        """
        # Decode the incoming data.
        try:
            message = json.loads(data, encoding='utf-8')
        except ValueError:
            logger.warning("Cannot parse incoming message, discarding.")
            return

        self.process_incoming_request(message)

    def process_incoming_request(self, request):
        """Handle the request.

        Parse the request, execute the method it asks for, format the
        result and send the response.

        request (dict): the JSON-decoded request.

        """
        # Validate the request.
        if not {"__id", "__method", "__data"}.issubset(request.iterkeys()):
            logger.warning("Request is missing some fields, ingoring.")
            return

        # Determine the ID.
        id_ = request["__id"]

        # Store the request.
        self.pending_incoming_requests_threads.add(gevent.getcurrent())

        # Build the response.
        response = {"__id": id_, "__data": None, "__error": None}

        method_name = request["__method"]

        if not hasattr(self.local_service, method_name):
            response["__error"] = "Method %s doesn't exist." % method_name
        else:
            method = getattr(self.local_service, method_name)

            if not getattr(method, "rpc_callable", False):
                response["__error"] = "Method %s isn't callable." % method_name
            else:
                try:
                    response["__data"] = method(**request["__data"])
                except Exception as error:
                    response["__error"] = "%s: %s\n%s" % \
                        (error.__class__.__name__, error,
                         traceback.format_exc())

        # Encode it.
        try:
            data = json.dumps(response, encoding='utf-8')
        except (TypeError, ValueError):
            logger.warning("JSON encoding failed.", exc_info=True)
            return

        # Send it.
        try:
            self._write(data)
        except IOError:
            # Log messages have already been produced.
            return
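
For reference, a sketch of the wire format `process_incoming_request` expects and the response it builds (field names taken from the code above; the values are made up for illustration):

import json

request = {
    "__id": "42",
    "__method": "echo",
    "__data": {"text": "hello"},
}
payload = json.dumps(request)          # what a client would send over the socket

# On success the server answers with the same __id and a null error:
response = {"__id": "42", "__data": {"text": "hello"}, "__error": None}
assert set(response) == {"__id", "__data", "__error"}
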
Esempio n. 47
0
class Context(ContextBase, AttributeSetter):
    """Create a zmq Context

    A zmq Context creates sockets via its ``ctx.socket`` method.
    """

    sockopts = None
    _instance = None
    _instance_lock = Lock()
    _instance_pid = None
    _shadow = False
    _sockets = None

    def __init__(self, io_threads=1, **kwargs):
        super(Context, self).__init__(io_threads=io_threads, **kwargs)
        if kwargs.get('shadow', False):
            self._shadow = True
        else:
            self._shadow = False
        self.sockopts = {}
        self._sockets = WeakSet()

    def __del__(self):
        """deleting a Context should terminate it, without trying non-threadsafe destroy"""

        # Calling locals() here conceals issue #1167 on Windows CPython 3.5.4.
        locals()

        if not self._shadow and not _exiting and not self.closed:
            warnings.warn(
                f"unclosed context {self}",
                ResourceWarning,
                stacklevel=2,
                source=self,
            )
            self.term()

    _repr_cls = "zmq.Context"

    def __repr__(self):
        cls = self.__class__
        # look up _repr_cls on exact class, not inherited
        _repr_cls = cls.__dict__.get("_repr_cls", None)
        if _repr_cls is None:
            _repr_cls = f"{cls.__module__}.{cls.__name__}"

        closed = ' closed' if self.closed else ''
        if self._sockets:
            n_sockets = len(self._sockets)
            s = 's' if n_sockets > 1 else ''
            sockets = f"{n_sockets} socket{s}"
        else:
            sockets = ""
        return f"<{_repr_cls}({sockets}) at {hex(id(self))}{closed}>"

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.term()

    def __copy__(self, memo=None):
        """Copying a Context creates a shadow copy"""
        return self.__class__.shadow(self.underlying)

    __deepcopy__ = __copy__

    @classmethod
    def shadow(cls, address):
        """Shadow an existing libzmq context

        address is the integer address of the libzmq context
        or an FFI pointer to it.

        .. versionadded:: 14.1
        """
        from zmq.utils.interop import cast_int_addr

        address = cast_int_addr(address)
        return cls(shadow=address)

    @classmethod
    def shadow_pyczmq(cls, ctx):
        """Shadow an existing pyczmq context

        ctx is the FFI `zctx_t *` pointer

        .. versionadded:: 14.1
        """
        from pyczmq import zctx  # type: ignore
        from zmq.utils.interop import cast_int_addr

        underlying = zctx.underlying(ctx)
        address = cast_int_addr(underlying)
        return cls(shadow=address)

    # static method copied from tornado IOLoop.instance
    @classmethod
    def instance(cls: Type[T], io_threads=1) -> T:
        """Returns a global Context instance.

        Most single-threaded applications have a single, global Context.
        Use this method instead of passing around Context instances
        throughout your code.

        A common pattern for classes that depend on Contexts is to use
        a default argument to enable programs with multiple Contexts
        but not require the argument for simpler applications::

            class MyClass(object):
                def __init__(self, context=None):
                    self.context = context or Context.instance()

        .. versionchanged:: 18.1

            When called in a subprocess after forking,
            a new global instance is created instead of inheriting
            a Context that won't work from the parent process.
        """
        if (cls._instance is None or cls._instance_pid != os.getpid()
                or cls._instance.closed):
            with cls._instance_lock:
                if (cls._instance is None or cls._instance_pid != os.getpid()
                        or cls._instance.closed):
                    cls._instance = cls(io_threads=io_threads)
                    cls._instance_pid = os.getpid()
        return cls._instance

    def term(self):
        """Close or terminate the context.

        Context termination is performed in the following steps:

        - Any blocking operations currently in progress on sockets open within context shall
          raise :class:`zmq.ContextTerminated`.
          With the exception of socket.close(), any further operations on sockets open within this context
          shall raise :class:`zmq.ContextTerminated`.
        - After interrupting all blocking calls, term shall block until the following conditions are satisfied:
            - All sockets open within context have been closed.
            - For each socket within context, all messages sent on the socket have either been
              physically transferred to a network peer,
              or the socket's linger period set with the zmq.LINGER socket option has expired.

        For further details regarding socket linger behaviour refer to libzmq documentation for ZMQ_LINGER.

        This can be called to close the context by hand. If this is not called,
        the context will automatically be closed when it is garbage collected.
        """
        return super(Context, self).term()

    # -------------------------------------------------------------------------
    # Hooks for ctxopt completion
    # -------------------------------------------------------------------------

    def __dir__(self):
        keys = dir(self.__class__)

        for collection in (ctx_opt_names, ):
            keys.extend(collection)
        return keys

    # -------------------------------------------------------------------------
    # Creating Sockets
    # -------------------------------------------------------------------------

    def _add_socket(self, socket):
        """Add a weakref to a socket for Context.destroy / reference counting"""
        self._sockets.add(socket)

    def _rm_socket(self, socket):
        """Remove a socket for Context.destroy / reference counting"""
        # allow _sockets to be None in case of process teardown
        if getattr(self, "_sockets", None) is not None:
            self._sockets.discard(socket)

    def destroy(self, linger=None):
        """Close all sockets associated with this context and then terminate
        the context.

        .. warning::

            destroy involves calling ``zmq_close()``, which is **NOT** threadsafe.
            If there are active sockets in other threads, this must not be called.

        Parameters
        ----------

        linger : int, optional
            If specified, set LINGER on sockets prior to closing them.
        """
        if self.closed:
            return

        sockets = self._sockets
        self._sockets = WeakSet()
        for s in sockets:
            if s and not s.closed:
                if linger is not None:
                    s.setsockopt(LINGER, linger)
                s.close()

        self.term()

    @property
    def _socket_class(self):
        return Socket

    def socket(self, socket_type, **kwargs):
        """Create a Socket associated with this Context.

        Parameters
        ----------
        socket_type : int
            The socket type, which can be any of the 0MQ socket types:
            REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, etc.

        kwargs:
            will be passed to the __init__ method of the socket class.
        """
        if self.closed:
            raise ZMQError(ENOTSUP)
        s = self._socket_class(  # set PYTHONTRACEMALLOC=2 to get the calling frame
            self, socket_type, **kwargs)
        for opt, value in self.sockopts.items():
            try:
                s.setsockopt(opt, value)
            except ZMQError:
                # ignore ZMQErrors, which are likely for socket options
                # that do not apply to a particular socket type, e.g.
                # SUBSCRIBE for non-SUB sockets.
                pass
        self._add_socket(s)
        return s

    def setsockopt(self, opt, value):
        """set default socket options for new sockets created by this Context

        .. versionadded:: 13.0
        """
        self.sockopts[opt] = value

    def getsockopt(self, opt):
        """get default socket options for new sockets created by this Context

        .. versionadded:: 13.0
        """
        return self.sockopts[opt]

    def _set_attr_opt(self, name, opt, value):
        """set default sockopts as attributes"""
        if name in constants.ctx_opt_names:
            return self.set(opt, value)
        else:
            self.sockopts[opt] = value

    def _get_attr_opt(self, name, opt):
        """get default sockopts as attributes"""
        if name in constants.ctx_opt_names:
            return self.get(opt)
        else:
            if opt not in self.sockopts:
                raise AttributeError(name)
            else:
                return self.sockopts[opt]

    def __delattr__(self, key):
        """delete default sockopts as attributes"""
        key = key.upper()
        try:
            opt = getattr(constants, key)
        except AttributeError:
            raise AttributeError("no such socket option: %s" % key)
        else:
            if opt not in self.sockopts:
                raise AttributeError(key)
            else:
                del self.sockopts[opt]
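
A minimal usage sketch of the Context above: defaults stored via `setsockopt` are applied to every socket the context later creates:

import zmq

ctx = zmq.Context.instance()       # process-global singleton
ctx.setsockopt(zmq.LINGER, 0)      # default for all sockets created from now on
s = ctx.socket(zmq.PUSH)
assert s.getsockopt(zmq.LINGER) == 0
s.close()
ctx.destroy()                      # closes tracked sockets, then term()
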
Esempio n. 48
0
class DiscordHandler(GenericHandler):
    def __init__(self):
        super(DiscordHandler, self).__init__()
        self.bot = Client()
        self.message_handlers = WeakSet()
        self.channels = WeakValueDictionary()

    async def setup(self, token, client_secret):
        await self.bot.login(token=token, bot=True)
        asyncio.ensure_future(self.bot.connect(reconnect=True))
        await self.bot.wait_until_ready()
        print("Logged into Discord")
        self.bot.event(self.on_message)
        self.bot.event(self.on_guild_channel_create)
        self.bot.event(self.on_guild_channel_delete)
        self.bot.event(self.on_guild_channel_update)

    def get_channel(self, serialised) -> Optional[DiscordChannel]:
        channel_id = serialised.get("id")
        try:
            return self.channels[channel_id]
        except KeyError:
            pass
        try:
            channel = self.bot.get_channel(int(channel_id))
        except ValueError:
            return
        if not channel:
            return
        rtn = DiscordChannel(channel)
        self.message_handlers.add(rtn.on_message_handler)
        self.channels[channel_id] = rtn
        return rtn

    @property
    def serialised_channels(self):
        return [{
            "type": "discord",
            "id": f"{channel.id}",
            "name": channel.name,
            "server": {
                "id": f"{channel.guild.id}",
                "name": channel.guild.name,
                "icon": str(channel.guild.icon_url_as(format="png", size=256))
            }
        } for channel in self.bot.get_all_channels()
                if (channel.permissions_for(channel.guild.me).manage_webhooks
                    and isinstance(channel, TextChannel))]

    async def on_message(self, message):
        await asyncio.gather(
            *[handler(message) for handler in self.message_handlers])

    async def on_guild_channel_create(self, channel):
        await self.process_channel_handlers()

    async def on_guild_channel_delete(self, channel):
        await self.process_channel_handlers()

    async def on_guild_channel_update(self, before, after):
        await self.process_channel_handlers()
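
The `channels` cache above is a WeakValueDictionary, so an entry lives only as long as something holds a strong reference to its DiscordChannel; a generic sketch of that caching behaviour:

import gc
from weakref import WeakValueDictionary

class Channel:
    def __init__(self, cid):
        self.cid = cid

cache = WeakValueDictionary()
ch = Channel("123")
cache["123"] = ch
assert cache["123"] is ch
del ch                       # last strong reference gone
gc.collect()
assert "123" not in cache    # the cache entry evaporated with it
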
Esempio n. 49
0
class TestWeakSet(unittest.TestCase):

    def setUp(self):
        # need to keep references to them
        self.items = [SomeClass(c) for c in ('a', 'b', 'c')]
        self.items2 = [SomeClass(c) for c in ('x', 'y', 'z')]
        self.letters = [SomeClass(c) for c in string.ascii_letters]
        self.s = WeakSet(self.items)
        self.d = dict.fromkeys(self.items)
        self.obj = SomeClass('F')
        self.fs = WeakSet([self.obj])

    def test_methods(self):
        weaksetmethods = dir(WeakSet)
        for method in dir(set):
            if method == 'test_c_api' or method.startswith('_'):
                continue
            self.assertIn(method, weaksetmethods,
                         "WeakSet missing method " + method)

    def test_new_or_init(self):
        self.assertRaises(TypeError, WeakSet, [], 2)

    def test_len(self):
        self.assertEqual(len(self.s), len(self.d))
        self.assertEqual(len(self.fs), 1)
        del self.obj
        test_support.gc_collect()
        self.assertEqual(len(self.fs), 0)

    def test_contains(self):
        for c in self.letters:
            self.assertEqual(c in self.s, c in self.d)
        # 1 is not weakref'able, but that TypeError is caught by __contains__
        self.assertNotIn(1, self.s)
        self.assertIn(self.obj, self.fs)
        del self.obj
        test_support.gc_collect()
        self.assertNotIn(SomeClass('F'), self.fs)

    def test_union(self):
        u = self.s.union(self.items2)
        for c in self.letters:
            self.assertEqual(c in u, c in self.d or c in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(u), WeakSet)
        self.assertRaises(TypeError, self.s.union, [[]])
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet(self.items + self.items2)
            c = C(self.items2)
            self.assertEqual(self.s.union(c), x)

    def test_or(self):
        i = self.s.union(self.items2)
        self.assertEqual(self.s | set(self.items2), i)
        self.assertEqual(self.s | frozenset(self.items2), i)

    def test_intersection(self):
        i = self.s.intersection(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet([])
            self.assertEqual(self.s.intersection(C(self.items2)), x)

    def test_isdisjoint(self):
        self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
        self.assertTrue(not self.s.isdisjoint(WeakSet(self.letters)))

    def test_and(self):
        i = self.s.intersection(self.items2)
        self.assertEqual(self.s & set(self.items2), i)
        self.assertEqual(self.s & frozenset(self.items2), i)

    def test_difference(self):
        i = self.s.difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c not in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.difference, [[]])

    def test_sub(self):
        i = self.s.difference(self.items2)
        self.assertEqual(self.s - set(self.items2), i)
        self.assertEqual(self.s - frozenset(self.items2), i)

    def test_symmetric_difference(self):
        i = self.s.symmetric_difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, (c in self.d) ^ (c in self.items2))
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.symmetric_difference, [[]])

    def test_xor(self):
        i = self.s.symmetric_difference(self.items2)
        self.assertEqual(self.s ^ set(self.items2), i)
        self.assertEqual(self.s ^ frozenset(self.items2), i)

    def test_sub_and_super(self):
        pl, ql, rl = map(lambda s: [SomeClass(c) for c in s], ['ab', 'abcde', 'def'])
        p, q, r = map(WeakSet, (pl, ql, rl))
        self.assertTrue(p < q)
        self.assertTrue(p <= q)
        self.assertTrue(q <= q)
        self.assertTrue(q > p)
        self.assertTrue(q >= p)
        self.assertFalse(q < r)
        self.assertFalse(q <= r)
        self.assertFalse(q > r)
        self.assertFalse(q >= r)
        self.assertTrue(set('a').issubset('abc'))
        self.assertTrue(set('abc').issuperset('a'))
        self.assertFalse(set('a').issubset('cbs'))
        self.assertFalse(set('cbs').issuperset('a'))

    def test_gc(self):
        # Create a nest of cycles to exercise overall ref count check
        s = WeakSet(Foo() for i in range(1000))
        for elem in s:
            elem.cycle = s
            elem.sub = elem
            elem.set = WeakSet([elem])

    def test_subclass_with_custom_hash(self):
        # Bug #1257731
        class H(WeakSet):
            def __hash__(self):
                return int(id(self) & 0x7fffffff)
        s = H()
        f = set()
        f.add(s)
        self.assertIn(s, f)
        f.remove(s)
        f.add(s)
        f.discard(s)

    def test_init(self):
        s = WeakSet()
        s.__init__(self.items)
        self.assertEqual(s, self.s)
        s.__init__(self.items2)
        self.assertEqual(s, WeakSet(self.items2))
        self.assertRaises(TypeError, s.__init__, s, 2)
        self.assertRaises(TypeError, s.__init__, 1)

    def test_constructor_identity(self):
        s = WeakSet(self.items)
        t = WeakSet(s)
        self.assertNotEqual(id(s), id(t))

    def test_hash(self):
        self.assertRaises(TypeError, hash, self.s)

    def test_clear(self):
        self.s.clear()
        self.assertEqual(self.s, WeakSet([]))
        self.assertEqual(len(self.s), 0)

    def test_copy(self):
        dup = self.s.copy()
        self.assertEqual(self.s, dup)
        self.assertNotEqual(id(self.s), id(dup))

    def test_add(self):
        x = SomeClass('Q')
        self.s.add(x)
        self.assertIn(x, self.s)
        dup = self.s.copy()
        self.s.add(x)
        self.assertEqual(self.s, dup)
        self.assertRaises(TypeError, self.s.add, [])
        self.fs.add(Foo())
        test_support.gc_collect()
        self.assertEqual(len(self.fs), 1)
        self.fs.add(self.obj)
        self.assertEqual(len(self.fs), 1)

    def test_remove(self):
        x = SomeClass('a')
        self.s.remove(x)
        self.assertNotIn(x, self.s)
        self.assertRaises(KeyError, self.s.remove, x)
        self.assertRaises(TypeError, self.s.remove, [])

    def test_discard(self):
        a, q = SomeClass('a'), SomeClass('Q')
        self.s.discard(a)
        self.assertNotIn(a, self.s)
        self.s.discard(q)
        self.assertRaises(TypeError, self.s.discard, [])

    def test_pop(self):
        for i in range(len(self.s)):
            elem = self.s.pop()
            self.assertNotIn(elem, self.s)
        self.assertRaises(KeyError, self.s.pop)

    def test_update(self):
        retval = self.s.update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)
        self.assertRaises(TypeError, self.s.update, [[]])

    def test_update_set(self):
        self.s.update(set(self.items2))
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_ior(self):
        self.s |= set(self.items2)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_intersection_update(self):
        retval = self.s.intersection_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.intersection_update, [[]])

    def test_iand(self):
        self.s &= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_difference_update(self):
        retval = self.s.difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.difference_update, [[]])
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_isub(self):
        self.s -= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_symmetric_difference_update(self):
        retval = self.s.symmetric_difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_ixor(self):
        self.s ^= set(self.items2)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_inplace_on_self(self):
        t = self.s.copy()
        t |= t
        self.assertEqual(t, self.s)
        t &= t
        self.assertEqual(t, self.s)
        t -= t
        self.assertEqual(t, WeakSet())
        t = self.s.copy()
        t ^= t
        self.assertEqual(t, WeakSet())

    def test_eq(self):
        # issue 5964
        self.assertTrue(self.s == self.s)
        self.assertTrue(self.s == WeakSet(self.items))
        self.assertFalse(self.s == set(self.items))
        self.assertFalse(self.s == list(self.items))
        self.assertFalse(self.s == tuple(self.items))
        self.assertFalse(self.s == 1)

    def test_weak_destroy_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        # Create new items to be sure no-one else holds a reference
        items = [SomeClass(c) for c in ('a', 'b', 'c')]
        s = WeakSet(items)
        it = iter(s)
        next(it)             # Trigger internal iteration
        # Destroy an item
        del items[-1]
        test_support.gc_collect()
        # We have removed either the first consumed item, or another one
        self.assertIn(len(list(it)), [len(items), len(items) - 1])
        del it
        test_support.gc_collect()
        # The removal has been committed
        self.assertEqual(len(s), len(items))

    def test_weak_destroy_and_mutate_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        items = [SomeClass(c) for c in string.ascii_letters]
        s = WeakSet(items)
        @contextlib.contextmanager
        def testcontext():
            try:
                it = iter(s)
                next(it)
                # Schedule an item for removal and recreate it
                u = SomeClass(str(items.pop()))
                gc.collect()      # just in case
                yield u
            finally:
                it = None           # should commit all removals

        with testcontext() as u:
            self.assertNotIn(u, s)
        with testcontext() as u:
            self.assertRaises(KeyError, s.remove, u)
        self.assertNotIn(u, s)
        with testcontext() as u:
            s.add(u)
        self.assertIn(u, s)
        t = s.copy()
        with testcontext() as u:
            s.update(t)
        self.assertEqual(len(s), len(t))
        with testcontext() as u:
            s.clear()
        self.assertEqual(len(s), 0)
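
The property all of these assertions keep coming back to can be shown in isolation. A minimal, standalone sketch (SomeClass here is just a stand-in for the weakref-able test fixtures): once the last strong reference to an element dies, the element silently leaves the set.

import gc
from weakref import WeakSet

class SomeClass(object):
    def __init__(self, value):
        self.value = value

s = WeakSet()
obj = SomeClass('a')
s.add(obj)
assert obj in s
del obj
gc.collect()        # force collection on implementations without refcounting
assert len(s) == 0  # the entry vanished together with its referent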
Example n. 50
class CollectManyInteraction(SuperInteraction):
    __qualname__ = 'CollectManyInteraction'
    INTERACTION_TARGET = 'interaction_target'
    INSTANCE_TUNABLES = {
        'aggregate_object': TunableVariant(
            description=(
                'The type of object to use as the aggregate object.  If a '
                'definition is specified, the aggregate object will be '
                'created using that definition.  If "interaction_target" is '
                'specified, the aggregate object will be created using the '
                'definition of the interaction target.'),
            definition=TunableReference(
                description=(
                    'A reference to the type of object that will be created '
                    'as part of this interaction to represent the many '
                    'collected objects the participant has picked up.'),
                manager=services.definition_manager()),
            locked_args={'interaction_target': INTERACTION_TARGET},
            default='definition'),
        'destroy_original_object': Tunable(
            description=(
                'If checked, the original object (the target of this '
                'interaction) will be destroyed and replaced with the '
                'specified aggregate object.  If unchecked, the aggregate '
                "object will be created in the Sim's hand, but the original "
                'object will not be destroyed.'),
            tunable_type=bool,
            default=True)
    }
    DIRTY_DISH_ACTOR_NAME = 'dirtydish'
    ITEMS_PARAM = 'items'
    _object_create_helper = None
    _collected_targets = WeakSet()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._original_carry_target = None

    @property
    def create_object_owner(self):
        return self.sim

    @property
    def _aggregate_object_definition(self):
        if self.aggregate_object is self.INTERACTION_TARGET:
            return self.target.definition
        return self.aggregate_object

    @property
    def create_target(self):
        if self.context.carry_target is not None:
            return
        return self._aggregate_object_definition

    @property
    def created_target(self):
        return self.context.carry_target

    @classmethod
    def _test(cls, target, context, **interaction_parameters):
        if target is not None and target in cls._collected_targets:
            return TestResult(False, 'Target was already collected.')
        return super()._test(target, context, **interaction_parameters)

    def _setup_collected_object(self, obj):
        self.context.carry_target = obj

    def setup_asm_default(self, asm, *args, **kwargs):
        result = super().setup_asm_default(asm, *args, **kwargs)
        if self.target is not None:
            surface_height = get_surface_height_parameter_for_object(
                self.target)
            asm.set_parameter('surfaceHeight', surface_height)
            if self.target.parent is not None:
                asm.set_actor('surface', self.target.parent)
        if self._original_carry_target is not None:
            param_overrides = self._original_carry_target.get_param_overrides(
                self.DIRTY_DISH_ACTOR_NAME, only_for_keys=(self.ITEMS_PARAM, ))
            if param_overrides is not None:
                asm.update_locked_params(param_overrides)
        return result

    def build_basic_content(self, sequence=(), **kwargs):
        self.animation_context.register_event_handler(
            self._xevt_callback, handler_id=SCRIPT_EVENT_ID_START_CARRY)
        if self._aggregate_object_definition is None or (
                self.carry_target is not None and
                self._aggregate_object_definition is self.carry_target.definition):
            if self.context.carry_target is None:
                self._setup_collected_object(self.target)
            return super().build_basic_content(sequence, **kwargs)
        if self.carry_target is not None:
            swap_carry = True
            self._original_carry_target = self.carry_target
        else:
            swap_carry = False
        self._object_create_helper = CreateObjectHelper(
            self.sim,
            self._aggregate_object_definition.id,
            self,
            post_add=self._setup_collected_object,
            tag='Aggregate object created for a CollectManyInteraction.')
        super_build_basic_content = super().build_basic_content

        def grab_sequence(timeline):
            nonlocal sequence
            sequence = super_build_basic_content(sequence)
            if swap_carry:
                sequence = swap_carry_while_holding(
                    self,
                    self._original_carry_target,
                    self.created_target,
                    callback=self._object_create_helper.claim,
                    sequence=sequence)
            else:
                sequence = enter_carry_while_holding(
                    self,
                    self.created_target,
                    callback=self._object_create_helper.claim,
                    create_si_fn=None,
                    sequence=sequence)
            result = yield element_utils.run_child(timeline, sequence)

        return self._object_create_helper.create(grab_sequence)

    def _xevt_callback(self, *_, **__):
        if self.carry_target is not None and self.target is not None:
            if self._object_create_helper is None:
                for statistic in self.target.statistic_tracker:
                    self.carry_target.statistic_tracker.add_value(
                        statistic.stat_type, statistic.get_value())
            elif self._original_carry_target is not None:
                for statistic in self._original_carry_target.statistic_tracker:
                    self.carry_target.statistic_tracker.add_value(
                        statistic.stat_type, statistic.get_value())
            elif self.aggregate_object is self.INTERACTION_TARGET:
                self.carry_target.copy_state_values(self.target)
            else:
                for statistic in self.target.statistic_tracker:
                    self.carry_target.statistic_tracker.set_value(
                        statistic.stat_type, statistic.get_value())
        if self.destroy_original_object and self.target is not None:
            self._collected_targets.add(self.target)
            self.target.transient = True
            self.target.remove_from_client()

    @classproperty
    def requires_target_support(cls):
        return False
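
The _collected_targets class attribute above is a recurring WeakSet pattern: remember which objects were already handled without keeping them alive. A standalone sketch of the idea (names are hypothetical, not the game's API):

from weakref import WeakSet

class Collector(object):
    _collected_targets = WeakSet()  # shared across all Collector instances

    def can_collect(self, target):
        # Mirrors the _test() guard above: refuse targets seen before.
        return target not in self._collected_targets

    def mark_collected(self, target):
        # No strong reference is taken, so a destroyed target drops out
        # of the registry on its own.
        self._collected_targets.add(target)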
Example n. 51
class BaseService(CancellableMixin, AsyncioServiceAPI):
    # Use a WeakSet so that we don't have to bother updating it when tasks finish.
    _child_services: 'WeakSet[AsyncioServiceAPI]'
    _tasks: 'WeakSet[asyncio.Future[Any]]'
    _finished_callbacks: List[Callable[[AsyncioServiceAPI], None]]
    # Number of seconds cancel() will wait for run() to finish.
    _wait_until_finished_timeout = 5

    # The custom event loop to run in, or None if the default loop should be used.
    _loop: Optional[asyncio.AbstractEventLoop] = None

    _logger: Optional[ExtendedDebugLogger] = None

    _start_time: Optional[float] = None

    def __init__(self,
                 token: Optional[CancelToken] = None,
                 loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
        self.events = ServiceEvents()
        self._run_lock = asyncio.Lock()
        self._child_services = WeakSet()
        self._tasks = WeakSet()
        self._finished_callbacks = []

        self._loop = loop

        base_token = CancelToken(type(self).__name__, loop=loop)

        if token is None:
            self.cancel_token = base_token
        else:
            self.cancel_token = base_token.chain(token)

    @property
    def logger(self) -> ExtendedDebugLogger:
        if self._logger is None:
            self._logger = get_logger(
                self.__module__ + '.' + self.__class__.__name__
            )
        return self._logger

    @property
    def uptime(self) -> float:
        if self._start_time is None:
            return 0.0
        else:
            return time.monotonic() - self._start_time

    def get_event_loop(self) -> asyncio.AbstractEventLoop:
        if self._loop is None:
            return asyncio.get_event_loop()
        else:
            return self._loop

    async def run(
            self,
            finished_callback: Optional[Callable[[AsyncioServiceAPI], None]] = None) -> None:
        """Await for the service's _run() coroutine.

        Once _run() returns, triggers the cancel token, call cleanup() and
        finished_callback (if one was passed).
        """
        if self.is_running:
            raise ValidationError(f"Cannot start the service while it's already running: {self}")
        elif self.is_cancelled:
            raise ValidationError(
                f"Cannot restart a service that has already been cancelled: {self} -> "
                f"{self.cancel_token.triggered_token}"
            )

        if finished_callback:
            self._finished_callbacks.append(finished_callback)

        try:
            async with self._run_lock:
                self.events.started.set()
                self._start_time = time.monotonic()
                await self._run()
        except OperationCancelled as e:
            self.logger.debug("%s finished: %s", self, e)
        except asyncio.CancelledError:
            # If a cancellation occurs we just want to re-raise it.  No need to log anything.
            raise
        except Exception:
            self.logger.exception("Unexpected error in %r, exiting", self)
        else:
            if self.is_cancelled:
                self.logger.debug("%s cancelled, cleaning up...", self)
            else:
                self.logger.debug("%s had nothing left to do, ceasing operation...", self)
        finally:
            # Trigger our cancel token to ensure all pending asyncio tasks and background
            # coroutines started by this service exit cleanly.
            self.events.cancelled.set()
            self.cancel_token.trigger()

            await self.cleanup()

            for callback in self._finished_callbacks:
                callback(self)

            self.events.finished.set()
            self.logger.debug("%s halted cleanly", self)

    def add_finished_callback(self, finished_callback: Callable[[AsyncioServiceAPI], None]) -> None:
        self._finished_callbacks.append(finished_callback)

    def run_task(self, awaitable: Awaitable[Any]) -> None:
        """Run the given awaitable in the background.

        The awaitable should return whenever this service's cancel token is triggered.

        If it raises OperationCancelled, that is caught and ignored.
        """
        @functools.wraps(awaitable)  # type: ignore
        async def _run_task_wrapper() -> None:
            self.logger.debug2("Running task %s", awaitable)
            try:
                await awaitable
            except OperationCancelled:
                pass
            except asyncio.CancelledError:
                pass
            except Exception as e:
                self.logger.warning("Task %s finished unexpectedly: %r", awaitable, e)
                self.logger.debug("Task failure traceback", exc_info=True)
            else:
                self.logger.debug2("Task %s finished with no errors", awaitable)
        self._tasks.add(asyncio.ensure_future(_run_task_wrapper()))

    def run_daemon_task(self, awaitable: Awaitable[Any]) -> None:
        """Run the given awaitable in the background.

        Like :meth:`run_task`, but if the task ends without being cancelled, this
        service will terminate as well.
        """
        @functools.wraps(awaitable)  # type: ignore
        async def _run_daemon_task_wrapper() -> None:
            try:
                await awaitable
            finally:
                if not self.is_cancelled:
                    self.logger.debug(
                        "%s finished while %s is still running, terminating as well",
                        awaitable,
                        self,
                    )
                    self.cancel_nowait()
        self.run_task(_run_daemon_task_wrapper())

    def run_child_service(self, child_service: AsyncioServiceAPI) -> None:
        """
        Run a child service and keep a reference to it to be considered during the cleanup.
        """
        if child_service.is_running:
            raise ValidationError(
                f"Can't start service {child_service!r}, child of {self!r}: it's already running"
            )
        elif child_service.is_cancelled:
            raise ValidationError(
                f"Can't restart {child_service!r}, child of {self!r}: it's already completed"
            )

        self._child_services.add(child_service)
        self.run_task(child_service.run())

    def run_daemon(self, service: AsyncioServiceAPI) -> None:
        """
        Run a service and keep a reference to it to be considered during the cleanup.

        If the service finishes while we're still running, we'll terminate as well.
        """
        if service.is_running:
            raise ValidationError(
                f"Can't start daemon {service!r}, child of {self!r}: it's already running"
            )
        elif service.is_cancelled:
            raise ValidationError(
                f"Can't restart daemon {service!r}, child of {self!r}: it's already completed"
            )

        self._child_services.add(service)

        @functools.wraps(service.run)
        async def _run_daemon_wrapper() -> None:
            try:
                await service.run()
            except OperationCancelled:
                pass
            except Exception as e:
                self.logger.warning("Daemon Service %s finished unexpectedly: %s", service, e)
                self.logger.debug("Daemon Service failure traceback", exc_info=True)
            finally:
                if not self.is_cancelled:
                    self.logger.debug(
                        "%s finished while %s is still running, terminating as well",
                        service,
                        self,
                    )
                    self.cancel_nowait()

        self.run_task(_run_daemon_wrapper())

    def call_later(self, delay: float, callback: 'Callable[..., None]', *args: Any) -> None:

        @functools.wraps(callback)
        async def _call_later_wrapped() -> None:
            await self.sleep(delay)
            callback(*args)

        self.run_task(_call_later_wrapped())

    async def _run_in_executor(self,
                               executor: concurrent.futures.Executor,
                               callback: Callable[..., Any], *args: Any) -> Any:

        loop = self.get_event_loop()
        try:
            return await self.wait(loop.run_in_executor(executor, callback, *args))
        except concurrent.futures.process.BrokenProcessPool:
            self.logger.exception("Fatal error. Process pool died. Cancelling operations.")
            await self.cancel()

    async def cleanup(self) -> None:
        """
        Run the ``_cleanup()`` coroutine and set the ``cleaned_up`` event after the service as
        well as all child services finished their cleanup.

        The ``_cleanup()`` coroutine is invoked before the child services may have finished
        their cleanup.
        """
        if self._child_services:
            self.logger.debug("%s waiting for child services: %s", self, list(self._child_services))
            wait_for_clean_up_tasks = (
                child_service.events.cleaned_up.wait()
                for child_service in self._child_services
            )
            await asyncio.gather(*wait_for_clean_up_tasks)
            self.logger.debug("%s: All child services finished", self)
        if self._tasks:
            self._log_tasks("Waiting for tasks")
            await asyncio.gather(*self._tasks)
            self.logger.debug("All tasks finished")

        await self._cleanup()
        self.events.cleaned_up.set()

    def _log_tasks(self, message: str) -> None:
        MAX_DISPLAY_TASKS = 50
        task_list = list(self._tasks)
        if len(self._tasks) > MAX_DISPLAY_TASKS:
            task_display = ''.join(map(str, [
                task_list[:MAX_DISPLAY_TASKS // 2],
                '...',
                task_list[-1 * MAX_DISPLAY_TASKS // 2:],
            ]))
        else:
            task_display = str(task_list)
        self.logger.debug("%s: %s (%d): %s", self, message, len(self._tasks), task_display)

    def cancel_nowait(self) -> None:
        if self.is_cancelled:
            self.logger.warning("Tried to cancel %s, but it was already cancelled", self)
            self.logger.debug("Second cancellation of %s: stack trace", self, stack_info=True)
            return
        elif not self.is_running:
            raise ValidationError("Cannot cancel a service that has not been started")

        self.logger.debug("Cancelling service: %s", self)
        self.events.cancelled.set()
        self.cancel_token.trigger()

    async def cancel(self) -> None:
        """Trigger the CancelToken and wait for the cleaned_up event to be set."""
        self.cancel_nowait()

        try:
            await asyncio.wait_for(
                self.events.cleaned_up.wait(), timeout=self._wait_until_finished_timeout)
        except asyncio.TimeoutError:
            self.logger.info(
                "Timed out waiting for %s to finish its cleanup, forcibly cancelling pending "
                "tasks and exiting anyway", self)
            if self._tasks:
                self._log_tasks("Pending tasks")
            if self._child_services:
                self.logger.debug("Pending child services: %s", list(self._child_services))
            await self._forcibly_cancel_all_tasks()
            # Sleep a bit because the Future.cancel() method just schedules the callbacks, so we
            # need to give the event loop a chance to actually call them.
            await asyncio.sleep(0.01)
        else:
            self.logger.debug("%s finished cleanly", self)

    async def _forcibly_cancel_all_tasks(self) -> None:
        for task in self._tasks:
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass

    @property
    def is_cancelled(self) -> bool:
        return self.cancel_token.triggered

    @property
    def is_operational(self) -> bool:
        return self.events.started.is_set() and not self.cancel_token.triggered

    @property
    def is_running(self) -> bool:
        return self._run_lock.locked()

    async def cancellation(self) -> None:
        """
        Pause until this service is cancelled
        """
        await self.wait(self.events.cancelled.wait())

    async def threadsafe_cancel(self) -> None:
        """
        Cancel service in another thread. Block until service is cleaned up.
        """
        asyncio.run_coroutine_threadsafe(self.cancel(), loop=self.get_event_loop())
        await asyncio.wait_for(
            self.events.cleaned_up.wait(),
            timeout=self._wait_until_finished_timeout,
        )

    async def sleep(self, delay: float) -> None:
        """Coroutine that completes after a given time (in seconds)."""
        await self.wait(asyncio.sleep(delay))

    @abstractmethod
    async def _run(self) -> None:
        """Run the service's loop.

        Should return or raise OperationCancelled when the CancelToken is triggered.
        """
        pass

    async def _cleanup(self) -> None:
        """Clean up any resources held by this service.

        Called after the service's _run() method returns.
        """
        pass

    def as_new_service(self) -> ServiceAPI:
        return WrappedLegacyService(self)
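
The _tasks bookkeeping above is what the comment near the top of the class alludes to: because the set holds tasks weakly, finished tasks fall out of it on their own, and cleanup only needs to gather whatever is still pending. A toy version of the pattern (a sketch, not this service's actual API):

import asyncio
from weakref import WeakSet

class TaskRunner:
    def __init__(self):
        self._tasks = WeakSet()

    def run_task(self, coro):
        task = asyncio.ensure_future(coro)
        self._tasks.add(task)  # weakly tracked: finished tasks just vanish
        return task            # callers should keep the strong reference,
                               # since asyncio itself holds tasks only weakly

    async def cleanup(self):
        if self._tasks:
            await asyncio.gather(*self._tasks)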
Example n. 52
class EObject(ENotifer, metaclass=Metasubinstance):
    _staticEClass = True
    _instances = WeakSet()

    def __new__(cls, *args, **kwargs):
        instance = super().__new__(cls)
        instance._internal_id = None
        instance._isset = set()
        instance._container = None
        instance._containment_feature = None
        instance._eresource = None
        instance.listeners = []
        instance._eternal_listener = []
        instance._inverse_rels = set()
        instance._staticEClass = False
        cls._instances.add(instance)
        return instance

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @classmethod
    def allInstances(cls, resources=None):
        if resources:
            yield from (x for x in cls._instances
                        if isinstance(x, cls) and x.eResource in resources)
        else:
            yield from (x for x in cls._instances if isinstance(x, cls))

    def eContainer(self):
        return self._container

    def eContainmentFeature(self):
        return self._containment_feature

    def eIsSet(self, feature):
        if isinstance(feature, str):
            feature = self.eClass.findEStructuralFeature(feature)
        return feature in self._isset

    @property
    def eResource(self):
        if self.eContainer():
            with ignored(AttributeError):
                return self.eContainer().eResource
        return self._eresource

    def eGet(self, feature):
        if isinstance(feature, str):
            return self.__getattribute__(feature)
        elif isinstance(feature, EStructuralFeature):
            return self.__getattribute__(feature.name)
        raise TypeError('Feature must have str or EStructuralFeature type')

    def eSet(self, feature, value):
        if isinstance(feature, str):
            self.__setattr__(feature, value)
        elif isinstance(feature, EStructuralFeature):
            self.__setattr__(feature.name, value)
        else:
            raise TypeError('Feature must have str or '
                            'EStructuralFeature type')

    def delete(self, recursive=True):
        if recursive:
            for obj in self.eAllContents():
                obj.delete()
        seek = set(self._inverse_rels)
        # we also clean all the object references
        seek.update((self, ref) for ref in self.eClass.eAllReferences())
        for owner, feature in seek:
            fvalue = owner.eGet(feature)
            if feature.many:
                if self in fvalue:
                    fvalue.remove(self)
                    continue
                elif self is owner:
                    fvalue.clear()
                    continue
                value = next((val for val in fvalue
                              if getattr(val, '_wrapped', None) is self), None)
                if value:
                    fvalue.remove(value)
            else:
                if self is fvalue or self is owner:
                    owner.eSet(feature, None)
                    continue
                value = (fvalue if getattr(fvalue, '_wrapped', None) is self
                         else None)
                if value:
                    owner.eSet(feature, None)

    @property
    def eContents(self):
        children = []
        for feature in self.eClass.eAllReferences():
            if not feature.containment or feature.derived:
                continue
            if feature.many:
                values = self.__getattribute__(feature.name)
            else:
                values = [self.__getattribute__(feature.name)]
            children.extend((x for x in values if x))
        return children

    def eAllContents(self):
        contents = self.eContents
        yield from contents
        for x in contents:
            yield from x.eAllContents()

    def eURIFragment(self):
        if not self.eContainer():
            if not self.eResource or len(self.eResource.contents) == 1:
                return '/'
            else:
                return '/{}'.format(self.eResource.contents.index(self))
        feat = self.eContainmentFeature()
        parent = self.eContainer()
        name = feat.name
        if feat.many:
            index = parent.__getattribute__(name).index(self)
            return '{0}/@{1}.{2}' \
                   .format(parent.eURIFragment(), name, str(index))
        else:
            return '{0}/@{1}'.format(parent.eURIFragment(), name)

    def eRoot(self):
        if not self.eContainer():
            return self
        if not isinstance(self.eContainer(), EObject):
            return self.eContainer()
        return self.eContainer().eRoot()

    def __dir__(self):
        eclass = self.eClass
        relevant = [x.name for x in eclass.eAllStructuralFeatures()]
        relevant.extend([
            x.name for x in eclass.eAllOperations()
            if not x.name.startswith('_')
        ])
        return relevant
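
EObject.allInstances() works because every new instance is registered in the class-level _instances WeakSet inside __new__. The bare pattern, reduced to its essentials (a sketch):

from weakref import WeakSet

class Tracked(object):
    _instances = WeakSet()

    def __new__(cls, *args, **kwargs):
        instance = super().__new__(cls)
        cls._instances.add(instance)  # weak registration: no leak
        return instance

    @classmethod
    def all_instances(cls):
        # Subclass calls see only their own still-living instances.
        yield from (x for x in cls._instances if isinstance(x, cls))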
Example n. 53
class Task(futures.Future):
    """A coroutine wrapped in a Future."""

    # An important invariant maintained while a Task not done:
    #
    # - Either _fut_waiter is None, and _step() is scheduled;
    # - or _fut_waiter is some Future, and _step() is *not* scheduled.
    #
    # The only transition from the latter to the former is through
    # _wakeup().  When _fut_waiter is not None, one of its callbacks
    # must be _wakeup().

    # Weak set containing all tasks alive.
    _all_tasks = WeakSet()

    # Dictionary containing tasks that are currently active in
    # all running event loops.  {EventLoop: Task}
    _current_tasks = {}

    # If False, don't log a message if the task is destroyed whereas its
    # status is still pending
    _log_destroy_pending = True

    @classmethod
    def current_task(cls, loop=None):
        """Return the currently running task in an event loop or None.

        By default the current task for the current event loop is returned.

        None is returned when called not in the context of a Task.
        """
        if loop is None:
            loop = events.get_event_loop()
        return cls._current_tasks.get(loop)

    @classmethod
    def all_tasks(cls, loop=None):
        """Return a set of all tasks for an event loop.

        By default all tasks for the current event loop are returned.
        """
        if loop is None:
            loop = events.get_event_loop()
        return set(t for t in cls._all_tasks if t._loop is loop)

    def __init__(self, coro, loop=None):
        assert coroutines.iscoroutine(coro), repr(coro)  # Not a coroutine function!
        super(Task, self).__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        self._coro = iter(coro)  # Use the iterator just in case.
        self._fut_waiter = None
        self._must_cancel = False
        self._loop.call_soon(self._step)
        self.__class__._all_tasks.add(self)

    # On Python 3.3 or older, objects with a destructor that are part of a
    # reference cycle are never destroyed. That's not the case any more on
    # Python 3.4 thanks to the PEP 442.
    if _PY34:

        def __del__(self):
            if self._state == futures._PENDING and self._log_destroy_pending:
                context = {
                    'task': self,
                    'message': 'Task was destroyed but it is pending!',
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
            futures.Future.__del__(self)

    def _repr_info(self):
        info = super(Task, self)._repr_info()

        if self._must_cancel:
            # replace status
            info[0] = 'cancelling'

        coro = coroutines._format_coroutine(self._coro)
        info.insert(1, 'coro=<%s>' % coro)

        if self._fut_waiter is not None:
            info.insert(2, 'wait_for=%r' % self._fut_waiter)
        return info

    def get_stack(self, limit=None):
        """Return the list of stack frames for this task's coroutine.

        If the coroutine is not done, this returns the stack where it is
        suspended.  If the coroutine has completed successfully or was
        cancelled, this returns an empty list.  If the coroutine was
        terminated by an exception, this returns the list of traceback
        frames.

        The frames are always ordered from oldest to newest.

        The optional limit gives the maximum number of frames to
        return; by default all available frames are returned.  Its
        meaning differs depending on whether a stack or a traceback is
        returned: the newest frames of a stack are returned, but the
        oldest frames of a traceback are returned.  (This matches the
        behavior of the traceback module.)

        For reasons beyond our control, only one stack frame is
        returned for a suspended coroutine.
        """
        frames = []
        f = self._coro.gi_frame
        if f is not None:
            while f is not None:
                if limit is not None:
                    if limit <= 0:
                        break
                    limit -= 1
                frames.append(f)
                f = f.f_back
            frames.reverse()
        elif self._exception is not None:
            tb = self._exception.__traceback__
            while tb is not None:
                if limit is not None:
                    if limit <= 0:
                        break
                    limit -= 1
                frames.append(tb.tb_frame)
                tb = tb.tb_next
        return frames

    def print_stack(self, limit=None, file=None):
        """Print the stack or traceback for this task's coroutine.

        This produces output similar to that of the traceback module,
        for the frames retrieved by get_stack().  The limit argument
        is passed to get_stack().  The file argument is an I/O stream
        to which the output is written; by default output is written
        to sys.stderr.
        """
        extracted_list = []
        checked = set()
        for f in self.get_stack(limit=limit):
            lineno = f.f_lineno
            co = f.f_code
            filename = co.co_filename
            name = co.co_name
            if filename not in checked:
                checked.add(filename)
                linecache.checkcache(filename)
            line = linecache.getline(filename, lineno, f.f_globals)
            extracted_list.append((filename, lineno, name, line))
        exc = self._exception
        if not extracted_list:
            print('No stack for %r' % self, file=file)
        elif exc is not None:
            print('Traceback for %r (most recent call last):' % self,
                  file=file)
        else:
            print('Stack for %r (most recent call last):' % self, file=file)
        traceback.print_list(extracted_list, file=file)
        if exc is not None:
            for line in traceback.format_exception_only(exc.__class__, exc):
                print(line, file=file, end='')

    def cancel(self):
        """Request that this task cancel itself.

        This arranges for a CancelledError to be thrown into the
        wrapped coroutine on the next cycle through the event loop.
        The coroutine then has a chance to clean up or even deny
        the request using try/except/finally.

        Unlike Future.cancel, this does not guarantee that the
        task will be cancelled: the exception might be caught and
        acted upon, delaying cancellation of the task or preventing
        cancellation completely.  The task may also return a value or
        raise a different exception.

        Immediately after this method is called, Task.cancelled() will
        not return True (unless the task was already cancelled).  A
        task will be marked as cancelled when the wrapped coroutine
        terminates with a CancelledError exception (even if cancel()
        was not called).
        """
        if self.done():
            return False
        if self._fut_waiter is not None:
            if self._fut_waiter.cancel():
                # Leave self._fut_waiter; it may be a Task that
                # catches and ignores the cancellation so we may have
                # to cancel it again later.
                return True
        # It must be the case that self._step is already scheduled.
        self._must_cancel = True
        return True

    def _step(self, value=None, exc=None, exc_tb=None):
        assert not self.done(), \
            '_step(): already done: {0!r}, {1!r}, {2!r}'.format(self, value, exc)
        if self._must_cancel:
            if not isinstance(exc, futures.CancelledError):
                exc = futures.CancelledError()
            self._must_cancel = False
        coro = self._coro
        self._fut_waiter = None

        if exc_tb is not None:
            init_exc = exc
        else:
            init_exc = None
        self.__class__._current_tasks[self._loop] = self
        # Call either coro.throw(exc) or coro.send(value).
        try:
            if exc is not None:
                result = coro.throw(exc)
            elif value is not None:
                result = coro.send(value)
            else:
                result = next(coro)
        except StopIteration as exc:
            if compat.PY33:
                # asyncio Task object? get the result of the coroutine
                result = exc.value
            else:
                if isinstance(exc, Return):
                    exc.raised = True
                    result = exc.value
                else:
                    result = None
            self.set_result(result)
        except futures.CancelledError as exc:
            super(Task, self).cancel()  # I.e., Future.cancel(self).
        except BaseException as exc:
            if exc is init_exc:
                self._set_exception_with_tb(exc, exc_tb)
                exc_tb = None
            else:
                self.set_exception(exc)

            if not isinstance(exc, Exception):
                # reraise BaseException
                raise
        else:
            if coroutines._DEBUG:
                if not coroutines._coroutine_at_yield_from(self._coro):
                    # trollius coroutine must "yield From(...)"
                    if not isinstance(result, coroutines.FromWrapper):
                        self._loop.call_soon(
                            self._step, None,
                            RuntimeError("yield used without From"))
                        return
                    result = result.obj
                else:
                    # asyncio coroutine using "yield from ..."
                    if isinstance(result, coroutines.FromWrapper):
                        result = result.obj
            elif isinstance(result, coroutines.FromWrapper):
                result = result.obj

            if coroutines.iscoroutine(result):
                # "yield coroutine" creates a task, the current task
                # will wait until the new task is done
                result = self._loop.create_task(result)
            # FIXME: faster check. common base class? hasattr?
            elif isinstance(result, (Lock, Condition, Semaphore)):
                coro = _lock_coroutine(result)
                result = self._loop.create_task(coro)

            if isinstance(result, futures._FUTURE_CLASSES):
                # Yielded Future must come from Future.__iter__().
                result.add_done_callback(self._wakeup)
                self._fut_waiter = result
                if self._must_cancel:
                    if self._fut_waiter.cancel():
                        self._must_cancel = False
            elif result is None:
                # Bare yield relinquishes control for one event loop iteration.
                self._loop.call_soon(self._step)
            else:
                # Yielding something else is an error.
                self._loop.call_soon(
                    self._step, None,
                    RuntimeError('Task got bad yield: {0!r}'.format(result)))
        finally:
            self.__class__._current_tasks.pop(self._loop)
            self = None  # Needed to break cycles when an exception occurs.

    def _wakeup(self, future):
        if (future._state == futures._FINISHED
                and future._exception is not None):
            # Get the traceback before calling exception(), because calling
            # the exception() method clears the traceback
            exc_tb = future._get_exception_tb()
            exc = future.exception()
            self._step(None, exc, exc_tb)
            exc_tb = None
        else:
            try:
                value = future.result()
            except Exception as exc:
                # This may also be a cancellation.
                self._step(None, exc)
            else:
                self._step(value, None)
        self = None  # Needed to break cycles when an exception occurs.
Example n. 54
 def __init__(self, updaters=()):
     from weakref import WeakSet
     self._updaters = WeakSet(updaters)
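
One caveat with holding updaters weakly, as above: the WeakSet never keeps its members alive, so callers must hold their own references, and anything constructed inline is collected almost immediately. Bound methods are especially short-lived, since obj.method builds a fresh object on every access. A short illustration (CPython's reference counting makes the collection immediate):

from weakref import WeakSet

updaters = WeakSet()

def noop():
    pass

updaters.add(noop)          # module-level function: the module keeps it alive
updaters.add(lambda: None)  # nothing else references the lambda, so it is
                            # collected as soon as this statement finishes

assert noop in updaters
assert len(updaters) == 1   # the lambda is already gone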
Example n. 55
class _Memoize:
    """Decorates a function call and caches return value for given inputs.
    - If `db_path` is provided, memos will persist on disk and reloaded during initialization.
    - If `duration` is provided, memos will only be valid for given `duration`.
    - If `keygen` is provided, memo hash keys will be created with given `keygen`.
    - If `pickler` is provided, persistent memos will (de)serialize using given `pickler`.
    - If `size` is provided, LRU memo will be evicted if current count exceeds given `size`.

    ### Examples

    - Body will run once for unique input `bar` and result is cached.
        ```python3
        @memoize
        def foo(bar) -> Any: ...

        foo(1)  # Function actually called. Result cached.
        foo(1)  # Function not called. Cached result returned.
        foo(2)  # Function actually called. Result cached.
        ```

    - Same as above, but async.
        ```python3
        @memoize
        async def foo(bar) -> Any: ...

        # Concurrent calls from the same event loop are safe. Only one call is generated. The
        # other nine calls in this example wait for the result.
        await asyncio.gather(*[foo(1) for _ in range(10)])
        ```

    - Classes may be memoized.
        ```python3
        @memoize
        class Foo:
            def __init__(self, _): ...

        Foo(1)  # Instance is actually created.
        Foo(1)  # Instance not created. Cached instance returned.
        Foo(2)  # Instance is actually created.
        ```

    - Calls `foo(1)`, `foo(bar=1)`, and `foo(1, baz='baz')` are equivalent and only cached once.
        ```python3
        @memoize
        def foo(bar, baz='baz'): ...
        ```

    - Only 2 items are cached. Acts as an LRU.
        ```python3
        @memoize(size=2)
        def foo(bar) -> Any: ...

        foo(1)  # LRU cache order [foo(1)]
        foo(2)  # LRU cache order [foo(1), foo(2)]
        foo(1)  # LRU cache order [foo(2), foo(1)]
        foo(3)  # LRU cache order [foo(1), foo(3)], foo(2) is evicted to keep cache size at 2
        ```

    - Items are evicted after 1 minute.
        ```python3
        @memoize(duration=datetime.timedelta(minutes=1))
        def foo(bar) -> Any: ...

        foo(1)  # Function actually called. Result cached.
        foo(1)  # Function not called. Cached result returned.
        sleep(61)
        foo(1)  # Function actually called. Cached result was too old.
        ```

    - Memoize can be explicitly reset through the function's `.memoize` attribute
        ```python3
        @memoize
        def foo(bar) -> Any: ...

        foo(1)  # Function actually called. Result cached.
        foo(1)  # Function not called. Cached result returned.
        foo.memoize.reset()
        foo(1)  # Function actually called. Cache was emptied.
        ```

    - Current cache length can be accessed through the function's `.memoize` attribute
        ```python3
        @memoize
        def foo(bar) -> Any: ...

        foo(1)
        foo(2)
        len(foo.memoize)  # returns 2
        ```

    - Alternate memo hash function can be specified. The inputs must match the function's.
        ```python3
        class Foo:
            @memoize(keygen=lambda self, a, b, c: (a, b, c))  # Omit 'self' from hash key.
            def bar(self, a, b, c) -> Any: ...

        a, b = Foo(), Foo()

        # Hash key will be (a, b, c)
        a.bar(1, 2, 3)  # LRU cache order [Foo.bar(a, 1, 2, 3)]

        # Hash key will again be (a, b, c)
        # Be aware, in this example the returned result comes from a.bar(...), not b.bar(...).
        b.bar(1, 2, 3)  # Function not called. Cached result returned.
        ```

    - If part of the returned key from keygen is awaitable, it will be awaited.
        ```python3
        async def awaitable_key_part() -> Hashable: ...

        @memoize(keygen=lambda bar: (bar, awaitable_key_part()))
        async def foo(bar) -> Any: ...
        ```

    - If the memoized function is async and any part of the key is awaitable, it is awaited.
        ```python3
        async def morph_a(a: int) -> int: ...

        @memoize(keygen=lambda a, b, c: (morph_a(a), b, c))
        def foo(a, b, c) -> Any: ...
        ```

    - Properties can be memoized.
        ```python3
        class Foo:
            @property
            @memoize
            def bar(self) -> Any: ...

        a = Foo()
        a.bar  # Function actually called. Result cached.
        a.bar  # Function not called. Cached result returned.

        b = Foo() # Memoize uses 'self' parameter in hash. 'b' does not share returns with 'a'
        b.bar  # Function actually called. Result cached.
        b.bar  # Function not called. Cached result returned.
        ```

    - Be careful with eviction on instance methods. Memoize is not instance-specific.
        ```python3
        class Foo:
            @memoize(size=1)
            def bar(self, baz) -> Any: ...

        a, b = Foo(), Foo()
        a.bar(1)  # LRU cache order [Foo.bar(a, 1)]
        b.bar(1)  # LRU cache order [Foo.bar(b, 1)], Foo.bar(a, 1) is evicted
        a.bar(1)  # Foo.bar(a, 1) is actually called and cached again.
        ```

    - Values can persist to disk and be reloaded when memoize is initialized again.
        ```python3
        @memoize(db_path=Path.home() / '.memoize')
        def foo(a) -> Any: ...

        foo(1)  # Function actually called. Result cached.

        # Process is restarted. Upon restart, the state of the memoize decorator is reloaded.

        foo(1)  # Function not called. Cached result returned.
        ```

    - If not applied to a function, calling the decorator returns a partial application.
        ```python3
        memoize_db = memoize(db_path=Path.home() / '.memoize')

        @memoize_db(size=1)
        def foo(a) -> Any: ...

        @memoize_db(duration=datetime.timedelta(hours=1))
        def bar(b) -> Any: ...
        ```

    - Comparison equality does not affect memoize. Only hash equality matters.
        ```python3
        # Inherits object.__hash__
        class Foo:
            # Don't be fooled. memoize only cares about the hash.
            def __eq__(self, other: Foo) -> bool:
                return True

        @memoize
        def bar(foo: Foo) -> Any: ...

        foo0, foo1 = Foo(), Foo()
        assert foo0 == foo1
        bar(foo0)  # Function called. Result cached.
        bar(foo1)  # Function called again, despite equality, due to different hash.
        ```

    ### A warning about arguments that inherit `object.__hash__`:

    It doesn't make sense to keep a memo if it's impossible to generate the same input again. Inputs
    that inherit the default `object.__hash__` are unique based on their id, and thus, their
    location in memory. If such inputs are garbage-collected, they are gone forever. For that
    reason, when those inputs are garbage collected, `memoize` will drop memos created using those
    inputs.

    - Memo lifetime is bound to the lifetime of any arguments that inherit `object.__hash__`.
        ```python3
        # Inherits object.__hash__
        class Foo:
            ...

        @memoize
        def bar(foo: Foo) -> Any: ...

        bar(Foo())  # Memo is immediately deleted since Foo() is garbage collected.

        foo = Foo()
        bar(foo)  # Memo isn't deleted until foo is deleted.
        del foo  # Memo is deleted at the same time as foo.
        ```

    - Types that have specific, consistent hash functions (int, str, etc.) won't cause problems.
        ```python3
        @memoize
        def foo(a: int, b: str, c: Tuple[int, ...], d: range) -> Any: ...

        foo(1, 'bar', (1, 2, 3), range(42))  # Function called. Result cached.
        foo(1, 'bar', (1, 2, 3), range(42))  # Function not called. Cached result returned.
        ```

    - Classmethods rely on classes, which inherit from `object.__hash__`. However, classes are
      almost never garbage collected until a process exits so memoize will work as expected.

        ```python3
        class Foo:
          @classmethod
          @memoize
          def bar(cls) -> Any: ...

        foo = Foo()
        foo.bar()  # Function called. Result cached.
        foo.bar()  # Function not called. Cached result returned.

        del foo  # Memo not cleared since lifetime is bound to class Foo.

        foo = Foo()
        foo.bar()  # Function not called. Cached result returned.
        foo.bar()  # Function not called. Cached result returned.
        ```

    - Long-lasting object instances that inherit from `object.__hash__`.

        ```python3
        class Foo:

            @memoize
            def bar(self) -> Any: ...

        foo = Foo()
        foo.bar()  # Function called. Result cached.

        # foo instance is kept around somewhere and used later.
        foo.bar()  # Function not called. Cached result returned.
        ```

    - Custom pickler may be specified for unpickleable return types.

        ```python3
        import dill

        @memoize(db_path='~/.memoize', pickler=dill)
        def foo() -> Callable[[], None]:
            return lambda: None
        ```
    """

    _all_decorators = WeakSet()

    @staticmethod
    def __call__(
        _decoratee: Optional[Decoratee] = None,
        *,
        db_path: Optional[Path] = None,
        duration: Optional[Union[int, float, timedelta]] = None,
        keygen: Optional[Keygen] = None,
        pickler: Optional[Pickler] = None,
        size: Optional[int] = None,
    ) -> Decoratee:
        if _decoratee is None:
            return partial(memoize,
                           db_path=db_path,
                           duration=duration,
                           keygen=keygen,
                           pickler=pickler,
                           size=size)

        if inspect.isclass(_decoratee):
            assert db_path is None, 'Class memoization not allowed with db.'

            class WrappedMeta(type(_decoratee)):
                # noinspection PyMethodParameters
                @memoize(duration=duration, size=size)
                def __call__(cls, *args, **kwargs):
                    return super().__call__(*args, **kwargs)

            class Wrapped(_decoratee, metaclass=WrappedMeta):
                pass

            return type(_decoratee.__name__, (Wrapped, ),
                        {'__doc__': _decoratee.__doc__})

        db = connect(f'{db_path}') if db_path is not None else None
        duration = timedelta(
            seconds=duration) if isinstance(duration,
                                            (int, float)) else duration
        assert (duration is None) or (duration.total_seconds() > 0)
        pickler = pickle if pickler is None else pickler
        assert (size is None) or (size > 0)
        fn = _decoratee
        default_kwargs: Mapping[str, Any] = {
            k: v.default
            for k, v in inspect.signature(fn).parameters.items()
        }

        if inspect.iscoroutinefunction(_decoratee):
            decorator_cls = _AsyncMemoize
        else:
            decorator_cls = _SyncMemoize

        # noinspection PyArgumentList
        decorator = decorator_cls(
            db=db,
            default_kwargs=default_kwargs,
            duration=duration,
            fn=fn,
            keygen=keygen,
            pickler=pickler,
            size=size,
        ).get_decorator()

        _Memoize._all_decorators.add(decorator)

        return wraps(_decoratee)(decorator)

    @staticmethod
    def reset_all() -> None:
        for decorator in _Memoize._all_decorators:
            decorator.memoize.reset()
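
reset_all() above shows the broadcast half of the registry pattern: iterating a WeakSet to reach every decorator that is still alive. Distilled to a sketch (names are hypothetical):

from weakref import WeakSet

_all_caches = WeakSet()

def register(cache):
    _all_caches.add(cache)

def reset_all():
    # Caches whose owners were garbage-collected are simply absent from
    # the set, so the broadcast never touches dead objects.
    for cache in _all_caches:
        cache.clear()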
Example n. 56
class Pool(ThreadPoolExecutor, NewExecutorPoolMixin):
    """Let ThreadPoolExecutor use NewFuture instead of origin concurrent.futures.Future.

    WARNING: NewFutures in Pool will not block main thread without NewFuture.x.

    Basic Usage::

            from torequests.main import Pool
            import time

            pool = Pool()


            def use_submit(i):
                time.sleep(i)
                result = 'use_submit: %s' % i
                print(result)
                return result


            @pool.async_func
            def use_decorator(i):
                time.sleep(i)
                result = 'use_decorator: %s' % i
                print(result)
                return result


            tasks = [pool.submit(use_submit, i) for i in (2, 1, 0)
                    ] + [use_decorator(i) for i in (2, 1, 0)]
            # pool.x can be ignored
            pool.x
            results = [i.x for i in tasks]
            print(results)

            # use_submit: 0
            # use_decorator: 0
            # use_submit: 1
            # use_decorator: 1
            # use_submit: 2
            # use_decorator: 2
            # ['use_submit: 2', 'use_submit: 1', 'use_submit: 0', 'use_decorator: 2', 'use_decorator: 1', 'use_decorator: 0']
    """
    def __init__(self,
                 n=None,
                 timeout=None,
                 default_callback=None,
                 catch_exception=True,
                 *args,
                 **kwargs):
        n = n or kwargs.pop("max_workers", None)
        if PY2 and n is None:
            # Python 2 requires an explicit max_workers; it cannot be None.
            n = (self._get_cpu_count() or 1) * 5
        super(Pool, self).__init__(n, *args, **kwargs)
        #: set the default timeout
        self._timeout = timeout
        #: set the default_callback if not set single task's callback
        self.default_callback = default_callback
        #: WeakSet of all submitted futures, used by self.x
        self._all_futures = WeakSet()
        #: catch_exception=True suppresses exceptions and returns a FailureException(exception) object instead
        self.catch_exception = catch_exception

    @property
    def all_tasks(self):
        """Keep the same api for dummy, return self._all_futures actually"""
        return self._all_futures

    def submit(self, func, *args, **kwargs):
        """Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`"""

        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError(
                    "cannot schedule new futures after shutdown")
            callback = kwargs.pop("callback", self.default_callback)
            future = NewFuture(
                self._timeout,
                args,
                kwargs,
                callback=callback,
                catch_exception=self.catch_exception,
            )
            w = _WorkItem(future, func, args, kwargs)
            self._work_queue.put(w)
            self._adjust_thread_count()
            self._all_futures.add(future)
            return future
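
Because `_all_futures` is a `WeakSet`, the pool only tracks futures that are still strongly referenced somewhere; once a caller drops its last reference, the future silently leaves the set and `pool.x` no longer waits on it. A self-contained illustration of that property (using a plain `concurrent.futures.Future` purely for demonstration):

```python3
import gc
from concurrent.futures import Future
from weakref import WeakSet

tracked = WeakSet()

f = Future()
tracked.add(f)
assert len(tracked) == 1  # Tracked while a strong reference exists.

del f         # Drop the only strong reference.
gc.collect()  # Immediate in CPython; explicit for other implementations.
assert len(tracked) == 0  # The pool would no longer wait on this future.
```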
Example no. 57
class Agent(AgentT, Service):
    """Agent.

    This is the type of object returned by the ``@app.agent`` decorator.
    """

    supervisor: SupervisorStrategyT = None
    instances: MutableSequence[ActorRefT]

    # channel is loaded lazily on .channel property access
    # to make sure configuration is not accessed when agent created
    # at module-scope.
    _channel: Optional[ChannelT] = None
    _channel_arg: Optional[Union[str, ChannelT]]
    _channel_kwargs: Dict[str, Any]
    _channel_iterator: Optional[AsyncIterator] = None
    _sinks: List[SinkT]

    _actors: MutableSet[ActorRefT]
    _actor_by_partition: MutableMapping[TP, ActorRefT]

    #: This mutable set is used by the first agent we start,
    #: so that we can update its active_partitions later
    #: (in on_partitions_assigned, when we know what partitions we get).
    _pending_active_partitions: Optional[Set[TP]] = None

    _first_assignment_done: bool = False

    def __init__(self,
                 fun: AgentFun,
                 *,
                 app: AppT,
                 name: str = None,
                 channel: Union[str, ChannelT] = None,
                 concurrency: int = 1,
                 sink: Iterable[SinkT] = None,
                 on_error: AgentErrorHandler = None,
                 supervisor_strategy: Type[SupervisorStrategyT] = None,
                 help: str = None,
                 schema: SchemaT = None,
                 key_type: ModelArg = None,
                 value_type: ModelArg = None,
                 isolated_partitions: bool = False,
                 use_reply_headers: bool = None,
                 **kwargs: Any) -> None:
        self.app = app
        self.fun: AgentFun = fun
        self.name = name or canonshortname(self.fun)
        # The schema/key_type/value_type arguments only apply
        # when a channel is not set.
        if schema is not None:
            assert channel is None or isinstance(channel, str)
        if key_type is not None:
            assert channel is None or isinstance(channel, str)
        self._key_type = key_type
        if value_type is not None:
            assert channel is None or isinstance(channel, str)
        self._schema = schema
        self._value_type = value_type
        self._channel_arg = channel
        self._channel_kwargs = kwargs
        self.concurrency = concurrency or 1
        self.isolated_partitions = isolated_partitions
        self.help = help or ''
        self._sinks = list(sink) if sink is not None else []
        self._on_error: Optional[AgentErrorHandler] = on_error
        self.supervisor_strategy = supervisor_strategy
        self._actors = WeakSet()
        self._actor_by_partition = WeakValueDictionary()
        if self.isolated_partitions and self.concurrency > 1:
            raise ImproperlyConfigured(
                'Agent concurrency must be 1 when using isolated partitions')
        self.use_reply_headers = use_reply_headers
        Service.__init__(self)

    def on_init_dependencies(self) -> Iterable[ServiceT]:
        """Return list of services dependencies required to start agent."""
        # Agent service is now a child of app.
        self.beacon.reattach(self.app.beacon)
        return []

    async def _start_one(self,
                         *,
                         index: Optional[int] = None,
                         active_partitions: Optional[Set[TP]] = None,
                         stream: StreamT = None,
                         channel: ChannelT = None) -> ActorT:
        # an index of None means there's only one instance,
        # and `index is None` is used as a test by functions that
        # disallow concurrency.
        index = index if self.concurrency > 1 else None
        return await self._start_task(
            index=index,
            active_partitions=active_partitions,
            stream=stream,
            channel=channel,
            beacon=self.beacon,
        )

    async def _start_one_supervised(self,
                                    index: Optional[int] = None,
                                    active_partitions: Optional[
                                        Set[TP]] = None,
                                    stream: StreamT = None) -> ActorT:
        aref = await self._start_one(
            index=index,
            active_partitions=active_partitions,
            stream=stream,
        )
        self.supervisor.add(aref)
        await aref.maybe_start()
        return aref

    async def _start_for_partitions(self,
                                    active_partitions: Set[TP]) -> ActorT:
        assert active_partitions
        self.log.info('Starting actor for partitions %s', active_partitions)
        return await self._start_one_supervised(None, active_partitions)

    async def on_start(self) -> None:
        """Call when an agent starts."""
        self.supervisor = self._new_supervisor()
        await self._on_start_supervisor()

    def _new_supervisor(self) -> SupervisorStrategyT:
        return self._get_supervisor_strategy()(
            max_restarts=100.0,
            over=1.0,
            replacement=self._replace_actor,
            loop=self.loop,
            beacon=self.beacon,
        )

    async def _replace_actor(self, service: ServiceT, index: int) -> ServiceT:
        aref = cast(ActorRefT, service)
        return await self._start_one(
            index=index,
            active_partitions=aref.active_partitions,
            stream=aref.stream,
            channel=cast(ChannelT, aref.stream.channel),
        )

    def _get_supervisor_strategy(self) -> Type[SupervisorStrategyT]:
        SupervisorStrategy = self.supervisor_strategy
        if SupervisorStrategy is None:
            SupervisorStrategy = self.app.conf.agent_supervisor
        return SupervisorStrategy

    async def _on_start_supervisor(self) -> None:
        active_partitions = self._get_active_partitions()
        channel: ChannelT = cast(ChannelT, None)
        for i in range(self.concurrency):
            res = await self._start_one(
                index=i,
                active_partitions=active_partitions,
                channel=channel,
            )
            if channel is None:
                # First concurrency actor creates channel,
                # then we reuse it for --concurrency=n.
                # This way they share the same queue.
                channel = res.stream.channel
            self.supervisor.add(res)
        await self.supervisor.start()

    def _get_active_partitions(self) -> Optional[Set[TP]]:
        active_partitions: Optional[Set[TP]] = None
        if self.isolated_partitions:
            # when we start our first agent, we create the set of
            # partitions early, and save it in ._pending_active_partitions.
            # That way we can update the set once partitions are assigned,
            # and the actor we started may be assigned one of the partitions.
            active_partitions = self._pending_active_partitions = set()
        return active_partitions

    async def on_stop(self) -> None:
        """Call when an agent stops."""
        # Agents iterate over infinite streams, so we cannot wait for them
        # to stop.
        # Instead we cancel it and this forces the stream to ack the
        # last message processed (but not the message causing the error
        # to be raised).
        await self._stop_supervisor()
        with suppress(asyncio.CancelledError):
            await asyncio.gather(*[
                aref.actor_task for aref in self._actors
                if aref.actor_task is not None
            ])
        self._actors.clear()

    async def _stop_supervisor(self) -> None:
        if self.supervisor:
            await self.supervisor.stop()
            self.supervisor = None

    def cancel(self) -> None:
        """Cancel agent and its actor instances running in this process."""
        for aref in self._actors:
            aref.cancel()

    async def on_partitions_revoked(self, revoked: Set[TP]) -> None:
        """Call when partitions are revoked."""
        T = traced_from_parent_span()
        if self.isolated_partitions:
            # isolated: start/stop actors for each partition
            await T(self.on_isolated_partitions_revoked)(revoked)
        else:
            await T(self.on_shared_partitions_revoked)(revoked)

    async def on_partitions_assigned(self, assigned: Set[TP]) -> None:
        """Call when partitions are assigned."""
        T = traced_from_parent_span()
        if self.isolated_partitions:
            await T(self.on_isolated_partitions_assigned)(assigned)
        else:
            await T(self.on_shared_partitions_assigned)(assigned)

    async def on_isolated_partitions_revoked(self, revoked: Set[TP]) -> None:
        """Call when isolated partitions are revoked."""
        self.log.dev('Partitions revoked')
        T = traced_from_parent_span()
        for tp in revoked:
            aref: Optional[ActorRefT] = self._actor_by_partition.pop(tp, None)
            if aref is not None:
                await T(aref.on_isolated_partition_revoked)(tp)

    async def on_isolated_partitions_assigned(self, assigned: Set[TP]) -> None:
        """Call when isolated partitions are assigned."""
        T = traced_from_parent_span()
        for tp in sorted(assigned):
            await T(self._assign_isolated_partition)(tp)

    async def _assign_isolated_partition(self, tp: TP) -> None:
        T = traced_from_parent_span()
        if (not self._first_assignment_done and not self._actor_by_partition):
            self._first_assignment_done = True
            # if this is the first time we are assigned
            # we need to reassign the agent we started at boot to
            # one of the partitions.
            T(self._on_first_isolated_partition_assigned)(tp)
        await T(self._maybe_start_isolated)(tp)

    def _on_first_isolated_partition_assigned(self, tp: TP) -> None:
        assert self._actors
        assert len(self._actors) == 1
        self._actor_by_partition[tp] = next(iter(self._actors))
        if self._pending_active_partitions is not None:
            assert not self._pending_active_partitions
            self._pending_active_partitions.add(tp)

    async def _maybe_start_isolated(self, tp: TP) -> None:
        try:
            aref = self._actor_by_partition[tp]
        except KeyError:
            aref = await self._start_isolated(tp)
            self._actor_by_partition[tp] = aref
        await aref.on_isolated_partition_assigned(tp)

    async def _start_isolated(self, tp: TP) -> ActorT:
        return await self._start_for_partitions({tp})

    async def on_shared_partitions_revoked(self, revoked: Set[TP]) -> None:
        """Call when non-isolated partitions are revoked."""
        ...

    async def on_shared_partitions_assigned(self, assigned: Set[TP]) -> None:
        """Call when non-isolated partitions are assigned."""
        ...

    def info(self) -> Mapping:
        """Return agent attributes as a dictionary."""
        return {
            'app': self.app,
            'fun': self.fun,
            'name': self.name,
            'channel': self.channel,
            'concurrency': self.concurrency,
            'help': self.help,
            'sinks': self._sinks,
            'on_error': self._on_error,
            'supervisor_strategy': self.supervisor_strategy,
            'isolated_partitions': self.isolated_partitions,
        }

    def clone(self, *, cls: Type[AgentT] = None, **kwargs: Any) -> AgentT:
        """Create clone of this agent object.

        Keyword arguments can be passed to override any argument
        supported by :class:`Agent.__init__ <Agent>`.
        """
        return (cls or type(self))(**{**self.info(), **kwargs})

    def test_context(self,
                     channel: ChannelT = None,
                     supervisor_strategy: SupervisorStrategyT = None,
                     on_error: AgentErrorHandler = None,
                     **kwargs: Any) -> AgentTestWrapperT:  # pragma: no cover
        """Create new unit-testing wrapper for this agent."""
        # Flow control into channel queues is disabled at startup,
        # so we need to resume it here.
        self.app.flow_control.resume()

        async def on_agent_error(agent: AgentT, exc: BaseException) -> None:
            if on_error is not None:
                await on_error(agent, exc)
            await agent.crash_test_agent(exc)

        return self.clone(
            cls=AgentTestWrapper,
            channel=channel if channel is not None else self.app.channel(),
            supervisor_strategy=supervisor_strategy or CrashingSupervisor,
            original_channel=self.channel,
            on_error=on_agent_error,
            **kwargs)

    def _prepare_channel(self,
                         channel: Union[str, ChannelT] = None,
                         internal: bool = True,
                         schema: SchemaT = None,
                         key_type: ModelArg = None,
                         value_type: ModelArg = None,
                         **kwargs: Any) -> ChannelT:
        app = self.app
        channel = f'{app.conf.id}-{self.name}' if channel is None else channel
        if isinstance(channel, ChannelT):
            return channel
        elif isinstance(channel, str):
            return app.topic(channel,
                             internal=internal,
                             schema=schema,
                             key_type=key_type,
                             value_type=value_type,
                             **kwargs)
        raise TypeError(
            f'Channel must be channel, topic, or str; not {type(channel)}')

    def __call__(self,
                 *,
                 index: int = None,
                 active_partitions: Set[TP] = None,
                 stream: StreamT = None,
                 channel: ChannelT = None) -> ActorRefT:
        """Create new actor instance for this agent."""
        # The agent function can be reused by other agents/tasks.
        # For example:
        #
        #   @app.agent(logs_topic, through='other-topic')
        #   async def filter_log_errors(stream):
        #       async for event in stream:
        #           if event.severity == 'error':
        #               yield event
        #
        #   @app.agent(logs_topic)
        #   async def alert_on_log_error(stream):
        #       async for event in filter_log_errors(stream):
        #            alert(f'Error occurred: {event!r}')
        #
        # Calling `res = filter_log_errors(it)` leaves you with
        # an AsyncIterable that you can reuse (but only if the agent
        # function is an `async def` function that yields)
        return self.actor_from_stream(stream,
                                      index=index,
                                      active_partitions=active_partitions,
                                      channel=channel)

    def actor_from_stream(self,
                          stream: Optional[StreamT],
                          *,
                          index: int = None,
                          active_partitions: Set[TP] = None,
                          channel: ChannelT = None) -> ActorRefT:
        """Create new actor from stream."""
        we_created_stream = False
        actual_stream: StreamT
        if stream is None:
            actual_stream = self.stream(
                channel=channel,
                concurrency_index=index,
                active_partitions=active_partitions,
            )
            we_created_stream = True
        else:
            # reusing actor stream after agent restart
            assert stream.concurrency_index == index
            assert stream.active_partitions == active_partitions
            actual_stream = stream

        res = self.fun(actual_stream)
        if isinstance(res, AsyncIterable):
            if we_created_stream:
                actual_stream.add_processor(self._maybe_unwrap_reply_request)
            typ = cast(Type[Actor], AsyncIterableActor)
        else:
            typ = cast(Type[Actor], AwaitableActor)
        return typ(
            self,
            actual_stream,
            res,
            index=actual_stream.concurrency_index,
            active_partitions=actual_stream.active_partitions,
            loop=self.loop,
            beacon=self.beacon,
        )

    def add_sink(self, sink: SinkT) -> None:
        """Add new sink to further handle results from this agent."""
        if sink not in self._sinks:
            self._sinks.append(sink)

    def stream(self,
               channel: ChannelT = None,
               active_partitions: Set[TP] = None,
               **kwargs: Any) -> StreamT:
        """Create underlying stream used by this agent."""
        if channel is None:
            channel = cast(TopicT, self.channel_iterator).clone(
                is_iterator=False,
                active_partitions=active_partitions,
            )
        if active_partitions is not None:
            assert channel.active_partitions == active_partitions
        s = self.app.stream(channel,
                            loop=self.loop,
                            active_partitions=active_partitions,
                            prefix=self.name,
                            **kwargs)
        return s

    def _maybe_unwrap_reply_request(self, value: V) -> Any:
        if isinstance(value, ReqRepRequest):
            return value.value
        return value

    async def _start_task(self,
                          *,
                          index: Optional[int],
                          active_partitions: Optional[Set[TP]] = None,
                          stream: StreamT = None,
                          channel: ChannelT = None,
                          beacon: NodeT = None) -> ActorRefT:
        # If the agent is an async function we simply start it,
        # if it returns an AsyncIterable/AsyncGenerator we start a task
        # that will consume it.
        actor = self(
            index=index,
            active_partitions=active_partitions,
            stream=stream,
            channel=channel,
        )
        return await self._prepare_actor(actor, beacon)

    async def _prepare_actor(self, aref: ActorRefT,
                             beacon: NodeT) -> ActorRefT:
        coro: Any
        if isinstance(aref, Awaitable):
            # agent does not yield
            coro = aref
            if self._sinks:
                raise ImproperlyConfigured('Agent must yield to use sinks')
        else:
            # agent yields and is an AsyncIterator so we have to consume it.
            coro = self._slurp(aref, aiter(aref))
        task = asyncio.Task(self._execute_task(coro, aref), loop=self.loop)
        task._beacon = beacon  # type: ignore
        aref.actor_task = task
        self._actors.add(aref)
        return aref

    async def _execute_task(self, coro: Awaitable, aref: ActorRefT) -> None:
        # This executes the agent task itself, and does exception handling.
        _current_agent.set(self)
        try:
            await coro
        except asyncio.CancelledError:
            if self.should_stop:
                raise
        except Exception as exc:
            if self._on_error is not None:
                await self._on_error(self, exc)

            # Mark ActorRef as dead, so that supervisor thread
            # can start a new one.
            await aref.crash(exc)
            self.supervisor.wakeup()

    async def _slurp(self, res: ActorRefT, it: AsyncIterator) -> None:
        # this is used when the agent returns an AsyncIterator,
        # and simply consumes that async iterator.
        stream: Optional[StreamT] = None
        async for value in it:
            self.log.debug('%r yielded: %r', self.fun, value)
            if stream is None:
                stream = res.stream.get_active_stream()
            event = stream.current_event
            if event is not None:
                headers = event.headers
                reply_to: Optional[str] = None
                correlation_id: Optional[str] = None
                if isinstance(event.value, ReqRepRequest):
                    req: ReqRepRequest = event.value
                    reply_to = req.reply_to
                    correlation_id = req.correlation_id
                elif headers:
                    reply_to = want_str(headers.get('Faust-Ag-ReplyTo'))
                    correlation_id = want_str(
                        headers.get('Faust-Ag-CorrelationId'))
                if reply_to is not None:
                    await self._reply(event.key, value, reply_to,
                                      cast(str, correlation_id))
            await self._delegate_to_sinks(value)

    async def _delegate_to_sinks(self, value: Any) -> None:
        for sink in self._sinks:
            if isinstance(sink, AgentT):
                await sink.send(value=value)
            elif isinstance(sink, ChannelT):
                await cast(TopicT, sink).send(value=value)
            else:
                await maybe_async(cast(Callable, sink)(value))

    async def _reply(self, key: Any, value: Any, reply_to: str,
                     correlation_id: str) -> None:
        assert reply_to
        response = self._response_class(value)(
            key=key,
            value=value,
            correlation_id=correlation_id,
        )
        await self.app.send(
            reply_to,
            key=None,
            value=response,
        )

    def _response_class(self, value: Any) -> Type[ReqRepResponse]:
        if isinstance(value, ModelT):
            return ModelReqRepResponse
        return ReqRepResponse

    async def cast(self,
                   value: V = None,
                   *,
                   key: K = None,
                   partition: int = None,
                   timestamp: float = None,
                   headers: HeadersArg = None) -> None:
        """RPC operation: like :meth:`ask` but do not expect reply.

        Cast here is like "casting a spell", and will not expect
        a reply back from the agent.
        """
        await self.send(
            key=key,
            value=value,
            partition=partition,
            timestamp=timestamp,
            headers=headers,
        )

    async def ask(self,
                  value: V = None,
                  *,
                  key: K = None,
                  partition: int = None,
                  timestamp: float = None,
                  headers: HeadersArg = None,
                  reply_to: ReplyToArg = None,
                  correlation_id: str = None) -> Any:
        """RPC operation: ask agent for result of processing value.

        This version will wait until the result is available
        and return the processed value.
        """
        p = await self.ask_nowait(
            value,
            key=key,
            partition=partition,
            timestamp=timestamp,
            headers=headers,
            reply_to=reply_to or self.app.conf.reply_to,
            correlation_id=correlation_id,
            force=True,  # Send immediately, since we are waiting for result.
        )
        app = cast(_App, self.app)
        await app._reply_consumer.add(p.correlation_id, p)
        await app.maybe_start_client()
        return await p

    async def ask_nowait(self,
                         value: V = None,
                         *,
                         key: K = None,
                         partition: int = None,
                         timestamp: float = None,
                         headers: HeadersArg = None,
                         reply_to: ReplyToArg = None,
                         correlation_id: str = None,
                         force: bool = False) -> ReplyPromise:
        """RPC operation: ask agent for result of processing value.

        This version does not wait for the result to arrive,
        but instead returns a promise of future evaluation.
        """
        if reply_to is None:
            raise TypeError('Missing reply_to argument')
        reply_to = self._get_strtopic(reply_to)
        correlation_id = correlation_id or str(uuid4())
        value, headers = self._create_req(key, value, reply_to, correlation_id,
                                          headers)
        await self.channel.send(
            key=key,
            value=value,
            partition=partition,
            timestamp=timestamp,
            headers=headers,
            force=force,
        )
        return ReplyPromise(reply_to, correlation_id)

    def _create_req(
            self,
            key: K = None,
            value: V = None,
            reply_to: ReplyToArg = None,
            correlation_id: str = None,
            headers: HeadersArg = None) -> Tuple[V, Optional[HeadersArg]]:
        if reply_to is None:
            raise TypeError('Missing reply_to argument')
        topic_name = self._get_strtopic(reply_to)
        correlation_id = correlation_id or str(uuid4())
        open_headers = prepare_headers(headers or {})
        if self.use_reply_headers:
            merge_headers(
                open_headers, {
                    'Faust-Ag-ReplyTo': want_bytes(topic_name),
                    'Faust-Ag-CorrelationId': want_bytes(correlation_id),
                })
            return value, open_headers
        else:
            # wrap value in envelope
            req = self._request_class(value)(
                value=value,
                reply_to=topic_name,
                correlation_id=correlation_id,
            )
            return req, open_headers

    def _request_class(self, value: V) -> Type[ReqRepRequest]:
        if isinstance(value, ModelT):
            return ModelReqRepRequest
        return ReqRepRequest

    async def send(self,
                   *,
                   key: K = None,
                   value: V = None,
                   partition: int = None,
                   timestamp: float = None,
                   headers: HeadersArg = None,
                   key_serializer: CodecArg = None,
                   value_serializer: CodecArg = None,
                   callback: MessageSentCallback = None,
                   reply_to: ReplyToArg = None,
                   correlation_id: str = None,
                   force: bool = False) -> Awaitable[RecordMetadata]:
        """Send message to topic used by agent."""
        if reply_to:
            value, headers = self._create_req(key, value, reply_to,
                                              correlation_id, headers)
        return await self.channel.send(
            key=key,
            value=value,
            partition=partition,
            timestamp=timestamp,
            headers=headers,
            key_serializer=key_serializer,
            value_serializer=value_serializer,
            force=force,
        )

    def _get_strtopic(self, topic: Union[str, ChannelT, TopicT,
                                         AgentT]) -> str:
        if isinstance(topic, AgentT):
            return self._get_strtopic(topic.channel)
        if isinstance(topic, TopicT):
            return topic.get_topic_name()
        if isinstance(topic, ChannelT):
            raise ValueError('Channels are unnamed topics')
        return topic

    async def map(
        self,
        values: Union[AsyncIterable, Iterable],
        key: K = None,
        reply_to: ReplyToArg = None,
    ) -> AsyncIterator:  # pragma: no cover
        """RPC map operation on a list of values.

        A map operation iterates over results as they arrive.
        See :meth:`join` and :meth:`kvjoin` if you want them in order.
        """
        # Map takes only values, but can provide one key that is used for all.
        async for value in self.kvmap(((key, v) async for v in aiter(values)),
                                      reply_to):
            yield value

    async def kvmap(
        self,
        items: Union[AsyncIterable[Tuple[K, V]], Iterable[Tuple[K, V]]],
        reply_to: ReplyToArg = None,
    ) -> AsyncIterator[str]:  # pragma: no cover
        """RPC map operation on a list of ``(key, value)`` pairs.

        A map operation iterates over results as they arrive.
        See :meth:`join` and :meth:`kvjoin` if you want them in order.
        """
        # kvmap takes (key, value) pairs.
        reply_to = self._get_strtopic(reply_to or self.app.conf.reply_to)

        # BarrierState is the promise that keeps track of pending results.
        # It contains a list of individual ReplyPromises.
        barrier = BarrierState(reply_to)

        async for _ in self._barrier_send(barrier, items, reply_to):
            # Now that we've sent a message, try to see if we have any
            # replies.
            try:
                _, val = barrier.get_nowait()
            except asyncio.QueueEmpty:
                pass
            else:
                yield val
        # All the messages have been sent so finalize the barrier.
        barrier.finalize()

        # Then iterate over the results in the group.
        async for _, value in barrier.iterate():
            yield value

    async def join(
        self,
        values: Union[AsyncIterable[V], Iterable[V]],
        key: K = None,
        reply_to: ReplyToArg = None,
    ) -> List[Any]:  # pragma: no cover
        """RPC map operation on a list of values.

        A join returns the results in order, and only returns once
        all values have been processed.
        """
        return await self.kvjoin(
            ((key, value) async for value in aiter(values)),
            reply_to=reply_to,
        )

    async def kvjoin(
            self,
            items: Union[AsyncIterable[Tuple[K, V]], Iterable[Tuple[K, V]]],
            reply_to: ReplyToArg = None) -> List[Any]:  # pragma: no cover
        """RPC map operation on list of ``(key, value)`` pairs.

        A join returns the results in order, and only returns once
        all values have been processed.
        """
        reply_to = self._get_strtopic(reply_to or self.app.conf.reply_to)
        barrier = BarrierState(reply_to)

        # Map correlation_id -> index
        posindex: MutableMapping[str, int] = {
            cid: i
            async for i, cid in aenumerate(
                self._barrier_send(barrier, items, reply_to))
        }

        # All the messages have been sent so finalize the barrier.
        barrier.finalize()

        # wait until all replies received
        await barrier
        # then construct a list in the correct order.
        values: List = [None] * barrier.total
        async for correlation_id, value in barrier.iterate():
            values[posindex[correlation_id]] = value
        return values

    async def _barrier_send(
            self, barrier: BarrierState, items: Union[AsyncIterable[Tuple[K,
                                                                          V]],
                                                      Iterable[Tuple[K, V]]],
            reply_to: ReplyToArg) -> AsyncIterator[str]:  # pragma: no cover
        # map: send many tasks to agents
        # while trying to pop incoming results off.
        async for key, value in aiter(items):
            correlation_id = str(uuid4())
            p = await self.ask_nowait(key=key,
                                      value=value,
                                      reply_to=reply_to,
                                      correlation_id=correlation_id)
            # add reply promise to the barrier
            barrier.add(p)

            # the ReplyConsumer will call the barrier whenever a new
            # result comes in.
            app = cast(_App, self.app)
            await app.maybe_start_client()
            await app._reply_consumer.add(p.correlation_id, barrier)

            yield correlation_id

    def _repr_info(self) -> str:
        return shorten_fqdn(self.name)

    def get_topic_names(self) -> Iterable[str]:
        """Return list of topic names this agent subscribes to."""
        channel = self.channel
        if isinstance(channel, TopicT):
            return channel.topics
        return []

    @property
    def channel(self) -> ChannelT:
        """Return channel used by agent."""
        if self._channel is None:
            self._channel = self._prepare_channel(
                self._channel_arg,
                schema=self._schema,
                key_type=self._key_type,
                value_type=self._value_type,
                **self._channel_kwargs,
            )
        return self._channel

    @channel.setter
    def channel(self, channel: ChannelT) -> None:
        self._channel = channel

    @property
    def channel_iterator(self) -> AsyncIterator:
        """Return channel agent iterates over."""
        # The channel is "memoized" here, so subsequent access to
        # instance.channel_iterator will return the same value.
        if self._channel_iterator is None:
            # We do not use aiter(channel) here, because that would
            # add the channel to the topic conductor too early.
            self._channel_iterator = self.channel.clone(is_iterator=False)
        return self._channel_iterator

    @channel_iterator.setter
    def channel_iterator(self, it: AsyncIterator) -> None:
        self._channel_iterator = it

    @property
    def label(self) -> str:
        """Return human-readable description of agent."""
        return self._agent_label()

    def _agent_label(self, name_suffix: str = '') -> str:
        s = f'{type(self).__name__}{name_suffix}: '
        s += f'{shorten_fqdn(qualname(self.fun))}'
        return s

    @property
    def shortlabel(self) -> str:
        """Return short description of agent."""
        return self._agent_label()
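
For orientation, here is a minimal, hedged sketch of how an `Agent` like the one above is normally created through the `@app.agent` decorator (the app name, broker URL, and topic are placeholders):

```python3
import faust

app = faust.App('orders-app', broker='kafka://localhost:9092')
orders_topic = app.topic('orders', value_type=str)

@app.agent(orders_topic)
async def process_order(stream):
    # The decorated async generator becomes the Agent's `fun`; each
    # yielded value is forwarded to the agent's sinks (if any).
    async for order in stream:
        yield f'processed: {order}'
```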
Example no. 58
from _entities._entity import BaseEntity

# =============================================================================
# >> GLOBAL VARIABLES
# =============================================================================
# Get a list of projectiles for the game
_projectile_weapons = [weapon.name for weapon in WeaponClassIter('grenade')]

# Get a dictionary to store the delays
_entity_delays = defaultdict(set)

# Get a dictionary to store the repeats
_entity_repeats = defaultdict(set)

# Get a set to store the registered entity classes
_entity_classes = WeakSet()


# =============================================================================
# >> CLASSES
# =============================================================================
class _EntityCaching(BoostPythonClass):
    """Metaclass used to cache entity instances."""
    def __init__(cls, classname, bases, attributes):
        """Initializes the class."""
        # New instances of this class will be cached in this dictionary
        cls._cache = {}

        # Set whether or not this class is caching its instances by default
        try:
            cls._caching = bool(
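
The listing above is cut off mid-statement, but the pattern is clear: a metaclass that caches entity instances per class and records every such class in the module-level `_entity_classes` WeakSet. A generic sketch of that idea (the names and the caching key are illustrative, not Source.Python's actual API):

```python3
from weakref import WeakSet

_entity_classes = WeakSet()

class CachingMeta(type):
    """Cache instances by constructor argument (illustrative only)."""

    def __init__(cls, name, bases, attributes):
        super().__init__(name, bases, attributes)
        cls._cache = {}
        _entity_classes.add(cls)  # WeakSet: unreferenced classes drop out.

    def __call__(cls, index):
        # Return the cached instance for `index`, creating it on first use.
        if index not in cls._cache:
            cls._cache[index] = super().__call__(index)
        return cls._cache[index]

class Entity(metaclass=CachingMeta):
    def __init__(self, index):
        self.index = index

assert Entity(1) is Entity(1)  # Same index -> same cached instance.
```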
Example no. 59
def test_constructor_identity(self):
    s = WeakSet(self.items)
    t = WeakSet(s)
    self.assertNotEqual(id(s), id(t))
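
The test verifies that constructing a `WeakSet` from another `WeakSet` copies its elements into a distinct object rather than aliasing it. A self-contained illustration of both the copy and the weak-reference behaviour:

```python3
import gc
from weakref import WeakSet

class Item:
    """WeakSet members must support weak references; plain classes do."""

items = [Item() for _ in range(3)]
s = WeakSet(items)
t = WeakSet(s)     # A new set holding the same elements...
assert s is not t  # ...but a distinct object, as the test asserts.

del items[0]                  # Drop the last strong reference to one item.
gc.collect()                  # Immediate in CPython; explicit elsewhere.
assert len(s) == len(t) == 2  # The element vanished from both sets.
```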
Example no. 60
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils.functional import LazyObject
from django.utils.module_loading import import_string
from django.utils.text import capfirst
from django.utils.translation import gettext as _, gettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.i18n import JavaScriptCatalog

all_sites = WeakSet()


class AlreadyRegistered(Exception):
    pass


class NotRegistered(Exception):
    pass


class AdminSite:
    """
    An AdminSite object encapsulates an instance of the Django admin application, ready
    to be hooked in to your URLconf. Models are registered with the AdminSite using the
    register() method, and the get_urls() method can then be used to access Django view