Example #1
0
class ConnectionFactory(ReconnectingClientFactory):
    """Creates `.Connection` instances."""

    protocol = Connection
    log = Logger()

    def __init__(self):
        #: The `ConnectionSettings` object associated with this factory.
        self.settings = ConnectionSettings()
        #: A `WeakSet` containing associated `Connection` objects.
        self.protocols = WeakSet()

    def startedConnecting(self, connector):
        self.log.info("Attempting to connect to server")

    def buildProtocol(self, addr):
        # Let the parent factory construct the protocol, then copy our
        # settings onto it.
        proto = ReconnectingClientFactory.buildProtocol(self, addr)
        proto.settings = self.settings
        # Propagate the identity fields defined by Twisted's IRCClient,
        # keeping the protocol's own default for any unset (falsy) value.
        for field in ("nickname", "password", "realname",
                      "username", "userinfo"):
            setattr(proto, field,
                    getattr(self.settings, field) or getattr(proto, field))
        self.protocols.add(proto)
        return proto

    def reload_settings(self, dct):
        """Update this connection's settings using *dct*, then call
        `after_reload` on each of this factory's active connections."""
        self.log.info("Reloading settings")
        self.settings.replace(dct)
        for proto in self.protocols:
            proto.after_reload()
Example #2
0
    def test_weak_destroy_and_mutate_while_iterating(self):
        # Regression test: WeakSet iterators must survive keys being
        # garbage-collected mid-iteration (CPython issue #7105).
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        items = [SomeClass(c) for c in string.ascii_letters]
        s = WeakSet(items)
        @contextlib.contextmanager
        def testcontext():
            # A live iterator defers actual removal of dead entries from the
            # WeakSet; dropping it in `finally` commits the removals.
            try:
                it = iter(s)
                next(it)
                # Schedule an item for removal and recreate it
                # NOTE(review): the iterator may still hold a reference to the
                # popped item, keeping it alive (cf. CPython issue #20006,
                # whose fix advances the iterator past the yielded item) —
                # confirm SomeClass equality/str semantics before relying on
                # this not being flaky.
                u = SomeClass(str(items.pop()))
                test_support.gc_collect()      # just in case
                yield u
            finally:
                it = None           # should commit all removals

        test_support.gc_collect()

        with testcontext() as u:
            # The recreated object was never added, so it must not be found
            # (presumably SomeClass compares by value — verify).
            self.assertNotIn(u, s)
        with testcontext() as u:
            self.assertRaises(KeyError, s.remove, u)
        self.assertNotIn(u, s)
        with testcontext() as u:
            s.add(u)
        self.assertIn(u, s)
        t = s.copy()
        with testcontext() as u:
            s.update(t)
        self.assertEqual(len(s), len(t))
        with testcontext() as u:
            s.clear()
        self.assertEqual(len(s), 0)
Example #3
0
class BaseSubject(object):
    """Holds a weak collection of observer callables; aliased to
    dirigent.subject.
    """

    def __init__(self, init=None):
        # Observers are tracked weakly, so registering a callable does not
        # by itself keep it alive.
        self.observers = WeakSet(init) if init else WeakSet()

    def register(self, func):
        """Register a callable; usable as a decorator (returns *func*)."""
        self.observers.add(func)
        return func

    def unregister(self, func):
        """Remove a callable; return it, or False when it was absent."""
        if func not in self.observers:
            return False
        self.observers.remove(func)
        return func

    def notify(self, *args, **kwargs):
        """Invoke every registered observer, returning their results."""
        results = []
        for observer in self.observers:
            results.append(observer(*args, **kwargs))
        return results

    def __iter__(self):
        return iter(self.observers)

    __call__ = notify
    on = bind = register
    off = unbind = unregister
Example #4
0
class Signal(object):
    """A simple signal/slot dispatcher.

    Plain functions are held in a `WeakSet`; bound methods are decomposed
    into ``instance -> {function}`` inside a `WeakKeyDictionary` (the bound
    wrapper is recreated on every attribute access, which would defeat weak
    references). Connecting a receiver never keeps it alive on its own.
    """

    def __init__(self):
        self._functions = WeakSet()
        self._methods = WeakKeyDictionary()

    def __call__(self, *args, **kargs):
        """Emit the signal: call every connected function, then every
        connected method with its owner instance prepended."""
        for f in self._functions:
            f(*args, **kargs)

        for obj, functions in self._methods.items():
            for f in functions:
                f(obj, *args, **kargs)

    def connect(self, slot):
        """Connect *slot* (a plain function or a bound method)."""
        if inspect.ismethod(slot):
            if slot.__self__ not in self._methods:
                self._methods[slot.__self__] = set()
            self._methods[slot.__self__].add(slot.__func__)
        else:
            self._functions.add(slot)

    def disconnect(self, slot):
        """Disconnect *slot*; a no-op when it was never connected.

        Bug fix: the method branch used ``set.remove`` and raised
        ``KeyError`` when the instance had *other* methods connected but
        not this one; ``discard`` makes disconnect idempotent, matching
        the function branch.
        """
        if inspect.ismethod(slot):
            if slot.__self__ in self._methods:
                self._methods[slot.__self__].discard(slot.__func__)
        else:
            self._functions.discard(slot)
Example #5
0
class Signal(object):
    """
    Emit notifications to connected callable receivers.

    Plain functions live in a WeakSet; bound methods are stored per owner
    instance in a WeakKeyDictionary, so connecting never keeps a receiver
    alive by itself.
    """

    def __init__(self):
        """Set up empty receiver collections."""
        self.funcs = WeakSet()
        self.meths = WeakKeyDictionary()

    def connect(self, c):
        """
        Connect a callable as receiver for the signal
        @param c: signal receiver
        @type c: Callable
        """
        if inspect.ismethod(c):
            owner = c.__self__
            if owner not in self.meths:
                self.meths[owner] = set()
            self.meths[owner].add(c.__func__)
        else:
            if c not in self.funcs:
                self.funcs.add(c)

    def disconnect(self, c):
        """
        Disconnect the callable from receiving the signal
        @param c: signal receiver
        @type c: Callable
        """
        if inspect.ismethod(c):
            owner = c.__self__
            if owner in self.meths:
                self.meths[owner].remove(c.__func__)
        elif c in self.funcs:
            self.funcs.remove(c)

    def disconnectAll(self):
        """
        Disconnects all signal receivers
        """
        self.funcs.clear()
        self.meths.clear()

    def emit(self, *args, **kwargs):
        """
        Fires the signal to all connected receivers
        """
        for receiver in self.funcs:
            receiver(*args, **kwargs)

        for owner, bound in self.meths.items():
            for fn in bound:
                fn(owner, *args, **kwargs)
Example #6
0
 def subscribe(self, event, listener_object):
     """Register *listener_object* for *event*.

     The listener must define an ``on_<event>_handler`` method; listeners
     are kept in a per-event WeakSet so subscription alone does not keep
     them alive.
     """
     handler = "on_%s_handler" % event
     if not hasattr(listener_object, handler):
         raise AttributeError("Listener object has no '%s' event handler." % event)
     try:
         listeners = self._listeners[event]
     except KeyError:
         # First subscriber for this event: create its weak set lazily.
         listeners = WeakSet()
         self._listeners[event] = listeners
     listeners.add(listener_object)
Example #7
0
class Signal(object):
    """Signal dispatcher that drops receivers raising RuntimeError.

    Functions are kept in a WeakSet; bound methods are decomposed into
    ``instance -> {function}`` inside a WeakKeyDictionary. A receiver that
    raises RuntimeError during emission is warned about and disconnected.
    """

    def __init__(self):
        self._functions = WeakSet()
        self._methods = WeakKeyDictionary()

    def __call__(self, *args, **kargs):
        """Emit to all receivers, pruning any that raise RuntimeError."""
        # Call handler functions; iterate over a copy because failing
        # handlers are removed afterwards.
        to_be_removed = []
        for func in self._functions.copy():
            try:
                func(*args, **kargs)
            except RuntimeError:
                # Bug fix: `Warning.warn` does not exist (Warning is an
                # exception class); use the warnings module, as the method
                # branch below already does.
                warnings.warn('Signals func->RuntimeError: func "{}" will be removed.'.format(func))
                to_be_removed.append(func)

        for remove in to_be_removed:
            self._functions.discard(remove)

        # Call handler methods
        to_be_removed = []
        emitters = self._methods.copy()
        for obj, funcs in emitters.items():
            # Project-level debug helper — presumably logs; confirm import.
            msg_debug('obj is type "{}"'.format(type(obj)))
            for func in funcs.copy():
                try:
                    func(obj, *args, **kargs)
                except RuntimeError:
                    warnings.warn('Signals methods->RuntimeError, obj.func "{}.{}" will be removed'.format(obj, func))
                    to_be_removed.append((obj, func))

        for obj, func in to_be_removed:
            self._methods[obj].discard(func)

    def connect(self, slot):
        """Connect a plain function or a bound method."""
        if inspect.ismethod(slot):
            if slot.__self__ not in self._methods:
                self._methods[slot.__self__] = set()

            self._methods[slot.__self__].add(slot.__func__)

        else:
            self._functions.add(slot)

    def disconnect(self, slot):
        """Disconnect *slot*; a no-op when not connected (uses discard so a
        partially-matching owner never raises KeyError)."""
        if inspect.ismethod(slot):
            if slot.__self__ in self._methods:
                self._methods[slot.__self__].discard(slot.__func__)
        else:
            self._functions.discard(slot)

    def clear(self):
        """Drop every receiver."""
        self._functions.clear()
        self._methods.clear()
Example #8
0
class Signal(object):
    """A signal that can be temporarily deactivated.

    While deactivated, emissions are silently dropped; connect and
    disconnect keep working. Receivers are referenced weakly.
    """

    def __init__(self):
        self._functions = WeakSet()
        self._methods = WeakKeyDictionary()
        self._activated = True

    def __call__(self, *args, **kargs):
        # Emission is a no-op while the signal is deactivated.
        if not self._activated:
            return
        for func in self._functions:
            func(*args, **kargs)
        for obj, funcs in self._methods.items():
            for func in funcs:
                func(obj, *args, **kargs)

    def connect(self, slot):
        """Register *slot* (plain function or bound method)."""
        if inspect.ismethod(slot):
            self._methods.setdefault(slot.__self__, set()).add(slot.__func__)
        else:
            self._functions.add(slot)

    def disconnect(self, slot):
        """Unregister *slot* when present."""
        if inspect.ismethod(slot):
            owner = slot.__self__
            if owner in self._methods:
                self._methods[owner].remove(slot.__func__)
        else:
            if slot in self._functions:
                self._functions.remove(slot)

    def clear(self):
        """Forget every receiver."""
        self._functions.clear()
        self._methods.clear()

    def activate(self):
        """
        Activate the signal to emit.
        """
        self._activated = True

    def deactivate(self):
        """
        Deactivate the signal to emit.
        """
        self._activated = False
Example #9
0
 def test_len(self):
     """len() of a WeakSet tracks membership as referents die."""
     first = Object()
     second = Object()
     ws = WeakSet([first])
     self.assertIn(first, ws)
     self.assertEqual(len(ws), 1)
     ws.add(second)
     self.assertEqual(len(ws), 2)
     self.assertIn(second, ws)
     # Dropping the last strong reference should shrink the set
     # (presumably relying on prompt refcount collection — CPython).
     del first
     self.assertEqual(len(ws), 1)
     self.assertIn(second, ws)
Example #10
0
class Channel:

    """A communication channel."""

    def __init__(self, template="{msg}", members=None, logged=False):
        """Create a new channel.

        :param str template: A formatting string used as the message template
        :param members: Optional, either an iterable of sessions to start
                        with, or a callable returning the member sessions
                        on demand (no tracking is done here in that case)
        :param bool logged: Whether messages are also logged to the console
        :returns None:

        """
        self.template = template
        self.logged = logged
        if callable(members):
            # Members are produced on demand by the callable.
            self.members = members
        else:
            self.members = WeakSet()
            for session in (members or ()):
                self.members.add(session)

    def send(self, data, *more, sep=" ", context=None, members=None):
        """Send a message to a channel.

        `data` and all members of `more` are converted to strings and
        joined by `sep` via the joins function, then run through the
        channel's template.

        :param any data: An initial chunk of data
        :param any more: Optional, any additional data to send
        :param str sep: Optional, a separator to join the resulting output by
        :param dict context: Optional, additional context passed to the
                             template formatter
        :param members: Optional, sessions to use instead of the channel's
                        own list; if callable, it is invoked to get them
        :returns None:

        """
        targets = members or self.members
        if callable(targets):
            targets = targets()
        body = joins(data, *more, sep=sep)
        message = self.template.format(msg=body, **(context or {}))
        if self.logged:
            log.info(strip_caret_codes(message))
        for session in targets:
            session.send(message)
class Subject(object):
    """Named subject that weakly tracks observers and pushes messages."""

    def __init__(self, name):
        self.name = name
        self._observers = WeakSet()

    def register_observer(self, observer):
        """Start notifying *observer* (held weakly)."""
        self._observers.add(observer)
        print('observer {0} now listening on {1}'.format(observer.name, self.name))

    def notify_observers(self, msg):
        """Forward *msg* to every live observer via its notify() method."""
        print('{0} notifying observers about {1}'.format(self.__class__.__name__, msg))
        for watcher in self._observers:
            watcher.notify(self, msg)
Example #12
0
class LazyResult(object):
  # A lazily-computed, invalidatable result cell (Python 2 syntax).
  # `_value` is a (value, exception) pair set on first evaluation; `_refs`
  # weakly tracks downstream LazyResults that consumed this one so that
  # invalidation can cascade to them.
  inited = False
  deps = None  # Stores hard references to upstream dependencies for invalidation purposes

  def __init__(self, watcher = None):
    # watcher: opaque object consulted by the invalidation strategy —
    # presumably controls whether this result is watched; confirm.
    self.watcher = watcher

  def invalidate(self):
    # Drop the cached value and recursively invalidate consumers.
    if not hasattr(self, '_value'):
      return
    # Off the main thread, or while an evaluation is in progress, defer
    # the invalidation to the queue instead of mutating in place.
    if threading.current_thread() != MAIN_THREAD or evaluation_stack:
      invalidation_queue.append(self)
      invalidation_event.set()
      return
    del self._value
    self.deps = None
    try:
      refs = tuple(self._refs)
    except AttributeError:
      # Never consumed by anyone: nothing to cascade to.
      return
    self._refs.clear()
    for ref in refs:
      ref.invalidate()

  def set(self, value):
    # Seed the cell with a precomputed value; only valid while empty.
    assert not hasattr(self, '_value')
    self._value = (value, None)

  def get(self, f, *args):
    # Return the cached value, computing it via f(*args) on first use.
    # A cached exception from a failed computation is re-raised.
    assert threading.current_thread() == MAIN_THREAD
    if not self.inited:
      invalidation_strategy._watch_object(self)
      self.inited = True
    if evaluation_stack:
      # Record the consumer currently being evaluated so it can be
      # invalidated when this value changes.
      if not hasattr(self, '_refs'):
        self._refs = WeakSet()
      self._refs.add(evaluation_stack[-1])
    try:
      value, e = self._value
    except AttributeError:
      # First evaluation: compute inside an evaluation context so nested
      # get() calls register their dependency on us.
      with LazyEvaluationContext(self):
        try:
          value = f(*args)
          self._value = (value, None)
          return value
        except Exception, e:
          self._value = (None, e)
          raise
    if e:
      raise e
    return value
Example #13
0
class Environments(object):
    """ A common object for all environments in a request. """

    def __init__(self):
        # Environments are referenced weakly; recomputation bookkeeping
        # and the draft/onchange flag live alongside them.
        self.envs = WeakSet()           # weak set of environments
        self.todo = {}                  # recomputations {field: [records]}
        self.mode = False               # flag for draft/onchange

    def add(self, env):
        """ Add the environment `env`. """
        self.envs.add(env)

    def __iter__(self):
        """ Iterate over environments. """
        return self.envs.__iter__()
Example #14
0
class ConnectionPool:
    """Pool of rethinkdb connections backed by a queue.

    Idle connections wait in the queue; connections currently checked out
    are tracked weakly so teardown() can close them as well.
    """

    def __init__(self):
        self._config_dict = None
        self._queue = Queue()
        self._outstanding_connections = WeakSet()

    async def get_conn(self):
        """Check out an open connection, creating one when the pool is dry."""
        self._check_config()
        try:
            while True:
                connection = self._queue.get_nowait()
                if connection.is_open():
                    break
                # Stale connection: close it best-effort and keep looking.
                try:
                    await connection.close()
                except Exception:
                    l.debug('Exception in close rethink connection', exc_info=True)
        except QueueEmpty:
            connection = await r.connect(**self._config_dict)
        self._outstanding_connections.add(connection)
        return connection

    async def put_conn(self, conn):
        """Return a checked-out connection to the pool."""
        self._queue.put_nowait(conn)
        self._outstanding_connections.remove(conn)

    def set_config(self, config):
        self._config_dict = config

    def get_config(self):
        self._check_config()
        return self._config_dict

    async def teardown(self):
        """Close every pooled and outstanding connection (best effort)."""
        # Drain the idle queue into the outstanding set, then close all.
        while True:
            try:
                queued = self._queue.get_nowait()
            except QueueEmpty:
                break
            self._outstanding_connections.add(queued)
        for connection in self._outstanding_connections:
            try:
                await connection.close()
            except Exception:
                l.debug('Exception in close rethink connection', exc_info=True)

    def _check_config(self):
        assert self._config_dict is not None, "Did you remember to run resync.setup()?"
Example #15
0
class ObservableEvent(object):
    """Event supporting ``ev += listener`` / ``ev -= listener``.

    With ``weakref=True`` listeners are held weakly and vanish when
    collected; otherwise a regular set keeps them alive.
    """

    def __init__(self, weakref=False):
        self.listeners = WeakSet() if weakref else set()

    def __iadd__(self, ob):
        self.listeners.add(ob)
        return self

    def __isub__(self, ob):
        self.listeners.discard(ob)
        return self

    def notify(self, *a, **k):
        """Call every listener; snapshot first so handlers may mutate."""
        snapshot = list(self.listeners)
        for listener in snapshot:
            listener(*a, **k)
Example #16
0
class Channel:

    """A communication channel."""

    def __init__(self, header="", msg_color="^~", members=None):
        """Create a new channel.

        :param str header: A block of text prepended to every message
        :param str msg_color: A color code applied to message bodies
        :param members: Optional, an iterable of sessions to start with, or
                        a callable returning sessions on demand (no member
                        tracking is done here in that case)
        :returns None:

        """
        self.header = header
        self.msg_color = msg_color
        if callable(members):
            self.members = members
        else:
            self.members = WeakSet()
            for session in (members or ()):
                self.members.add(session)

    def send(self, data, *more, sep=" ", members=None):
        """Send a message to a channel.

        `data` and all members of `more` are converted to strings and
        joined together by `sep` via the joins function.

        :param any data: An initial chunk of data
        :param any more: Optional, any additional data to send
        :param str sep: Optional, a separator to join the resulting output by
        :param members: Optional, sessions to use instead of the channel's
                        own list; if callable, it is invoked to get them
        :returns None:

        """
        targets = members or self.members
        if callable(targets):
            targets = targets()
        msg = joins(data, *more, sep=sep)
        for session in targets:
            session.send(self.header, " ", self.msg_color, msg, "^~", sep="")
Example #17
0
File: api.py Project: Vauxoo/odoo
class Environments(object):
    """ A common object for all environments in a request. """

    def __init__(self):
        # Weakly track the environments; shared record cache and
        # recomputation state are kept alongside.
        self.envs = WeakSet()           # weak set of environments
        self.cache = Cache()            # cache for all records
        self.todo = {}                  # recomputations {field: [records]}
        self.in_draft = False           # flag for draft
        self.recompute = True

    def add(self, env):
        """ Add the environment ``env``. """
        self.envs.add(env)

    def __iter__(self):
        """ Iterate over environments. """
        return self.envs.__iter__()
Example #18
0
class Account(Entity):

    """A user account."""

    _uid_code = "A"

    type = "account"

    def __init__(self, data=None, active=False, savable=True):
        super().__init__(data, active, savable)
        # Sessions currently logged in to this account, held weakly.
        self._sessions = WeakSet()

    def __repr__(self):
        name = getattr(self, "name", None)
        if name:
            return joins("Account<", name, ">", sep="")
        return "Account<(unnamed)>"

    def login(self, session):
        """Process an account login for a session.

        :param sessions.Session session: The session logging in
        :returns None:

        """
        with EVENTS.fire("account_login", session, self):
            log.info("%s has logged in to %s.", session, self)
            session.send("\nMOTD will go here!")
            # WeakSet.add is idempotent, so no membership check is needed.
            self._sessions.add(session)
            if not self.active:
                self.active = True

    def logout(self, session):
        """Process an account logout for a session.

        :param sessions.Session session: The session logging out
        :returns None:

        """
        with EVENTS.fire("account_logout", session, self):
            log.info("%s has logged out of %s.", session, self)
            self._sessions.discard(session)
            # Deactivate the account once its last session is gone.
            if not self._sessions:
                self.active = False
Example #19
0
class LazyConstants(object):
  """Invalidation strategy that treats values as constants.

  Dependency tracking is a no-op; watched objects are only remembered so
  that _invalidate_all() can reset them in bulk.
  """

  def __init__(self):
    self._watchable_objects = WeakSet()

  def _watch_object(self, object):
    # Only objects that carry a watcher participate in bulk invalidation.
    if object.watcher is not None:
      self._watchable_objects.add(object)

  def _add_dependency(self, object):
    # Constants never change, so no dependency edges are recorded.
    pass

  def _unwatch_object(self, object):
    pass

  def _invalidate_all(self):
    # Invalidate and reset every watched object, then forget them all.
    for watched in self._watchable_objects:
      watched.invalidate()
      watched.inited = False
    self._watchable_objects.clear()
Example #20
0
class DependencyFactory(object):
    """Factory that instantiates and binds a dependency provider class.

    Instances may be shared between containers according to
    ``sharing_key``; every created instance is remembered weakly in
    ``self.instances``.
    """

    sharing_key = None

    def __init__(self, dep_cls, *init_args, **init_kwargs):
        self.dep_cls = dep_cls
        self.args = init_args
        self.kwargs = init_kwargs

        # keep a reference to every created instance
        self.instances = WeakSet()

    @property
    def key(self):
        """Identity of this factory: class plus stringified ctor args."""
        return (self.dep_cls, str(self.args), str(self.kwargs))

    def create_and_bind_instance(self, name, container):
        """ Instantiate ``dep_cls`` and bind it to ``container``.

        See `:meth:~DependencyProvider.bind`.
        """
        sharing_key = self.sharing_key
        if sharing_key is not None:
            if sharing_key is CONTAINER_SHARED:
                sharing_key = container

            # Reuse a previously created instance for the same sharing key.
            shared_dependencies.setdefault(sharing_key, {})
            instance = shared_dependencies[sharing_key].get(self.key)
            if instance is None:
                instance = self.dep_cls(*self.args, **self.kwargs)
                shared_dependencies[sharing_key][self.key] = instance
        else:
            instance = self.dep_cls(*self.args, **self.kwargs)
        instance.bind(name, container)

        # Recursively instantiate nested factories declared on the instance.
        # Bug fix: the loop previously rebound ``name``, shadowing the
        # method parameter; use a distinct variable to avoid latent misuse.
        for attr_name, attr in inspect.getmembers(instance):
            if isinstance(attr, DependencyFactory):
                prov = attr.create_and_bind_instance(attr_name, container)
                setattr(instance, attr_name, prov)

        self.instances.add(instance)
        return instance
Example #21
0
class EventsDispatcher(object):
    """Abstract object from which all the objects generating events inherit"""

    def __init__(self):
        # Handlers are held weakly; they drop out once collected.
        self._events_handlers = WeakSet()

    ### Callbacks
    def register_events_handler(self, events_handler):
        """Registers an event handler with this dispatcher
            @param events_handler: an instance with methods as code of callbacks
            @type events_handler: L{papyon.event.BaseEventInterface}
        """
        self._events_handlers.add(events_handler)

    def _dispatch(self, name, *args):
        # Snapshot the weak set before dispatching, then count how many
        # handlers actually processed the event (truthy return).
        handlers = list(self._events_handlers)
        return sum(1 for handler in handlers
                   if handler._dispatch_event(name, *args))
Example #22
0
class Scheduler(object):
    """Tracks timed calls and looping calls so they can be reset at once.

    Both collections are weak: a call that is otherwise forgotten does not
    have to be cancelled here.
    """

    def __init__(self, protocol):
        self.protocol = protocol
        self.calls = WeakSet()
        self.loops = WeakSet()

    def call_later(self, *arg, **kw):
        """Schedule a one-shot reactor call and track it."""
        handle = reactor.callLater(*arg, **kw)
        self.calls.add(handle)
        return handle

    def call_end(self, *arg, **kw):
        """Schedule a protocol end-call and track it."""
        handle = self.protocol.call_end(*arg, **kw)
        self.calls.add(handle)
        return handle

    def loop_call(self, delay, func, *arg, **kw):
        """Start a LoopingCall every *delay* seconds and track it."""
        looping = LoopingCall(func, *arg, **kw)
        looping.start(delay, False)
        self.loops.add(looping)
        return looping

    def reset(self):
        """Cancel pending calls, stop running loops, forget them all."""
        for pending in self.calls:
            if pending.active():
                pending.cancel()
        for looping in self.loops:
            if looping.running:
                looping.stop()
        self.calls = WeakSet()
        self.loops = WeakSet()
Example #23
0
class Signal(object):
    """Signal that collects and returns each receiver's result.

    Functions are referenced weakly; bound methods are split into
    ``instance -> {function}`` inside a WeakKeyDictionary.
    """

    def __init__(self):
        self._functions = WeakSet()
        self._methods = WeakKeyDictionary()

    def __call__(self, *args, **kargs):
        # Gather return values: plain functions first, then methods.
        results = [func(*args, **kargs) for func in self._functions]
        for obj, funcs in self._methods.items():
            results.extend(func(obj, *args, **kargs) for func in funcs)
        return results

    def connect(self, slot):
        """Attach *slot* (function or bound method) to the signal."""
        if inspect.ismethod(slot):
            owner = slot.__self__
            if owner not in self._methods:
                self._methods[owner] = set()
            self._methods[owner].add(slot.__func__)
        else:
            self._functions.add(slot)

    def disconnect(self, slot):
        """Detach *slot* when it is connected."""
        if inspect.ismethod(slot):
            owner = slot.__self__
            if owner in self._methods:
                self._methods[owner].remove(slot.__func__)
        else:
            if slot in self._functions:
                self._functions.remove(slot)

    def clear(self):
        """Detach every receiver."""
        self._functions.clear()
        self._methods.clear()
class SIGNAL(object):
    """Named signal dispatching to weakly-referenced receivers.

    Plain functions are held in a WeakSet; bound methods are decomposed
    into ``instance -> {function}`` inside a WeakKeyDictionary.
    """

    def __init__(self, name=None):
        self._functions = WeakSet()
        self._methods = WeakKeyDictionary()
        self._name = name

    def __call__(self, *args, **kargs):
        """Emit to all connected functions, then all connected methods."""
        for func in self._functions:
            func(*args, **kargs)

        for obj, funcs in self._methods.items():
            for func in funcs:
                func(obj, *args, **kargs)

    def connect(self, slot):
        """Connect a plain function or a bound method as receiver."""
        if inspect.ismethod(slot):
            if slot.__self__ not in self._methods:
                self._methods[slot.__self__] = set()

            self._methods[slot.__self__].add(slot.__func__)

        else:
            self._functions.add(slot)

    def disconnect(self, slot):
        """Disconnect *slot*; a no-op when it is not connected.

        Bug fix: the method branch used ``set.remove`` and raised
        ``KeyError`` when the owner instance had other methods connected
        but not this one; ``discard`` keeps disconnect idempotent, like
        the function branch.
        """
        if inspect.ismethod(slot):
            if slot.__self__ in self._methods:
                self._methods[slot.__self__].discard(slot.__func__)
        else:
            self._functions.discard(slot)

    def clear(self):
        """Drop every receiver."""
        self._functions.clear()
        self._methods.clear()
Example #25
0
    def test_weak_destroy_and_mutate_while_iterating(self):
        # Regression test: WeakSet iterators must not crash when keys die
        # mid-iteration (CPython issues #7105 and #20006).
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        items = [ustr(c) for c in string.ascii_letters]
        s = WeakSet(items)
        @contextlib.contextmanager
        def testcontext():
            # A live iterator defers actual removals from the WeakSet;
            # dropping it in `finally` commits them.
            try:
                it = iter(s)
                # Start iterator
                yielded = ustr(str(next(it)))
                # Schedule an item for removal and recreate it
                u = ustr(str(items.pop()))
                if yielded == u:
                    # The iterator still has a reference to the removed item,
                    # advance it (issue #20006).
                    next(it)
                gc.collect()      # just in case
                yield u
            finally:
                it = None           # should commit all removals

        with testcontext() as u:
            # The recreated ustr compares equal to the removed one but was
            # never added, so it must not be found.
            self.assertNotIn(u, s)
        with testcontext() as u:
            self.assertRaises(KeyError, s.remove, u)
        self.assertNotIn(u, s)
        with testcontext() as u:
            s.add(u)
        self.assertIn(u, s)
        t = s.copy()
        with testcontext() as u:
            s.update(t)
        self.assertEqual(len(s), len(t))
        with testcontext() as u:
            s.clear()
        self.assertEqual(len(s), 0)
Example #26
0
 def __contains__(self, attribute):
     '''
     Assigned to the context as the __contains__ method.
     
     @param attribute: IAttribute or descriptor with '__name__' and '__objclass__'
         The attribute to check if contained.
     '''
     # Normalize to an IAttribute; anything unrecognized is never contained.
     attribute = attributeOf(attribute)
     if attribute is None: return False
     # Per-class memoization: positive and negative results are cached in
     # weak sets stored directly on the class. __dict__.get (rather than
     # getattr) deliberately avoids picking up a base class's cache.
     contained, uncontained = self.__class__.__dict__.get('_contained'), self.__class__.__dict__.get('_uncontained')
     if contained and attribute in contained: return True
     if uncontained and attribute in uncontained: return False
     assert isinstance(attribute, IAttribute)
     if attribute.isIn(self.__class__):
         # Lazily create the positive cache on first hit for this class.
         if contained is None:
             contained = WeakSet()
             setattr(self.__class__, '_contained', contained)
         contained.add(attribute)
         return True
     # Cache the negative result as well so repeat lookups stay cheap.
     if uncontained is None:
         uncontained = WeakSet()
         setattr(self.__class__, '_uncontained', uncontained)
     uncontained.add(attribute)
     return False
Example #27
0
class World:
    """Game world: owns the physics system and weakly tracks entities."""

    def __init__(self, evManager):
        from weakref import WeakSet

        self.evManager = evManager
        self.evManager.RegisterListener(self)

        self.physics = Physics(evManager)

        # Entities are weakly held; collected entities drop out on their own.
        self.entities = WeakSet()

    def AddEntity(self, entity):
        """Track *entity* for physics updates."""
        self.entities.add(entity)

    def ApplyPhysics(self, delta_time):
        """Advance physics for every live entity."""
        for ent in self.entities:
            self.physics.Apply(ent, delta_time)

    def Notify(self, event):
        """Event-manager callback: track new entities, tick physics."""
        if isinstance(event, EntityCreatedEvent):
            self.AddEntity(event.entity)
        if isinstance(event, TickEvent):
            self.ApplyPhysics(event.delta_time)
Example #28
0
class Agent(AgentT, Service):
    """Agent.

    This is the type of object returned by the ``@app.agent`` decorator.
    """
    supervisor: SupervisorStrategyT = None
    instances: MutableSequence[ActorRefT]

    # channel is loaded lazily on .channel property access
    # to make sure configuration is not accessed when agent created
    # at module-scope.
    _channel: Optional[ChannelT] = None
    _channel_arg: Optional[Union[str, ChannelT]]
    _channel_kwargs: Dict[str, Any]
    _channel_iterator: Optional[AsyncIterator] = None
    _sinks: List[SinkT]

    # Weak container: actors that die are dropped automatically.
    _actors: MutableSet[ActorRefT]
    _actor_by_partition: MutableMapping[TP, ActorRefT]

    #: This mutable set is used by the first agent we start,
    #: so that we can update its active_partitions later
    #: (in on_partitions_assigned, when we know what partitions we get).
    _pending_active_partitions: Optional[Set[TP]] = None

    _first_assignment_done: bool = False

    def __init__(self,
                 fun: AgentFun,
                 *,
                 app: AppT,
                 name: str = None,
                 channel: Union[str, ChannelT] = None,
                 concurrency: int = 1,
                 sink: Iterable[SinkT] = None,
                 on_error: AgentErrorHandler = None,
                 supervisor_strategy: Type[SupervisorStrategyT] = None,
                 help: str = None,
                 key_type: ModelArg = None,
                 value_type: ModelArg = None,
                 isolated_partitions: bool = False,
                 **kwargs: Any) -> None:
        """Wrap the agent function *fun* and record configuration.

        Extra ``**kwargs`` are stored and later forwarded to the
        channel/topic created lazily by the :attr:`channel` property.
        Raises :exc:`ImproperlyConfigured` if ``isolated_partitions``
        is combined with ``concurrency > 1``.
        """
        self.app = app
        self.fun: AgentFun = fun
        self.name = name or canonshortname(self.fun)
        # key-type/value_type arguments only apply when a channel
        # is not set
        if key_type is not None:
            assert channel is None or isinstance(channel, str)
        self._key_type = key_type
        if value_type is not None:
            assert channel is None or isinstance(channel, str)
        self._value_type = value_type
        self._channel_arg = channel
        self._channel_kwargs = kwargs
        self.concurrency = concurrency or 1
        self.isolated_partitions = isolated_partitions
        self.help = help or ''
        self._sinks = list(sink) if sink is not None else []
        self._on_error: Optional[AgentErrorHandler] = on_error
        self.supervisor_strategy = supervisor_strategy
        # Weak containers so dead actors are cleaned up automatically.
        self._actors = WeakSet()
        self._actor_by_partition = WeakValueDictionary()
        if self.isolated_partitions and self.concurrency > 1:
            raise ImproperlyConfigured(
                'Agent concurrency must be 1 when using isolated partitions')
        Service.__init__(self)

    async def _start_one(self,
                         *,
                         index: Optional[int] = None,
                         active_partitions: Optional[Set[TP]] = None,
                         stream: StreamT = None,
                         channel: ChannelT = None) -> ActorT:
        """Start a single actor instance via `_start_task`."""
        # an index of None means there's only one instance,
        # and `index is None` is used as a test by functions that
        # disallows concurrency.
        index = index if self.concurrency > 1 else None
        return await self._start_task(
            index=index,
            active_partitions=active_partitions,
            stream=stream,
            channel=channel,
            beacon=self.beacon,
        )

    async def _start_one_supervised(self,
                                    index: Optional[int] = None,
                                    active_partitions: Optional[
                                        Set[TP]] = None,
                                    stream: StreamT = None) -> ActorT:
        """Start one actor, register it with the supervisor, and start it."""
        aref = await self._start_one(
            index=index,
            active_partitions=active_partitions,
            stream=stream,
        )
        self.supervisor.add(aref)
        await aref.maybe_start()
        return aref

    async def _start_for_partitions(self,
                                    active_partitions: Set[TP]) -> ActorT:
        """Start a supervised actor dedicated to *active_partitions*."""
        assert active_partitions
        self.log.info('Starting actor for partitions %s', active_partitions)
        return await self._start_one_supervised(None, active_partitions)

    async def on_start(self) -> None:
        """Service callback: create the supervisor and start all actors."""
        self.supervisor = self._new_supervisor()
        await self._on_start_supervisor()

    def _new_supervisor(self) -> SupervisorStrategyT:
        """Instantiate the configured supervisor strategy for this agent."""
        return self._get_supervisor_strategy()(
            max_restarts=100.0,
            over=1.0,
            replacement=self._replace_actor,
            loop=self.loop,
            beacon=self.beacon,
        )

    async def _replace_actor(self, service: ServiceT, index: int) -> ServiceT:
        """Supervisor callback: restart a crashed actor, reusing its stream."""
        aref = cast(ActorRefT, service)
        return await self._start_one(
            index=index,
            active_partitions=aref.active_partitions,
            stream=aref.stream,
            channel=cast(ChannelT, aref.stream.channel),
        )

    def _get_supervisor_strategy(self) -> Type[SupervisorStrategyT]:
        """Return the supervisor class: the agent's own, or the app default."""
        SupervisorStrategy = self.supervisor_strategy
        if SupervisorStrategy is None:
            SupervisorStrategy = self.app.conf.agent_supervisor
        return SupervisorStrategy

    async def _on_start_supervisor(self) -> None:
        """Start ``concurrency`` actors under the supervisor."""
        active_partitions = self._get_active_partitions()
        channel: ChannelT = cast(ChannelT, None)
        for i in range(self.concurrency):
            res = await self._start_one(
                index=i,
                active_partitions=active_partitions,
                channel=channel,
            )
            if channel is None:
                # First concurrency actor creates channel,
                # then we reuse it for --concurrency=n.
                # This way they share the same queue.
                channel = res.stream.channel
            self.supervisor.add(res)
        await self.supervisor.start()

    def _get_active_partitions(self) -> Optional[Set[TP]]:
        """Return the (initially empty) partition set, or None if shared."""
        active_partitions: Optional[Set[TP]] = None
        if self.isolated_partitions:
            # when we start our first agent, we create the set of
            # partitions early, and save it in ._pending_active_partitions.
            # That way we can update the set once partitions are assigned,
            # and the actor we started may be assigned one of the partitions.
            active_partitions = self._pending_active_partitions = set()
        return active_partitions

    async def on_stop(self) -> None:
        """Service callback: stop the supervisor (and thus the actors)."""
        # Agents iterate over infinite streams, so we cannot wait for it
        # to stop.
        # Instead we cancel it and this forces the stream to ack the
        # last message processed (but not the message causing the error
        # to be raised).
        await self._stop_supervisor()

    async def _stop_supervisor(self) -> None:
        """Stop and discard the supervisor if one is running."""
        if self.supervisor:
            await self.supervisor.stop()
            self.supervisor = None

    def cancel(self) -> None:
        """Cancel every live actor belonging to this agent."""
        for actor in self._actors:
            actor.cancel()

    async def on_partitions_revoked(self, revoked: Set[TP]) -> None:
        """Dispatch partition revocation to isolated/shared handler."""
        if self.isolated_partitions:
            # isolated: start/stop actors for each partition
            await self.on_isolated_partitions_revoked(revoked)
        else:
            await self.on_shared_partitions_revoked(revoked)

    async def on_partitions_assigned(self, assigned: Set[TP]) -> None:
        """Dispatch partition assignment to isolated/shared handler."""
        if self.isolated_partitions:
            await self.on_isolated_partitions_assigned(assigned)
        else:
            await self.on_shared_partitions_assigned(assigned)

    async def on_isolated_partitions_revoked(self, revoked: Set[TP]) -> None:
        """Notify (and forget) the per-partition actor of each revoked TP."""
        self.log.dev('Partitions revoked')
        for tp in revoked:
            aref: Optional[ActorRefT] = self._actor_by_partition.pop(tp, None)
            if aref is not None:
                await aref.on_isolated_partition_revoked(tp)

    async def on_isolated_partitions_assigned(self, assigned: Set[TP]) -> None:
        """Assign each newly-received partition (in sorted order)."""
        for tp in sorted(assigned):
            await self._assign_isolated_partition(tp)

    async def _assign_isolated_partition(self, tp: TP) -> None:
        """Bind *tp* to an actor, reusing the boot actor the first time."""
        if (not self._first_assignment_done and not self._actor_by_partition):
            self._first_assignment_done = True
            # if this is the first time we are assigned
            # we need to reassign the agent we started at boot to
            # one of the partitions.
            self._on_first_isolated_partition_assigned(tp)
        await self._maybe_start_isolated(tp)

    def _on_first_isolated_partition_assigned(self, tp: TP) -> None:
        """Give the single boot-time actor its first partition."""
        assert self._actors
        assert len(self._actors) == 1
        self._actor_by_partition[tp] = next(iter(self._actors))
        if self._pending_active_partitions is not None:
            assert not self._pending_active_partitions
            self._pending_active_partitions.add(tp)

    async def _maybe_start_isolated(self, tp: TP) -> None:
        """Ensure an actor exists for *tp*, then notify it of the assignment."""
        try:
            aref = self._actor_by_partition[tp]
        except KeyError:
            aref = await self._start_isolated(tp)
            self._actor_by_partition[tp] = aref
        await aref.on_isolated_partition_assigned(tp)

    async def _start_isolated(self, tp: TP) -> ActorT:
        """Start a new actor dedicated to the single partition *tp*."""
        return await self._start_for_partitions({tp})

    async def on_shared_partitions_revoked(self, revoked: Set[TP]) -> None:
        """No-op hook for shared (non-isolated) partition revocation."""
        ...

    async def on_shared_partitions_assigned(self, assigned: Set[TP]) -> None:
        """No-op hook for shared (non-isolated) partition assignment."""
        ...

    def info(self) -> Mapping:
        """Return the constructor arguments describing this agent."""
        return {
            'app': self.app,
            'fun': self.fun,
            'name': self.name,
            'channel': self.channel,
            'concurrency': self.concurrency,
            'help': self.help,
            'sinks': self._sinks,
            'on_error': self._on_error,
            'supervisor_strategy': self.supervisor_strategy,
            'isolated_partitions': self.isolated_partitions,
        }

    def clone(self, *, cls: Type[AgentT] = None, **kwargs: Any) -> AgentT:
        """Create a copy of this agent (optionally as another class),
        overriding :meth:`info` fields with **kwargs."""
        return (cls or type(self))(**{**self.info(), **kwargs})

    def test_context(self,
                     channel: ChannelT = None,
                     supervisor_strategy: SupervisorStrategyT = None,
                     on_error: AgentErrorHandler = None,
                     **kwargs: Any) -> AgentTestWrapperT:  # pragma: no cover
        """Clone the agent into a test wrapper that crashes on errors."""
        # flow control into channel queues are disabled at startup,
        # so need to resume that.
        self.app.flow_control.resume()

        async def on_agent_error(agent: AgentT, exc: BaseException) -> None:
            # Chain the user-provided error handler before crashing
            # the test agent.
            if on_error is not None:
                await on_error(agent, exc)
            await agent.crash_test_agent(exc)

        return self.clone(
            cls=AgentTestWrapper,
            channel=channel if channel is not None else self.app.channel(),
            supervisor_strategy=supervisor_strategy or CrashingSupervisor,
            original_channel=self.channel,
            on_error=on_agent_error,
            **kwargs)

    def _prepare_channel(self,
                         channel: Union[str, ChannelT] = None,
                         internal: bool = True,
                         key_type: ModelArg = None,
                         value_type: ModelArg = None,
                         **kwargs: Any) -> ChannelT:
        """Coerce *channel* (None, str, or ChannelT) into a ChannelT.

        ``None`` becomes a topic named ``<app id>-<agent name>``;
        a string becomes a topic of that name.  Raises TypeError for
        anything else.
        """
        app = self.app
        channel = f'{app.conf.id}-{self.name}' if channel is None else channel
        if isinstance(channel, ChannelT):
            return cast(ChannelT, channel)
        elif isinstance(channel, str):
            return app.topic(channel,
                             internal=internal,
                             key_type=key_type,
                             value_type=value_type,
                             **kwargs)
        raise TypeError(
            f'Channel must be channel, topic, or str; not {type(channel)}')

    def __call__(self,
                 *,
                 index: int = None,
                 active_partitions: Set[TP] = None,
                 stream: StreamT = None,
                 channel: ChannelT = None) -> ActorRefT:
        """Create (but do not start) an actor ref for this agent."""
        # The agent function can be reused by other agents/tasks.
        # For example:
        #
        #   @app.agent(logs_topic, through='other-topic')
        #   filter_log_errors_(stream):
        #       async for event in stream:
        #           if event.severity == 'error':
        #               yield event
        #
        #   @app.agent(logs_topic)
        #   def alert_on_log_error(stream):
        #       async for event in filter_log_errors(stream):
        #            alert(f'Error occurred: {event!r}')
        #
        # Calling `res = filter_log_errors(it)` will end you up with
        # an AsyncIterable that you can reuse (but only if the agent
        # function is an `async def` function that yields)
        if stream is None:
            stream = self.stream(
                channel=channel,
                concurrency_index=index,
                active_partitions=active_partitions,
            )
        else:
            # reusing actor stream after agent restart
            assert stream.concurrency_index == index
            assert stream.active_partitions == active_partitions
        return self.actor_from_stream(stream)

    def actor_from_stream(self, stream: StreamT) -> ActorRefT:
        """Wrap the agent function's result for *stream* in an Actor.

        Awaitable results get an AwaitableActor; async iterables get
        an AsyncIterableActor.
        """
        res = self.fun(stream)
        typ = cast(Type[Actor], (AwaitableActor if isinstance(res, Awaitable)
                                 else AsyncIterableActor))
        return typ(
            self,
            stream,
            res,
            index=stream.concurrency_index,
            active_partitions=stream.active_partitions,
            loop=self.loop,
            beacon=self.beacon,
        )

    def add_sink(self, sink: SinkT) -> None:
        """Register *sink* to receive yielded values (no duplicates)."""
        if sink not in self._sinks:
            self._sinks.append(sink)

    def stream(self,
               channel: ChannelT = None,
               active_partitions: Set[TP] = None,
               **kwargs: Any) -> StreamT:
        """Create a stream over the agent's channel (or *channel*)."""
        if channel is None:
            channel = cast(TopicT, self.channel_iterator).clone(
                is_iterator=False,
                active_partitions=active_partitions,
            )
        if active_partitions is not None:
            assert channel.active_partitions == active_partitions
        s = self.app.stream(channel,
                            loop=self.loop,
                            active_partitions=active_partitions,
                            **kwargs)
        s.add_processor(self._maybe_unwrap_reply_request)
        return s

    def _maybe_unwrap_reply_request(self, value: V) -> Any:
        """Stream processor: unwrap ReqRepRequest envelopes to their value."""
        if isinstance(value, ReqRepRequest):
            return value.value
        return value

    async def _start_task(self,
                          *,
                          index: Optional[int],
                          active_partitions: Optional[Set[TP]] = None,
                          stream: StreamT = None,
                          channel: ChannelT = None,
                          beacon: NodeT = None) -> ActorRefT:
        """Create an actor ref and prepare its consuming task."""
        # If the agent is an async function we simply start it,
        # if it returns an AsyncIterable/AsyncGenerator we start a task
        # that will consume it.
        actor = self(
            index=index,
            active_partitions=active_partitions,
            stream=stream,
            channel=channel,
        )
        return await self._prepare_actor(actor, beacon)

    async def _prepare_actor(self, aref: ActorRefT,
                             beacon: NodeT) -> ActorRefT:
        """Create the asyncio task that drives *aref* and register it."""
        coro: Any
        if isinstance(aref, Awaitable):
            # agent does not yield
            coro = aref
            if self._sinks:
                raise ImproperlyConfigured('Agent must yield to use sinks')
        else:
            # agent yields and is an AsyncIterator so we have to consume it.
            coro = self._slurp(aref, aiter(aref))
        task = asyncio.Task(self._execute_task(coro, aref), loop=self.loop)
        task._beacon = beacon  # type: ignore
        aref.actor_task = task
        self._actors.add(aref)
        return aref

    async def _execute_task(self, coro: Awaitable, aref: ActorRefT) -> None:
        """Await *coro*, handling cancellation and crashing on errors."""
        # This executes the agent task itself, and does exception handling.
        try:
            await coro
        except asyncio.CancelledError:
            # Only propagate cancellation when the service is shutting down.
            if self.should_stop:
                raise
        except Exception as exc:
            if self._on_error is not None:
                await self._on_error(self, exc)

            # Mark ActorRef as dead, so that supervisor thread
            # can start a new one.
            await aref.crash(exc)
            self.supervisor.wakeup()
            raise

    async def _slurp(self, res: ActorRefT, it: AsyncIterator) -> None:
        """Consume values yielded by the agent, replying and feeding sinks."""
        # this is used when the agent returns an AsyncIterator,
        # and simply consumes that async iterator.
        stream: Optional[StreamT] = None
        async for value in it:
            self.log.debug('%r yielded: %r', self.fun, value)
            if stream is None:
                stream = res.stream.get_active_stream()
            event = stream.current_event
            if event is not None:
                if isinstance(event.value, ReqRepRequest):
                    await self._reply(event.key, value, event.value)
            else:
                raise TypeError('Stream has no current event')
            await self._delegate_to_sinks(value)

    async def _delegate_to_sinks(self, value: Any) -> None:
        """Forward *value* to each sink (agent, channel, or callable)."""
        for sink in self._sinks:
            if isinstance(sink, AgentT):
                await cast(AgentT, sink).send(value=value)
            elif isinstance(sink, ChannelT):
                await cast(TopicT, sink).send(value=value)
            else:
                await maybe_async(cast(Callable, sink)(value))

    async def _reply(self, key: Any, value: Any, req: ReqRepRequest) -> None:
        """Send *value* back to the reply topic recorded in *req*."""
        assert req.reply_to
        response = self._response_class(value)(
            key=key,
            value=value,
            correlation_id=req.correlation_id,
        )
        await self.app.send(
            req.reply_to,
            key=None,
            value=response,
        )

    def _response_class(self, value: Any) -> Type[ReqRepResponse]:
        """Pick the reply-envelope class appropriate for *value*."""
        if isinstance(value, ModelT):
            return ModelReqRepResponse
        return ReqRepResponse

    async def cast(self,
                   value: V = None,
                   *,
                   key: K = None,
                   partition: int = None) -> None:
        """Send to the agent's channel without requesting a reply."""
        await self.send(key=key, value=value, partition=partition)

    async def ask(self,
                  value: V = None,
                  *,
                  key: K = None,
                  partition: int = None,
                  reply_to: ReplyToArg = None,
                  correlation_id: str = None) -> Any:
        """Send to the agent and await the reply value."""
        p = await self.ask_nowait(
            value,
            key=key,
            partition=partition,
            reply_to=reply_to or self.app.conf.reply_to,
            correlation_id=correlation_id,
            force=True,  # Send immediately, since we are waiting for result.
        )
        app = cast(App, self.app)
        await app._reply_consumer.add(p.correlation_id, p)
        await app.maybe_start_client()
        return await p

    async def ask_nowait(self,
                         value: V = None,
                         *,
                         key: K = None,
                         partition: int = None,
                         reply_to: ReplyToArg = None,
                         correlation_id: str = None,
                         force: bool = False) -> ReplyPromise:
        """Send a request envelope and return a promise for the reply."""
        req = self._create_req(key, value, reply_to, correlation_id)
        await self.channel.send(
            key=key,
            value=req,
            partition=partition,
            force=force,
        )
        return ReplyPromise(req.reply_to, req.correlation_id)

    def _create_req(self,
                    key: K = None,
                    value: V = None,
                    reply_to: ReplyToArg = None,
                    correlation_id: str = None) -> ReqRepRequest:
        """Build a request/reply envelope; requires *reply_to*."""
        if reply_to is None:
            raise TypeError('Missing reply_to argument')
        topic_name = self._get_strtopic(reply_to)
        # Generate a correlation id if the caller did not supply one.
        correlation_id = correlation_id or str(uuid4())
        return self._request_class(value)(
            value=value,
            reply_to=topic_name,
            correlation_id=correlation_id,
        )

    def _request_class(self, value: V) -> Type[ReqRepRequest]:
        """Pick the request-envelope class appropriate for *value*."""
        if isinstance(value, ModelT):
            return ModelReqRepRequest
        return ReqRepRequest

    async def send(self,
                   *,
                   key: K = None,
                   value: V = None,
                   partition: int = None,
                   key_serializer: CodecArg = None,
                   value_serializer: CodecArg = None,
                   callback: MessageSentCallback = None,
                   reply_to: ReplyToArg = None,
                   correlation_id: str = None,
                   force: bool = False) -> Awaitable[RecordMetadata]:
        """Send message to topic used by agent.

        If *reply_to* is set the value is wrapped in a request/reply
        envelope (see `_create_req`).
        """
        if reply_to:
            value = self._create_req(key, value, reply_to, correlation_id)
        return await self.channel.send(
            key=key,
            value=value,
            partition=partition,
            key_serializer=key_serializer,
            value_serializer=value_serializer,
            force=force,
        )

    def _get_strtopic(self, topic: Union[str, ChannelT, TopicT,
                                         AgentT]) -> str:
        """Resolve an agent/topic/str to a topic name string.

        Plain (non-topic) channels have no name, so raise ValueError.
        """
        if isinstance(topic, AgentT):
            return self._get_strtopic(cast(AgentT, topic).channel)
        if isinstance(topic, TopicT):
            return cast(TopicT, topic).get_topic_name()
        if isinstance(topic, ChannelT):
            raise ValueError('Channels are unnamed topics')
        return cast(str, topic)

    async def map(
        self,
        values: Union[AsyncIterable, Iterable],
        key: K = None,
        reply_to: ReplyToArg = None,
    ) -> AsyncIterator:  # pragma: no cover
        """RPC-map *values* over the agent, yielding replies; one shared key."""
        # Map takes only values, but can provide one key that is used for all.
        async for value in self.kvmap(((key, v) async for v in aiter(values)),
                                      reply_to):
            yield value

    async def kvmap(
        self,
        items: Union[AsyncIterable[Tuple[K, V]], Iterable[Tuple[K, V]]],
        reply_to: ReplyToArg = None,
    ) -> AsyncIterator[str]:  # pragma: no cover
        """RPC-map (key, value) pairs over the agent, yielding replies
        as they arrive (not necessarily in request order)."""
        # kvmap takes (key, value) pairs.
        reply_to = self._get_strtopic(reply_to or self.app.conf.reply_to)

        # BarrierState is the promise that keeps track of pending results.
        # It contains a list of individual ReplyPromises.
        barrier = BarrierState(reply_to)

        async for _ in self._barrier_send(barrier, items, reply_to):
            # Now that we've sent a message, try to see if we have any
            # replies.
            try:
                _, val = barrier.get_nowait()
            except asyncio.QueueEmpty:
                pass
            else:
                yield val
        # All the messages have been sent so finalize the barrier.
        barrier.finalize()

        # Then iterate over the results in the group.
        async for _, value in barrier.iterate():
            yield value

    async def join(
        self,
        values: Union[AsyncIterable[V], Iterable[V]],
        key: K = None,
        reply_to: ReplyToArg = None,
    ) -> List[Any]:  # pragma: no cover
        """Like :meth:`map`, but gather all replies into one list."""
        return await self.kvjoin(
            ((key, value) async for value in aiter(values)),
            reply_to=reply_to,
        )

    async def kvjoin(
            self,
            items: Union[AsyncIterable[Tuple[K, V]], Iterable[Tuple[K, V]]],
            reply_to: ReplyToArg = None) -> List[Any]:  # pragma: no cover
        """Like :meth:`kvmap`, but wait for and order all replies."""
        reply_to = self._get_strtopic(reply_to or self.app.conf.reply_to)
        barrier = BarrierState(reply_to)

        # Map correlation_id -> index
        posindex: MutableMapping[str, int] = {
            cid: i
            async for i, cid in aenumerate(
                self._barrier_send(barrier, items, reply_to))
        }

        # All the messages have been sent so finalize the barrier.
        barrier.finalize()

        # wait until all replies received
        await barrier
        # then construct a list in the correct order.
        values: List = [None] * barrier.total
        async for correlation_id, value in barrier.iterate():
            values[posindex[correlation_id]] = value
        return values

    async def _barrier_send(
            self, barrier: BarrierState, items: Union[AsyncIterable[Tuple[K,
                                                                          V]],
                                                      Iterable[Tuple[K, V]]],
            reply_to: ReplyToArg) -> AsyncIterator[str]:  # pragma: no cover
        """Send each (key, value) as a request, yielding correlation ids."""
        # map: send many tasks to agents
        # while trying to pop incoming results off.
        async for key, value in aiter(items):
            correlation_id = str(uuid4())
            p = await self.ask_nowait(key=key,
                                      value=value,
                                      reply_to=reply_to,
                                      correlation_id=correlation_id)
            # add reply promise to the barrier
            barrier.add(p)

            # the ReplyConsumer will call the barrier whenever a new
            # result comes in.
            app = cast(App, self.app)
            await app.maybe_start_client()
            await app._reply_consumer.add(p.correlation_id, barrier)

            yield correlation_id

    def _repr_info(self) -> str:
        """Short repr payload: the shortened agent name."""
        return shorten_fqdn(self.name)

    def get_topic_names(self) -> Iterable[str]:
        """Return the topic names the agent's channel covers (if a topic)."""
        channel = self.channel
        if isinstance(channel, TopicT):
            return channel.topics
        return []

    @property
    def channel(self) -> ChannelT:
        """The agent's channel, created lazily on first access."""
        if self._channel is None:
            self._channel = self._prepare_channel(
                self._channel_arg,
                key_type=self._key_type,
                value_type=self._value_type,
                **self._channel_kwargs,
            )
        return self._channel

    @channel.setter
    def channel(self, channel: ChannelT) -> None:
        self._channel = channel

    @property
    def channel_iterator(self) -> AsyncIterator:
        """Iterator clone of the channel, memoized on first access."""
        # The channel is "memoized" here, so subsequent access to
        # instance.channel_iterator will return the same value.
        if self._channel_iterator is None:
            # we do not use aiter(channel) here, because
            # that will also add it to the topic conductor too early.
            self._channel_iterator = self.channel.clone(is_iterator=True)
        return self._channel_iterator

    @channel_iterator.setter
    def channel_iterator(self, it: AsyncIterator) -> None:
        self._channel_iterator = it

    @property
    def label(self) -> str:
        """Human-readable label for this agent."""
        return self._agent_label()

    def _agent_label(self, name_suffix: str = '') -> str:
        """Build the label string: class name + shortened function name."""
        s = f'{type(self).__name__}{name_suffix}: '
        s += f'{shorten_fqdn(qualname(self.fun))}'
        return s

    @property
    def shortlabel(self) -> str:
        """Short human-readable label (same as :attr:`label` here)."""
        return self._agent_label()
Example #29
0
class BaseService(ABC, CancellableMixin):
    """Base class for a cancellable, long-running asyncio service.

    Subclasses implement :meth:`_run` (and optionally :meth:`_cleanup`).
    A service is started with :meth:`run` and stopped by triggering its
    :class:`CancelToken` via :meth:`cancel` / :meth:`cancel_nowait`.
    Background work is registered with :meth:`run_task` /
    :meth:`run_daemon_task`, and nested services with
    :meth:`run_child_service` / :meth:`run_daemon`.
    """

    # Use a WeakSet so that we don't have to bother updating it when tasks finish.
    _child_services = None  # : 'WeakSet[BaseService]'
    _tasks = None  # : 'WeakSet[asyncio.Future[Any]]'
    _finished_callbacks = None  # : List[Callable[['BaseService'], None]]
    # Number of seconds cancel() will wait for run() to finish.
    _wait_until_finished_timeout = 5

    # the custom event loop to run in, or None if the default loop should be used
    _loop = None  # : asyncio.AbstractEventLoop

    def __init__(self,
                 token: CancelToken = None,
                 loop: asyncio.AbstractEventLoop = None) -> None:
        """Initialize the service.

        :param token: optional parent cancel token; when given, this
            service's token is chained to it so cancelling the parent
            also cancels this service.
        :param loop: event loop to run in, or None for the default loop.
        """
        self.events = ServiceEvents()
        self._run_lock = asyncio.Lock()
        self._child_services = WeakSet()
        self._tasks = WeakSet()
        self._finished_callbacks = []

        self._loop = loop

        base_token = CancelToken(type(self).__name__, loop=loop)

        if token is None:
            self.cancel_token = base_token
        else:
            self.cancel_token = base_token.chain(token)

        self._executor = get_asyncio_executor()

    @property
    def logger(self) -> Logger:
        """Return the logger for this service.

        Fix: this previously returned the ``Logger`` class object itself
        instead of a logger instance, so every ``self.logger.debug(...)``
        call site was broken.  Return a per-class stdlib logger instead.
        """
        import logging  # function-scope import keeps the fix self-contained
        return logging.getLogger(self.__module__ + '.' + type(self).__name__)

    def get_event_loop(self) -> asyncio.AbstractEventLoop:
        """Return the loop this service runs in (default loop if none was set)."""
        if self._loop is None:
            return asyncio.get_event_loop()
        else:
            return self._loop

    async def run(
        self,
        finished_callback: Optional[Callable[["BaseService"], None]] = None
    ) -> None:
        """Await for the service's _run() coroutine.

        Once _run() returns, triggers the cancel token, call cleanup() and
        finished_callback (if one was passed).

        :raises ValidationError: if the service is already running, or has
            already been cancelled (services cannot be restarted).
        """
        if self.is_running:
            raise ValidationError(
                "Cannot start the service while it's already running")
        elif self.is_cancelled:
            raise ValidationError(
                "Cannot restart a service that has already been cancelled")

        if finished_callback:
            self._finished_callbacks.append(finished_callback)

        try:
            async with self._run_lock:
                self.events.started.set()
                await self._run()
        except OperationCancelled as e:
            # Normal exit path: the cancel token fired.
            self.logger.debug("%s finished: %s", self, e)
        except Exception:
            self.logger.exception("Unexpected error in %r, exiting", self)
        finally:
            # Trigger our cancel token to ensure all pending asyncio tasks and background
            # coroutines started by this service exit cleanly.
            self.events.cancelled.set()
            self.cancel_token.trigger()

            await self.cleanup()

            for callback in self._finished_callbacks:
                callback(self)

            self.events.finished.set()
            self.logger.debug("%s halted cleanly", self)

    def add_finished_callback(
            self, finished_callback: Callable[["BaseService"], None]) -> None:
        """Register a callback invoked with this service once run() finishes."""
        self._finished_callbacks.append(finished_callback)

    def run_task(self, awaitable: Awaitable[Any]) -> None:
        """Run the given awaitable in the background.

        The awaitable should return whenever this service's cancel token is triggered.

        If it raises OperationCancelled, that is caught and ignored.
        """
        @functools.wraps(awaitable)  # type: ignore
        async def _run_task_wrapper() -> None:
            self.logger.debug("Running task %s", awaitable)
            try:
                await awaitable
            except OperationCancelled:
                pass
            except Exception as e:
                self.logger.warning("Task %s finished unexpectedly: %s",
                                    awaitable, e)
                # exc_info=True so the traceback is actually logged
                # (previously only the literal message was emitted).
                self.logger.debug("Task failure traceback", exc_info=True)
            else:
                self.logger.debug("Task %s finished with no errors",
                                  awaitable)

        self._tasks.add(asyncio.ensure_future(_run_task_wrapper()))

    def run_daemon_task(self, awaitable: Awaitable[Any]) -> None:
        """Run the given awaitable in the background.

        Like :meth:`run_task` but if the task ends without cancelling, then
        this service will terminate as well.
        """
        @functools.wraps(awaitable)  # type: ignore
        async def _run_daemon_task_wrapper() -> None:
            try:
                await awaitable
            finally:
                if not self.is_cancelled:
                    self.logger.debug(
                        "%s finished while %s is still running, "
                        "terminating as well", awaitable, self)
                    self.cancel_token.trigger()

        self.run_task(_run_daemon_task_wrapper())

    def run_child_service(self, child_service: "BaseService") -> None:
        """
        Run a child service and keep a reference to it to be considered during the cleanup.
        """
        if child_service.is_running:
            raise ValidationError(
                f"Can't start service {child_service!r}, child of {self!r}: it's already running"
            )
        elif child_service.is_cancelled:
            raise ValidationError(
                f"Can't restart {child_service!r}, child of {self!r}: it's already completed"
            )

        self._child_services.add(child_service)
        self.run_task(child_service.run())

    def run_daemon(self, service: "BaseService") -> None:
        """
        Run a service and keep a reference to it to be considered during the cleanup.

        If the service finishes while we're still running, we'll terminate as well.
        """
        if service.is_running:
            raise ValidationError(
                f"Can't start daemon {service!r}, child of {self!r}: it's already running"
            )
        elif service.is_cancelled:
            raise ValidationError(
                f"Can't restart daemon {service!r}, child of {self!r}: it's already completed"
            )

        self._child_services.add(service)

        @functools.wraps(service.run)
        async def _run_daemon_wrapper() -> None:
            try:
                await service.run()
            except OperationCancelled:
                pass
            except Exception as e:
                self.logger.warning(
                    "Daemon Service %s finished unexpectedly: %s",
                    service, e)
                # exc_info=True so the traceback is actually logged.
                self.logger.debug("Daemon Service failure traceback",
                                  exc_info=True)
            finally:
                if not self.is_cancelled:
                    self.logger.debug(
                        "%s finished while %s is still running, "
                        "terminating as well", service, self)
                    self.cancel_token.trigger()

        self.run_task(_run_daemon_wrapper())

    def call_later(self, delay: float, callback: "Callable[..., None]",
                   *args: Any) -> None:
        """Schedule *callback(*args)* to run after *delay* seconds (cancellable)."""
        @functools.wraps(callback)
        async def _call_later_wrapped() -> None:
            await self.sleep(delay)
            callback(*args)

        self.run_task(_call_later_wrapped())

    async def _run_in_executor(self, callback: Callable[..., Any],
                               *args: Any) -> Any:
        """Run *callback(*args)* in this service's executor, awaiting the result."""
        loop = self.get_event_loop()
        return await self.wait(
            loop.run_in_executor(self._executor, callback, *args))

    async def cleanup(self) -> None:
        """
        Run the ``_cleanup()`` coroutine and set the ``cleaned_up`` event after the service as
        well as all child services finished their cleanup.

        The ``_cleanup()`` coroutine is invoked before the child services may have finished
        their cleanup.
        """
        if self._child_services:
            self.logger.debug("Waiting for child services: %s",
                              list(self._child_services))
            await asyncio.gather(*[
                child_service.events.cleaned_up.wait()
                for child_service in self._child_services
            ])
            self.logger.debug("All child services finished")
        if self._tasks:
            self.logger.debug("Waiting for tasks: %s", list(self._tasks))
            await asyncio.gather(*self._tasks)
            self.logger.debug("All tasks finished")

        await self._cleanup()
        self.events.cleaned_up.set()

    def cancel_nowait(self) -> None:
        """Trigger the cancel token without waiting for cleanup.

        :raises ValidationError: if the service has not been started.
        """
        if self.is_cancelled:
            self.logger.warning(
                "Tried to cancel %s, but it was already cancelled", self)
            return
        elif not self.is_running:
            raise ValidationError(
                "Cannot cancel a service that has not been started")

        self.logger.debug("Cancelling %s", self)
        self.events.cancelled.set()
        self.cancel_token.trigger()

    async def cancel(self) -> None:
        """Trigger the CancelToken and wait for the cleaned_up event to be set."""
        self.cancel_nowait()

        try:
            # asyncio.TimeoutError is the public spelling; the
            # asyncio.futures.TimeoutError alias is an implementation detail.
            await asyncio.wait_for(self.events.cleaned_up.wait(),
                                   timeout=self._wait_until_finished_timeout)
        except asyncio.TimeoutError:
            self.logger.info(
                "Timed out waiting for %s to finish its cleanup, forcibly "
                "cancelling pending tasks and exiting anyway", self)
            if self._tasks:
                self.logger.debug("Pending tasks: %s", list(self._tasks))
            if self._child_services:
                self.logger.debug("Pending child services: %s",
                                  list(self._child_services))
            self._forcibly_cancel_all_tasks()
            # Sleep a bit because the Future.cancel() method just schedules the callbacks, so we
            # need to give the event loop a chance to actually call them.
            await asyncio.sleep(0.5)
        else:
            self.logger.debug("%s finished cleanly", self)

    def _forcibly_cancel_all_tasks(self) -> None:
        """Cancel every still-tracked background task."""
        for task in self._tasks:
            task.cancel()

    @property
    def is_cancelled(self) -> bool:
        """Return True once the cancel token has been triggered."""
        return self.cancel_token.triggered

    @property
    def is_operational(self) -> bool:
        """Return True while the service has started and is not cancelled."""
        return self.events.started.is_set() and not self.cancel_token.triggered

    @property
    def is_running(self) -> bool:
        """Return True while run() holds the run lock."""
        return self._run_lock.locked()

    async def threadsafe_cancel(self) -> None:
        """
        Cancel service in another thread. Block until service is cleaned up.
        """
        asyncio.run_coroutine_threadsafe(self.cancel(),
                                         loop=self.get_event_loop())
        await asyncio.wait_for(self.events.cleaned_up.wait(),
                               timeout=self._wait_until_finished_timeout)

    async def sleep(self, delay: float) -> None:
        """Coroutine that completes after a given time (in seconds)."""
        await self.wait(asyncio.sleep(delay))

    @abstractmethod
    async def _run(self) -> None:
        """Run the service's loop.

        Should return or raise OperationCancelled when the CancelToken is triggered.
        """
        raise NotImplementedError()

    async def _cleanup(self) -> None:
        """Clean up any resources held by this service.

        Called after the service's _run() method returns.
        """
        pass

    def gc(self) -> None:
        """Drop finished child services and completed tasks from tracking."""
        for cs in self._child_services.copy():
            if cs.events.finished.is_set():
                self._child_services.remove(cs)
        for t in self._tasks.copy():
            if t.done():
                self._tasks.remove(t)
Example #30
0
class RemoteServiceServer(RemoteServiceBase):
    """The server side of a RPC communication.

    Considers all messages coming from the other end as requests for
    RPCs executions. Will perform them and send results as responses.

    After having created an instance and initialized it with a socket
    the reader loop should be started by calling run.

    """
    def __init__(self, local_service, remote_address):
        """Create a responder for the given service.

        local_service (Service): the object whose methods should be
            called via RPC.

        For other arguments see RemoteServiceBase.

        """
        super(RemoteServiceServer, self).__init__(remote_address)
        self.local_service = local_service

        # Greenlets currently executing incoming requests; a WeakSet so
        # finished greenlets vanish without explicit bookkeeping.
        self.pending_incoming_requests_threads = WeakSet()

    def finalize(self, reason=""):
        """See RemoteServiceBase.finalize."""
        super(RemoteServiceServer, self).finalize(reason)

        # Kill any in-flight request handlers; block=False so we don't
        # wait for them while tearing down.
        for thread in self.pending_incoming_requests_threads:
            thread.kill(RPCError(reason), block=False)

        self.pending_incoming_requests_threads.clear()

    def handle(self, socket_):
        """Bind to the given socket and start the reader loop in a greenlet.

        socket_: the socket connected to the remote party.

        """
        self.initialize(socket_, self.remote_address)
        gevent.spawn(self.run)

    def run(self):
        """Start listening for requests, and go on forever.

        Read messages from the socket and issue greenlets to parse
        them, execute methods and send the response to the client.
        This method won't return as long as there's something to read,
        it's therefore advisable to spawn a greenlet to call it.

        """
        while True:
            try:
                data = self._read()
            except IOError:
                break

            # An empty read means the remote end closed the connection.
            if len(data) == 0:
                self.finalize("Connection closed.")
                break

            gevent.spawn(self.process_data, data)

    def process_data(self, data):
        """Handle the message.

        JSON-decode it and forward it to process_incoming_request
        (unconditionally!).

        data (bytes): the message read from the socket.

        """
        # Decode the incoming data.  The legacy `encoding` keyword of
        # json.loads was dropped: UTF-8 is the default, and the keyword
        # was removed from the stdlib in Python 3.9.
        try:
            message = json.loads(data)
        except ValueError:
            logger.warning("Cannot parse incoming message, discarding.")
            return

        self.process_incoming_request(message)

    def process_incoming_request(self, request):
        """Handle the request.

        Parse the request, execute the method it asks for, format the
        result and send the response.

        request (dict): the JSON-decoded request.

        """
        # Validate the request.  Use .keys() instead of the
        # Python-2-only .iterkeys() so this also works on Python 3.
        if not {"__id", "__method", "__data"}.issubset(request.keys()):
            logger.warning("Request is missing some fields, ignoring.")
            return

        # Determine the ID.
        id_ = request["__id"]

        # Store the request.
        self.pending_incoming_requests_threads.add(gevent.getcurrent())

        # Build the response.
        response = {"__id": id_, "__data": None, "__error": None}

        method_name = request["__method"]

        if not hasattr(self.local_service, method_name):
            response["__error"] = "Method %s doesn't exist." % method_name
        else:
            method = getattr(self.local_service, method_name)

            # Only methods explicitly marked rpc_callable may be invoked
            # remotely.
            if not getattr(method, "rpc_callable", False):
                response["__error"] = "Method %s isn't callable." % method_name
            else:
                try:
                    response["__data"] = method(**request["__data"])
                except Exception as error:
                    response["__error"] = "%s: %s\n%s" % \
                        (error.__class__.__name__, error,
                         traceback.format_exc())

        # Encode it (the `encoding` keyword was dropped; see above).
        try:
            data = json.dumps(response)
        except (TypeError, ValueError):
            logger.warning("JSON encoding failed.", exc_info=True)
            return

        # Send it.
        try:
            self._write(data)
        except IOError:
            # Log messages have already been produced.
            return
Example #31
0
class Agent(AgentT, Service):
    """Agent.

    This is the type of object returned by the ``@app.agent`` decorator.
    """

    # supervisor is None until the agent is started so we cast to simplify.
    supervisor: SupervisorStrategyT = cast(SupervisorStrategyT, None)

    # channel is loaded lazily on .channel property access
    # to make sure configuration is not accessed when agent created
    # at module-scope.
    _channel: Optional[ChannelT] = None
    _channel_arg: Optional[Union[str, ChannelT]]
    _channel_kwargs: Dict[str, Any]
    _channel_iterator: Optional[AsyncIterator] = None
    _sinks: List[SinkT]

    _actors: MutableSet[ActorRefT]
    _actor_by_partition: MutableMapping[TP, ActorRefT]

    #: This mutable set is used by the first agent we start,
    #: so that we can update its active_partitions later
    #: (in on_partitions_assigned, when we know what partitions we get).
    _pending_active_partitions: Optional[Set[TP]] = None

    _first_assignment_done: bool = False

    def __init__(
        self,
        fun: AgentFun,
        *,
        app: AppT,
        name: str = None,
        channel: Union[str, ChannelT] = None,
        concurrency: int = 1,
        sink: Iterable[SinkT] = None,
        on_error: AgentErrorHandler = None,
        supervisor_strategy: Type[SupervisorStrategyT] = None,
        help: str = None,
        schema: SchemaT = None,
        key_type: ModelArg = None,
        value_type: ModelArg = None,
        isolated_partitions: bool = False,
        use_reply_headers: bool = None,
        **kwargs: Any,
    ) -> None:
        """Initialize the agent.

        Arguments:
            fun: the decorated agent function.
            app: the app this agent belongs to.
            name: agent name; derived from *fun* when not given.
            channel: channel instance or topic name to consume from;
                the actual channel is created lazily (see ``channel``
                property), so only the argument is stored here.
            concurrency: number of concurrent actor instances
                (falsy values are normalized to 1).
            sink: sinks that receive agent results (copied into a list).
            on_error: error callback — presumably invoked when the agent
                raises; confirm against the caller.
            supervisor_strategy: supervisor class used to restart actors.
            help: human-readable description of the agent.
            schema/key_type/value_type: serialization settings; asserted
                to be used only when *channel* is unset or a topic name.
            isolated_partitions: run one actor per assigned partition;
                incompatible with ``concurrency > 1``.
            use_reply_headers: NOTE(review): presumably controls whether
                reply metadata travels in message headers — confirm.
            **kwargs: extra keyword arguments forwarded to the channel.

        Raises:
            ImproperlyConfigured: if ``isolated_partitions`` is enabled
                together with ``concurrency > 1``.
        """
        self.app = app
        self.fun: AgentFun = fun
        self.name = name or canonshortname(self.fun)
        # key-type/value_type arguments only apply when a channel
        # is not set
        if schema is not None:
            assert channel is None or isinstance(channel, str)
        if key_type is not None:
            assert channel is None or isinstance(channel, str)
        self._key_type = key_type
        if value_type is not None:
            assert channel is None or isinstance(channel, str)
        self._schema = schema
        self._value_type = value_type
        self._channel_arg = channel
        self._channel_kwargs = kwargs
        self.concurrency = concurrency or 1
        self.isolated_partitions = isolated_partitions
        self.help = help or ""
        self._sinks = list(sink) if sink is not None else []
        self._on_error: Optional[AgentErrorHandler] = on_error
        self.supervisor_strategy = supervisor_strategy
        # Weak containers: actors that die are dropped automatically.
        self._actors = WeakSet()
        self._actor_by_partition = WeakValueDictionary()
        if self.isolated_partitions and self.concurrency > 1:
            raise ImproperlyConfigured(
                "Agent concurrency must be 1 when using isolated partitions")
        self.use_reply_headers = use_reply_headers
        Service.__init__(self)

    def on_init_dependencies(self) -> Iterable[ServiceT]:
        """Return list of service dependencies required to start agent."""
        # Reparent our beacon under the app's agent manager; the agent
        # has no service dependencies of its own.
        self.beacon.reattach(self.app.agents.beacon)
        return []

    def actor_tracebacks(self) -> List[str]:
        """Return the current traceback of every active actor instance."""
        return [ref.traceback() for ref in self._actors]

    async def _start_one(
        self,
        *,
        index: Optional[int] = None,
        active_partitions: Optional[Set[TP]] = None,
        stream: StreamT = None,
        channel: ChannelT = None,
    ) -> ActorT:
        """Start a single actor instance for this agent.

        With concurrency disabled the index is forced to ``None``;
        ``index is None`` is used elsewhere as the test that
        concurrency is not allowed.
        """
        if self.concurrency <= 1:
            index = None
        return await self._start_task(
            index=index,
            active_partitions=active_partitions,
            stream=stream,
            channel=channel,
            beacon=self.beacon,
        )

    async def _start_one_supervised(
        self,
        index: Optional[int] = None,
        active_partitions: Optional[Set[TP]] = None,
        stream: StreamT = None,
    ) -> ActorT:
        """Start one actor, register it with the supervisor and start it."""
        actor = await self._start_one(
            index=index,
            active_partitions=active_partitions,
            stream=stream,
        )
        self.supervisor.add(actor)
        await actor.maybe_start()
        return actor

    async def _start_for_partitions(self,
                                    active_partitions: Set[TP]) -> ActorT:
        """Start a supervised actor dedicated to *active_partitions*."""
        # A dedicated actor must always have at least one partition.
        assert active_partitions
        self.log.info("Starting actor for partitions %s", active_partitions)
        return await self._start_one_supervised(None, active_partitions)

    async def on_start(self) -> None:
        """Call when an agent starts."""
        # Create the supervisor first; it then spawns the initial actors.
        self.supervisor = self._new_supervisor()
        await self._on_start_supervisor()

    def _new_supervisor(self) -> SupervisorStrategyT:
        """Instantiate the supervisor used to restart this agent's actors."""
        strategy_cls = self._get_supervisor_strategy()
        return strategy_cls(
            max_restarts=100.0,
            over=1.0,
            replacement=self._replace_actor,
            loop=self.loop,
            beacon=self.beacon,
        )

    async def _replace_actor(self, service: ServiceT, index: int) -> ServiceT:
        """Start a replacement actor (supervisor restart callback)."""
        aref = cast(ActorRefT, service)
        stream = aref.stream
        # Reuse the crashed actor's stream/partitions for the new one.
        return await self._start_one(
            index=index,
            active_partitions=aref.active_partitions,
            stream=stream,
            channel=cast(ChannelT, stream.channel),
        )

    def _get_supervisor_strategy(self) -> Type[SupervisorStrategyT]:
        """Return supervisor class, falling back to the app's default."""
        strategy = self.supervisor_strategy
        if strategy is not None:
            return strategy
        return cast(Type[SupervisorStrategyT],
                    self.app.conf.agent_supervisor)

    async def _on_start_supervisor(self) -> None:
        """Spawn the initial actor instances and start the supervisor."""
        active_partitions = self._get_active_partitions()
        channel: ChannelT = cast(ChannelT, None)
        for index in range(self.concurrency):
            actor = await self._start_one(
                index=index,
                active_partitions=active_partitions,
                channel=channel,
            )
            if channel is None:
                # The first actor creates the channel; the remaining
                # --concurrency=n actors then reuse it so that they all
                # share the same queue.
                channel = actor.stream.channel
            self.supervisor.add(actor)
        await self.supervisor.start()

    def _get_active_partitions(self) -> Optional[Set[TP]]:
        """Return the partition set for isolated mode, or None otherwise."""
        if not self.isolated_partitions:
            return None
        # When the first agent starts we create the (still empty)
        # partition set early and remember it in
        # ._pending_active_partitions, so it can be filled in once
        # partitions are actually assigned — the boot actor may then be
        # bound to one of them.
        partitions: Set[TP] = set()
        self._pending_active_partitions = partitions
        return partitions

    async def on_stop(self) -> None:
        """Call when an agent stops."""
        # Agents iterate over infinite streams, so there is nothing to
        # wait for; cancelling forces the stream to ack the last message
        # processed (but not the message that caused the error).
        await self._stop_supervisor()
        pending = [
            aref.actor_task for aref in self._actors
            if aref.actor_task is not None
        ]
        with suppress(asyncio.CancelledError):
            await asyncio.gather(*pending)
        self._actors.clear()

    async def _stop_supervisor(self) -> None:
        """Stop and discard the supervisor, if one is active."""
        supervisor = self.supervisor
        if supervisor:
            await supervisor.stop()
            self.supervisor = cast(SupervisorStrategyT, None)

    def cancel(self) -> None:
        """Cancel agent and its actor instances running in this process."""
        for actor in self._actors:
            actor.cancel()

    async def on_partitions_revoked(self, revoked: Set[TP]) -> None:
        """Call when partitions are revoked."""
        T = traced_from_parent_span()
        # Isolated mode manages an actor per partition and must stop
        # them individually; shared mode gets a single notification.
        handler = (self.on_isolated_partitions_revoked
                   if self.isolated_partitions
                   else self.on_shared_partitions_revoked)
        await T(handler)(revoked)

    async def on_partitions_assigned(self, assigned: Set[TP]) -> None:
        """Call when partitions are assigned."""
        T = traced_from_parent_span()
        handler = (self.on_isolated_partitions_assigned
                   if self.isolated_partitions
                   else self.on_shared_partitions_assigned)
        await T(handler)(assigned)

    async def on_isolated_partitions_revoked(self, revoked: Set[TP]) -> None:
        """Call when isolated partitions are revoked."""
        self.log.dev("Partitions revoked")
        T = traced_from_parent_span()
        for tp in revoked:
            # Remove the partition's dedicated actor (if any) and
            # notify it of the revocation.
            actor = self._actor_by_partition.pop(tp, None)
            if actor is not None:
                await T(actor.on_isolated_partition_revoked)(tp)

    async def on_isolated_partitions_assigned(self, assigned: Set[TP]) -> None:
        """Call when isolated partitions are assigned."""
        T = traced_from_parent_span()
        # Partitions are handled in sorted order.
        for tp in sorted(assigned):
            await T(self._assign_isolated_partition)(tp)

    async def _assign_isolated_partition(self, tp: TP) -> None:
        """Assign a newly received topic partition to an actor."""
        T = traced_from_parent_span()
        is_first = (not self._first_assignment_done and
                    not self._actor_by_partition)
        if is_first:
            self._first_assignment_done = True
            # On the very first assignment the actor started at boot
            # must be rebound to one of the assigned partitions.
            T(self._on_first_isolated_partition_assigned)(tp)
        await T(self._maybe_start_isolated)(tp)

    def _on_first_isolated_partition_assigned(self, tp: TP) -> None:
        """Bind the boot-time actor to the first assigned partition."""
        # Exactly one actor exists at this point: the one started at boot.
        assert self._actors
        assert len(self._actors) == 1
        self._actor_by_partition[tp] = next(iter(self._actors))
        pending = self._pending_active_partitions
        if pending is not None:
            assert not pending
            pending.add(tp)

    async def _maybe_start_isolated(self, tp: TP) -> None:
        """Ensure an actor exists for *tp*, then notify it of the assignment."""
        try:
            actor = self._actor_by_partition[tp]
        except KeyError:
            # No actor for this partition yet: start a dedicated one.
            # EAFP here: the mapping holds weak values (see __init__),
            # so a separate membership check could go stale.
            actor = await self._start_isolated(tp)
            self._actor_by_partition[tp] = actor
        await actor.on_isolated_partition_assigned(tp)

    async def _start_isolated(self, tp: TP) -> ActorT:
        """Start a new supervised actor for a single topic partition."""
        return await self._start_for_partitions({tp})

    async def on_shared_partitions_revoked(self, revoked: Set[TP]) -> None:
        """Call when non-isolated partitions are revoked."""
        ...  # default implementation: nothing to do

    async def on_shared_partitions_assigned(self, assigned: Set[TP]) -> None:
        """Call when non-isolated partitions are assigned."""
        ...  # default implementation: nothing to do

    def info(self) -> Mapping:
        """Return agent attributes as a dictionary.

        The result can be fed back into ``__init__`` (see :meth:`clone`).
        """
        return dict(
            app=self.app,
            fun=self.fun,
            name=self.name,
            channel=self.channel,
            concurrency=self.concurrency,
            help=self.help,
            sink=self._sinks,
            on_error=self._on_error,
            supervisor_strategy=self.supervisor_strategy,
            isolated_partitions=self.isolated_partitions,
        )

    def clone(self, *, cls: Type[AgentT] = None, **kwargs: Any) -> AgentT:
        """Create clone of this agent object.

        Keyword arguments can be passed to override any argument
        supported by :class:`Agent.__init__ <Agent>`.
        """
        agent_cls = cls if cls is not None else type(self)
        init_kwargs = {**self.info(), **kwargs}
        return agent_cls(**init_kwargs)

    def test_context(
        self,
        channel: ChannelT = None,
        supervisor_strategy: SupervisorStrategyT = None,
        on_error: AgentErrorHandler = None,
        **kwargs: Any,
    ) -> AgentTestWrapperT:  # pragma: no cover
        """Create new unit-testing wrapper for this agent."""
        # Flow control into channel queues is disabled at startup,
        # so it must be resumed for the wrapper to receive anything.
        self.app.flow_control.resume()

        async def on_agent_error(agent: AgentT, exc: BaseException) -> None:
            # Forward to the user handler first, then crash the test
            # wrapper so tests can observe the failure.
            if on_error is not None:
                await on_error(agent, exc)
            await cast(AgentTestWrapper, agent).crash_test_agent(exc)

        test_channel = channel if channel is not None else self.app.channel()
        wrapper = self.clone(
            cls=AgentTestWrapper,
            channel=test_channel,
            supervisor_strategy=supervisor_strategy or CrashingSupervisor,
            original_channel=self.channel,
            on_error=on_agent_error,
            **kwargs,
        )
        return cast(AgentTestWrapperT, wrapper)

    def _prepare_channel(
        self,
        channel: Union[str, ChannelT] = None,
        internal: bool = True,
        schema: SchemaT = None,
        key_type: ModelArg = None,
        value_type: ModelArg = None,
        **kwargs: Any,
    ) -> ChannelT:
        """Resolve the channel argument into an actual channel/topic.

        Raises:
            TypeError: if *channel* is neither ``None``, a topic name
                string, nor a channel instance.
        """
        app = self.app
        has_prefix = False
        if channel is None:
            # Default topic name is derived from app id + agent name.
            channel = f"{app.conf.id}-{self.name}"
            has_prefix = True
        if isinstance(channel, ChannelT):
            return channel
        if isinstance(channel, str):
            return app.topic(
                channel,
                internal=internal,
                schema=schema,
                key_type=key_type,
                value_type=value_type,
                has_prefix=has_prefix,
                **kwargs,
            )
        raise TypeError(
            f"Channel must be channel, topic, or str; not {type(channel)}")

    def __call__(
        self,
        *,
        index: int = None,
        active_partitions: Set[TP] = None,
        stream: StreamT = None,
        channel: ChannelT = None,
    ) -> ActorRefT:
        """Create new actor instance for this agent."""
        # The agent function itself can be reused by other agents/tasks,
        # e.g.::
        #
        #   @app.agent(logs_topic, through='other-topic')
        #   filter_log_errors_(stream):
        #       async for event in stream:
        #           if event.severity == 'error':
        #               yield event
        #
        #   @app.agent(logs_topic)
        #   def alert_on_log_error(stream):
        #       async for event in filter_log_errors(stream):
        #            alert(f'Error occurred: {event!r}')
        #
        # so `res = filter_log_errors(it)` yields an AsyncIterable that
        # can be reused (but only if the agent function is an `async def`
        # function that yields).
        return self.actor_from_stream(
            stream,
            index=index,
            active_partitions=active_partitions,
            channel=channel,
        )

    def actor_from_stream(
        self,
        stream: Optional[StreamT],
        *,
        index: int = None,
        active_partitions: Set[TP] = None,
        channel: ChannelT = None,
    ) -> ActorRefT:
        """Create new actor from stream.

        When *stream* is None a new stream is created; otherwise the
        given stream is reused (this happens when an actor is restarted
        after an agent restart, and the stream must then match the
        requested concurrency index and active partitions).
        """
        we_created_stream = False
        actual_stream: StreamT
        if stream is None:
            actual_stream = self.stream(
                channel=channel,
                concurrency_index=index,
                active_partitions=active_partitions,
            )
            we_created_stream = True
        else:
            # reusing actor stream after agent restart
            assert stream.concurrency_index == index
            assert stream.active_partitions == active_partitions
            actual_stream = stream

        res = self.fun(actual_stream)
        if isinstance(res, AsyncIterable):
            # Only freshly created streams get the reply-unwrapping
            # processor; a reused stream already has it.
            if we_created_stream:
                actual_stream.add_processor(self._maybe_unwrap_reply_request)
            actor_cls: Any = AsyncIterableActor
        else:
            actor_cls = AwaitableActor
        # The two branches previously duplicated this whole construction;
        # the actor class is the only thing that differs.
        return cast(
            ActorRefT,
            actor_cls(
                self,
                actual_stream,
                res,
                index=actual_stream.concurrency_index,
                active_partitions=actual_stream.active_partitions,
                loop=self.loop,
                beacon=self.beacon,
            ),
        )

    def add_sink(self, sink: SinkT) -> None:
        """Register *sink* to receive results produced by this agent."""
        if sink in self._sinks:
            # Already registered: adding twice would deliver every
            # result to the sink twice.
            return
        self._sinks.append(sink)

    def stream(self,
               channel: Optional[ChannelT] = None,
               active_partitions: Optional[Set[TP]] = None,
               **kwargs: Any) -> StreamT:
        """Create underlying stream used by this agent.

        Extra keyword arguments are forwarded to ``app.stream``.
        """
        if channel is None:
            # Clone the memoized channel iterator (is_iterator=False)
            # so each actor gets its own iteration state.
            channel = cast(TopicT, self.channel_iterator).clone(
                is_iterator=False,
                active_partitions=active_partitions,
            )
        if active_partitions is not None:
            # A caller-provided channel must agree on partitions.
            assert channel.active_partitions == active_partitions
        s = self.app.stream(
            channel,
            loop=self.loop,
            active_partitions=active_partitions,
            prefix=self.name,
            beacon=self.beacon,
            **kwargs,
        )
        return s

    def _maybe_unwrap_reply_request(self, value: V) -> Any:
        # RPC requests arrive wrapped in a ReqRepRequest envelope;
        # hand the agent function only the payload.
        return value.value if isinstance(value, ReqRepRequest) else value

    async def _start_task(
        self,
        *,
        index: Optional[int],
        active_partitions: Optional[Set[TP]] = None,
        stream: StreamT = None,
        channel: ChannelT = None,
        beacon: NodeT = None,
    ) -> ActorRefT:
        """Create a new actor for this agent and prepare it for execution."""
        # Calling the agent returns an ActorRef wrapping either a plain
        # coroutine or an async iterable (see actor_from_stream); either
        # way _prepare_actor turns it into a running task.
        aref = self(
            index=index,
            active_partitions=active_partitions,
            stream=stream,
            channel=channel,
        )
        if beacon is None:
            beacon = self.beacon
        return await self._prepare_actor(aref, beacon)

    async def _prepare_actor(self, aref: ActorRefT,
                             beacon: NodeT) -> ActorRefT:
        """Start the execution task for *aref* and register the actor."""
        coro: Any
        if not isinstance(aref, Awaitable):
            # Agent yields: it is an AsyncIterator that must be consumed.
            coro = self._slurp(aref, aiter(aref))
        else:
            # Agent is a plain coroutine and never yields, so sinks
            # (which receive yielded values) cannot possibly be fed.
            if self._sinks:
                raise ImproperlyConfigured("Agent must yield to use sinks")
            coro = aref
        task = asyncio.Task(self._execute_actor(coro, aref), loop=self.loop)
        task._beacon = beacon  # type: ignore
        aref.actor_task = task
        self._actors.add(aref)
        return aref

    async def _execute_actor(self, coro: Awaitable, aref: ActorRefT) -> None:
        # This executes the agent task itself, and does exception handling.
        # Expose this agent through the current-agent context variable
        # for code running inside the task.
        _current_agent.set(self)
        try:
            await coro
        except asyncio.CancelledError as exc:
            if self.should_stop:
                # Cancelled as part of shutdown: propagate normally.
                raise
            else:
                # Cancelled due to rebalance: mark the actor as crashed
                # and let the supervisor start a replacement.
                self.log.info("Restarting on rebalance")
                await aref.crash(exc)
                self.supervisor.wakeup()

        except Exception as exc:
            # User-provided error callback runs first (if configured).
            if self._on_error is not None:
                await self._on_error(self, exc)

            # Mark ActorRef as dead, so that supervisor thread
            # can start a new one.
            await aref.crash(exc)
            self.supervisor.wakeup()

    async def _slurp(self, res: ActorRefT, it: AsyncIterator) -> None:
        # this is used when the agent returns an AsyncIterator,
        # and simply consumes that async iterator: every yielded value
        # is forwarded to the sinks, and replied to the RPC caller when
        # the originating event carries reply metadata.
        stream: Optional[StreamT] = None
        async for value in it:
            self.log.debug("%r yielded: %r", self.fun, value)
            if stream is None:
                # Resolve the concrete stream lazily; only needed to
                # access the event currently being processed.
                stream = res.stream.get_active_stream()
            event = stream.current_event
            if event is not None:
                headers = event.headers
                reply_to: Optional[str] = None
                correlation_id: Optional[str] = None
                if isinstance(event.value, ReqRepRequest):
                    # Envelope-style RPC: reply info wraps the value.
                    req: ReqRepRequest = event.value
                    reply_to = req.reply_to
                    correlation_id = req.correlation_id
                elif headers:
                    # Header-style RPC: reply info travels in the
                    # message headers instead of an envelope.
                    reply_to_bytes = headers.get("Faust-Ag-ReplyTo")
                    if reply_to_bytes:
                        reply_to = want_str(reply_to_bytes)
                        correlation_id_bytes = headers.get(
                            "Faust-Ag-CorrelationId")
                        if correlation_id_bytes:
                            correlation_id = want_str(correlation_id_bytes)
                if reply_to is not None:
                    await self._reply(event.key, value, reply_to,
                                      cast(str, correlation_id))
            await self._delegate_to_sinks(value)

    async def _delegate_to_sinks(self, value: Any) -> None:
        # Forward a value produced by the agent to every sink.
        # A sink may be another agent, a channel/topic, or a
        # (possibly async) callable.
        for sink in self._sinks:
            if isinstance(sink, AgentT):
                # Another agent: send to its input channel.
                await sink.send(value=value)
            elif isinstance(sink, ChannelT):
                # A channel/topic: publish the value there.
                await cast(TopicT, sink).send(value=value)
            else:
                # A callable; maybe_async awaits it if it returns a
                # coroutine.
                await maybe_async(cast(Callable, sink)(value))

    async def _reply(self, key: Any, value: Any, reply_to: str,
                     correlation_id: str) -> None:
        """Publish an RPC reply for *correlation_id* to *reply_to*."""
        assert reply_to
        # Pick the response envelope type based on the value
        # (model-aware vs. plain), then send it to the reply topic.
        response_class = self._response_class(value)
        await self.app.send(
            reply_to,
            key=None,
            value=response_class(
                key=key,
                value=value,
                correlation_id=correlation_id,
            ),
        )

    def _response_class(self, value: Any) -> Type[ReqRepResponse]:
        # Model values use the model-aware response envelope so they
        # serialize correctly; everything else uses the plain one.
        if not isinstance(value, ModelT):
            return ReqRepResponse
        return ModelReqRepResponse

    async def cast(
        self,
        value: V = None,
        *,
        key: K = None,
        partition: int = None,
        timestamp: float = None,
        headers: HeadersArg = None,
    ) -> None:
        """RPC operation: like :meth:`ask` but do not expect reply.

        Cast here is like "casting a spell", and will not expect
        a reply back from the agent.
        """
        # Fire-and-forget: no reply_to is passed, so no ReplyPromise
        # is created and no reply topic is involved.
        await self.send(
            key=key,
            value=value,
            partition=partition,
            timestamp=timestamp,
            headers=headers,
        )

    async def ask(
        self,
        value: V = None,
        *,
        key: K = None,
        partition: int = None,
        timestamp: float = None,
        headers: HeadersArg = None,
        reply_to: ReplyToArg = None,
        correlation_id: str = None,
    ) -> Any:
        """RPC operation: ask agent for result of processing value.

        Blocks until the reply arrives and returns the processed value.
        """
        promise = await self.ask_nowait(
            value,
            key=key,
            partition=partition,
            timestamp=timestamp,
            headers=headers,
            reply_to=reply_to or self.app.conf.reply_to,
            correlation_id=correlation_id,
            force=True,  # send immediately: we block on the result below.
        )
        app = cast(_App, self.app)
        # Register the promise so the reply consumer can resolve it,
        # then make sure the client is actually running.
        await app._reply_consumer.add(promise.correlation_id, promise)
        await app.maybe_start_client()
        return await promise

    async def ask_nowait(
        self,
        value: V = None,
        *,
        key: K = None,
        partition: int = None,
        timestamp: float = None,
        headers: HeadersArg = None,
        reply_to: ReplyToArg = None,
        correlation_id: str = None,
        force: bool = False,
    ) -> ReplyPromise:
        """RPC operation: ask agent for result of processing value.

        Does not wait for the reply; returns a :class:`ReplyPromise`
        that can be awaited for the eventual result.
        """
        if reply_to is None:
            raise TypeError("Missing reply_to argument")
        # Resolve the reply destination to a topic name and pin the
        # correlation id now so the promise matches the request.
        reply_topic = self._get_strtopic(reply_to)
        cid = correlation_id or str(uuid4())
        req_value, req_headers = self._create_req(
            key, value, reply_topic, cid, headers)
        await self.channel.send(
            key=key,
            value=req_value,
            partition=partition,
            timestamp=timestamp,
            headers=req_headers,
            force=force,
        )
        return ReplyPromise(reply_topic, cid)

    def _create_req(
        self,
        key: K = None,
        value: V = None,
        reply_to: ReplyToArg = None,
        correlation_id: str = None,
        headers: HeadersArg = None,
    ) -> Tuple[V, Optional[HeadersArg]]:
        """Prepare value and headers for an outgoing RPC request.

        Depending on ``self.use_reply_headers`` the reply topic and
        correlation id are either added as message headers (value is
        returned unchanged) or the value is wrapped in a request
        envelope.
        """
        if reply_to is None:
            raise TypeError("Missing reply_to argument")
        topic_name = self._get_strtopic(reply_to)
        # Generate a correlation id here if the caller did not pin one.
        correlation_id = correlation_id or str(uuid4())
        open_headers = prepare_headers(headers or {})
        if self.use_reply_headers:
            # Header-style RPC: attach reply metadata as headers.
            merge_headers(
                open_headers,
                {
                    "Faust-Ag-ReplyTo": want_bytes(topic_name),
                    "Faust-Ag-CorrelationId": want_bytes(correlation_id),
                },
            )
            return value, open_headers
        else:
            # wrap value in envelope (model-aware class for ModelT values)
            req = self._request_class(value)(
                value=value,
                reply_to=topic_name,
                correlation_id=correlation_id,
            )
            return req, open_headers

    def _request_class(self, value: V) -> Type[ReqRepRequest]:
        # Model payloads need the model-aware request envelope;
        # plain values use the generic one.
        if not isinstance(value, ModelT):
            return ReqRepRequest
        return ModelReqRepRequest

    async def send(
        self,
        *,
        key: K = None,
        value: V = None,
        partition: int = None,
        timestamp: float = None,
        headers: HeadersArg = None,
        key_serializer: CodecArg = None,
        value_serializer: CodecArg = None,
        callback: MessageSentCallback = None,
        reply_to: ReplyToArg = None,
        correlation_id: str = None,
        force: bool = False,
    ) -> Awaitable[RecordMetadata]:
        """Send message to topic used by agent."""
        if reply_to:
            # RPC-style send: attach the reply destination either as
            # headers or by wrapping the value (see _create_req).
            value, headers = self._create_req(key, value, reply_to,
                                              correlation_id, headers)
        # NOTE(review): ``callback`` is accepted but not forwarded to
        # channel.send -- confirm whether this is intentional.
        return await self.channel.send(
            key=key,
            value=value,
            partition=partition,
            timestamp=timestamp,
            headers=headers,
            key_serializer=key_serializer,
            value_serializer=value_serializer,
            force=force,
        )

    def _get_strtopic(self, topic: Union[str, ChannelT, TopicT,
                                         AgentT]) -> str:
        # Resolve *topic* to a topic name string.
        # Check order matters: AgentT first, and TopicT before the more
        # general ChannelT.
        if isinstance(topic, AgentT):
            # An agent: use the name of the topic it reads from.
            return self._get_strtopic(topic.channel)
        if isinstance(topic, TopicT):
            return topic.get_topic_name()
        if isinstance(topic, ChannelT):
            # Plain (in-memory) channels have no topic name.
            raise ValueError("Channels are unnamed topics")
        return topic

    async def map(
        self,
        values: Union[AsyncIterable, Iterable],
        key: K = None,
        reply_to: ReplyToArg = None,
    ) -> AsyncIterator:  # pragma: no cover
        """RPC map operation on a list of values.

        Results are yielded as they arrive (unordered); use
        :meth:`join` or :meth:`kvjoin` if you need them in order.
        """
        # Pair every value with the single shared key and delegate to
        # kvmap, which implements the actual fan-out.
        pairs = ((key, v) async for v in aiter(values))
        async for reply in self.kvmap(pairs, reply_to):
            yield reply

    async def kvmap(
        self,
        items: Union[AsyncIterable[Tuple[K, V]], Iterable[Tuple[K, V]]],
        reply_to: ReplyToArg = None,
    ) -> AsyncIterator[str]:  # pragma: no cover
        """RPC map operation on a list of ``(key, value)`` pairs.

        A map operation iterates over results as they arrive
        (completion order, not input order).
        See :meth:`join` and :meth:`kvjoin` if you want them in order.
        """
        # kvmap takes (key, value) pairs.
        reply_to = self._get_strtopic(reply_to or self.app.conf.reply_to)

        # BarrierState is the promise that keeps track of pending results.
        # It contains a list of individual ReplyPromises.
        barrier = BarrierState(reply_to)

        async for _ in self._barrier_send(barrier, items, reply_to):
            # Now that we've sent a message, try to see if we have any
            # replies.
            try:
                _, val = barrier.get_nowait()
            except asyncio.QueueEmpty:
                # No reply yet; keep sending.
                pass
            else:
                yield val
        # All the messages have been sent so finalize the barrier.
        barrier.finalize()

        # Then iterate over the results in the group.
        async for _, value in barrier.iterate():
            yield value

    async def join(
        self,
        values: Union[AsyncIterable[V], Iterable[V]],
        key: K = None,
        reply_to: ReplyToArg = None,
    ) -> List[Any]:  # pragma: no cover
        """RPC map operation on a list of values.

        Blocks until every value has been processed, then returns
        the results in input order.
        """
        # Delegate to kvjoin with the single shared key paired with
        # every value.
        pairs = ((key, value) async for value in aiter(values))
        return await self.kvjoin(pairs, reply_to=reply_to)

    async def kvjoin(
        self,
        items: Union[AsyncIterable[Tuple[K, V]], Iterable[Tuple[K, V]]],
        reply_to: ReplyToArg = None,
    ) -> List[Any]:  # pragma: no cover
        """RPC map operation on list of ``(key, value)`` pairs.

        A join returns the results in order, and only returns once
        all values have been processed.
        """
        reply_to = self._get_strtopic(reply_to or self.app.conf.reply_to)
        barrier = BarrierState(reply_to)

        # Map correlation_id -> index, so replies (which arrive in any
        # order) can be slotted back into input order below.
        posindex: MutableMapping[str, int] = {
            cid: i
            async for i, cid in aenumerate(
                self._barrier_send(barrier, items, reply_to))
        }

        # All the messages have been sent so finalize the barrier.
        barrier.finalize()

        # wait until all replies received
        await barrier
        # then construct a list in the correct order.
        values: List = [None] * barrier.total
        async for correlation_id, value in barrier.iterate():
            values[posindex[correlation_id]] = value
        return values

    async def _barrier_send(
        self,
        barrier: BarrierState,
        items: Union[AsyncIterable[Tuple[K, V]], Iterable[Tuple[K, V]]],
        reply_to: ReplyToArg,
    ) -> AsyncIterator[str]:  # pragma: no cover
        # map: send many tasks to agents
        # while trying to pop incoming results off.
        # Yields the correlation id of each request as it is sent.
        key: K
        value: V
        async for key, value in aiter(items):  # type: ignore
            correlation_id = str(uuid4())
            p = await self.ask_nowait(key=key,
                                      value=value,
                                      reply_to=reply_to,
                                      correlation_id=correlation_id)
            # add reply promise to the barrier
            barrier.add(p)

            # the ReplyConsumer will call the barrier whenever a new
            # result comes in.
            app = cast(_App, self.app)
            await app.maybe_start_client()
            await app._reply_consumer.add(p.correlation_id, barrier)

            yield correlation_id

    def _repr_info(self) -> str:
        # Extra information included in repr() output: the agent's
        # (shortened) fully-qualified name.
        return shorten_fqdn(self.name)

    def get_topic_names(self) -> Iterable[str]:
        """Return list of topic names this agent subscribes to."""
        channel = self.channel
        # Only topic-backed channels have names; plain in-memory
        # channels contribute nothing.
        return channel.topics if isinstance(channel, TopicT) else []

    @property
    def channel(self) -> ChannelT:
        """Return channel used by agent."""
        if self._channel is None:
            # Lazily create the channel on first access, from the
            # arguments originally given to the agent.
            self._channel = self._prepare_channel(
                self._channel_arg,
                schema=self._schema,
                key_type=self._key_type,
                value_type=self._value_type,
                **self._channel_kwargs,
            )
        return self._channel

    @channel.setter
    def channel(self, channel: ChannelT) -> None:
        # Override the channel, replacing any previously memoized one.
        self._channel = channel

    @property
    def channel_iterator(self) -> AsyncIterator:
        """Return channel agent iterates over."""
        # The channel is "memoized" here, so subsequent access to
        # instance.channel_iterator will return the same value.
        if self._channel_iterator is None:
            # we do not use aiter(channel) here, because
            # that will also add it to the topic conductor too early.
            self._channel_iterator = self.channel.clone(is_iterator=False)
        return self._channel_iterator

    @channel_iterator.setter
    def channel_iterator(self, it: AsyncIterator) -> None:
        # Override the memoized channel iterator.
        self._channel_iterator = it

    @property
    def label(self) -> str:
        """Return human-readable description of agent."""
        # Delegates to _agent_label so label/shortlabel share formatting.
        return self._agent_label()

    def _agent_label(self, name_suffix: str = "") -> str:
        # Format: "<ClassName><suffix>: <shortened agent function name>"
        fun_name = shorten_fqdn(qualname(self.fun))
        return f"{type(self).__name__}{name_suffix}: {fun_name}"

    @property
    def shortlabel(self) -> str:
        """Return short description of agent."""
        # Currently identical to label; kept separate to satisfy the
        # common label/shortlabel interface.
        return self._agent_label()
Example #32
0
class JavaSocket:
    """
    Wrapper for a ZMQ socket that sends and receives dictionaries
    serialized as JSON.
    """

    def __init__(self, context, port, type, debug):
        """
        :param context: zmq Context used to create the socket
        :param port: TCP port to bind (for PUSH) or connect to (otherwise)
        :param type: zmq socket type (e.g. zmq.PUSH, zmq.PULL)
        :param debug: if True, print sent and received messages
        """
        # request reply socket
        self._socket = context.socket(type)
        self._debug = debug
        # store these as weakrefs so that circular refs don't prevent
        # garbage collection
        self._java_objects = WeakSet()
        if type == zmq.PUSH:
            if debug:
                print("binding {}".format(port))
            self._socket.bind("tcp://127.0.0.1:{}".format(port))
        else:
            if debug:
                print("connecting {}".format(port))
            self._socket.connect("tcp://127.0.0.1:{}".format(port))

    def _register_java_object(self, object):
        # Track shadow objects so __del__ can tell the Java side to
        # release them before this socket shuts down.
        self._java_objects.add(object)

    def __del__(self):
        # make sure all shadow objects have signaled to Java side to
        # release references before they shut down
        for java_object in self._java_objects:
            java_object._close()

    def _convert_np_to_python(self, d):
        """
        Recursively search a dictionary and convert any numpy
        floats/ints to python floats/ints so they can be json
        serialized.  Mutates *d* in place; non-dict arguments are
        ignored.
        """
        # isinstance (rather than exact type check) so dict/list
        # subclasses are handled too, consistently with the nested case.
        if not isinstance(d, dict):
            return
        for k, v in d.items():
            if isinstance(v, dict):
                self._convert_np_to_python(v)
            elif isinstance(v, list):
                for e in v:
                    self._convert_np_to_python(e)
            elif np.issubdtype(type(v), np.floating):
                d[k] = float(v)
            elif np.issubdtype(type(v), np.integer):
                d[k] = int(v)

    def send(self, message, timeout=0):
        """
        Send *message* (a dict, or None for an empty dict) as JSON.

        With timeout == 0 this blocks until the message is queued.
        Otherwise it retries non-blocking sends for up to *timeout*
        milliseconds and returns True on success, False on timeout.
        """
        if message is None:
            message = {}
        # make sure any np types convert to python types so they can be
        # json serialized
        self._convert_np_to_python(message)
        message_string = json.dumps(message)
        if self._debug:
            print("DEBUG, sending: {}".format(message))
        if timeout == 0:
            self._socket.send(bytes(message_string, "iso-8859-1"))
        else:
            start = time.time()
            while 1000 * (time.time() - start) < timeout:
                try:
                    self._socket.send(bytes(message_string, "iso-8859-1"),
                                      flags=zmq.NOBLOCK)
                    return True
                except zmq.ZMQError:
                    pass  # ignore, keep trying
            return False

    def receive(self, timeout=0):
        """
        Receive a JSON message and return it as a dict.

        With timeout == 0 this blocks until a message arrives.
        Otherwise it polls for up to *timeout* milliseconds and returns
        None if nothing arrived in time.  Raises Exception if the reply
        encodes a Java-side exception.
        """
        if timeout == 0:
            reply = self._socket.recv()
        else:
            start = time.time()
            reply = None
            while 1000 * (time.time() - start) < timeout:
                try:
                    reply = self._socket.recv(flags=zmq.NOBLOCK)
                    if reply is not None:
                        break
                except zmq.ZMQError:
                    pass  # ignore, keep trying
            if reply is None:
                return reply
        message = json.loads(reply.decode("iso-8859-1"))
        if self._debug:
            print("DEBUG, recieved: {}".format(message))
        self._check_exception(message)
        return message

    def _check_exception(self, response):
        # The Java side reports errors as
        # {"type": "exception", "value": <message>}.
        if "type" in response and response["type"] == "exception":
            raise Exception(response["value"])

    def close(self):
        self._socket.close()
Example #33
0
class Context(ContextBase, AttributeSetter):
    """Create a zmq Context

    A zmq Context creates sockets via its ``ctx.socket`` method.
    """
    # Default socket options for sockets created by this context
    # (replaced by a dict in __init__; class attribute is a placeholder).
    sockopts = None
    # Process-wide singleton state used by Context.instance().
    _instance = None
    _instance_lock = Lock()
    _instance_pid = None
    # True when this context shadows a libzmq context owned elsewhere;
    # a shadow context must not terminate the underlying context.
    _shadow = False
    # WeakSet of sockets created by this context (set in __init__).
    _sockets = None

    def __init__(self, io_threads=1, **kwargs):
        super(Context, self).__init__(io_threads=io_threads, **kwargs)
        if kwargs.get('shadow', False):
            self._shadow = True
        else:
            self._shadow = False
        self.sockopts = {}
        self._sockets = WeakSet()

    def __del__(self):
        """deleting a Context should terminate it, without trying non-threadsafe destroy"""

        # Calling locals() here conceals issue #1167 on Windows CPython 3.5.4.
        locals()

        # Skip termination for shadow contexts (not ours to terminate)
        # and during interpreter shutdown (term may hang or crash).
        if not self._shadow and not _exiting:
            self.term()

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.term()

    def __copy__(self, memo=None):
        """Copying a Context creates a shadow copy"""
        return self.__class__.shadow(self.underlying)

    __deepcopy__ = __copy__

    @classmethod
    def shadow(cls, address):
        """Shadow an existing libzmq context

        address is the integer address of the libzmq context
        or an FFI pointer to it.

        .. versionadded:: 14.1
        """
        from zmq.utils.interop import cast_int_addr
        address = cast_int_addr(address)
        return cls(shadow=address)

    @classmethod
    def shadow_pyczmq(cls, ctx):
        """Shadow an existing pyczmq context

        ctx is the FFI `zctx_t *` pointer

        .. versionadded:: 14.1
        """
        from pyczmq import zctx
        from zmq.utils.interop import cast_int_addr

        underlying = zctx.underlying(ctx)
        address = cast_int_addr(underlying)
        return cls(shadow=address)

    # static method copied from tornado IOLoop.instance
    @classmethod
    def instance(cls, io_threads=1):
        """Returns a global Context instance.

        Most single-threaded applications have a single, global Context.
        Use this method instead of passing around Context instances
        throughout your code.

        A common pattern for classes that depend on Contexts is to use
        a default argument to enable programs with multiple Contexts
        but not require the argument for simpler applications::

            class MyClass(object):
                def __init__(self, context=None):
                    self.context = context or Context.instance()

        .. versionchanged:: 18.1

            When called in a subprocess after forking,
            a new global instance is created instead of inheriting
            a Context that won't work from the parent process.
        """
        # Double-checked locking: re-test after acquiring the lock so
        # only one thread creates the instance.  The pid check recreates
        # the instance in forked children.
        if (cls._instance is None or cls._instance_pid != os.getpid()
                or cls._instance.closed):
            with cls._instance_lock:
                if (cls._instance is None or cls._instance_pid != os.getpid()
                        or cls._instance.closed):
                    cls._instance = cls(io_threads=io_threads)
                    cls._instance_pid = os.getpid()
        return cls._instance

    def term(self):
        """Close or terminate the context.

        Context termination is performed in the following steps:

        - Any blocking operations currently in progress on sockets open within context shall
          raise :class:`zmq.ContextTerminated`.
          With the exception of socket.close(), any further operations on sockets open within this context
          shall raise :class:`zmq.ContextTerminated`.
        - After interrupting all blocking calls, term shall block until the following conditions are satisfied:
            - All sockets open within context have been closed.
            - For each socket within context, all messages sent on the socket have either been
              physically transferred to a network peer,
              or the socket's linger period set with the zmq.LINGER socket option has expired.

        For further details regarding socket linger behaviour refer to libzmq documentation for ZMQ_LINGER.

        This can be called to close the context by hand. If this is not called,
        the context will automatically be closed when it is garbage collected.
        """
        return super(Context, self).term()

    #-------------------------------------------------------------------------
    # Hooks for ctxopt completion
    #-------------------------------------------------------------------------

    def __dir__(self):
        # Extend attribute completion with context option names so
        # options show up in dir()/tab completion.
        keys = dir(self.__class__)

        for collection in (ctx_opt_names, ):
            keys.extend(collection)
        return keys

    #-------------------------------------------------------------------------
    # Creating Sockets
    #-------------------------------------------------------------------------

    def _add_socket(self, socket):
        # Weakly track the socket so destroy() can close it.
        self._sockets.add(socket)

    def _rm_socket(self, socket):
        # _sockets may be None during interpreter teardown.
        if self._sockets:
            self._sockets.discard(socket)

    def destroy(self, linger=None):
        """Close all sockets associated with this context and then terminate
        the context.

        .. warning::

            destroy involves calling ``zmq_close()``, which is **NOT** threadsafe.
            If there are active sockets in other threads, this must not be called.

        Parameters
        ----------

        linger : int, optional
            If specified, set LINGER on sockets prior to closing them.
        """
        if self.closed:
            return

        # Swap in a fresh WeakSet first so sockets created concurrently
        # are not mixed in with the ones being closed.
        sockets = self._sockets
        self._sockets = WeakSet()
        for s in sockets:
            if s and not s.closed:
                if linger is not None:
                    s.setsockopt(LINGER, linger)
                s.close()

        self.term()

    @property
    def _socket_class(self):
        # Socket class used by socket(); override in subclasses.
        return Socket

    def socket(self, socket_type, **kwargs):
        """Create a Socket associated with this Context.

        Parameters
        ----------
        socket_type : int
            The socket type, which can be any of the 0MQ socket types:
            REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, etc.

        kwargs:
            will be passed to the __init__ method of the socket class.
        """
        if self.closed:
            raise ZMQError(ENOTSUP)
        s = self._socket_class(self, socket_type, **kwargs)
        for opt, value in self.sockopts.items():
            try:
                s.setsockopt(opt, value)
            except ZMQError:
                # ignore ZMQErrors, which are likely for socket options
                # that do not apply to a particular socket type, e.g.
                # SUBSCRIBE for non-SUB sockets.
                pass
        self._add_socket(s)
        return s

    def setsockopt(self, opt, value):
        """set default socket options for new sockets created by this Context

        .. versionadded:: 13.0
        """
        self.sockopts[opt] = value

    def getsockopt(self, opt):
        """get default socket options for new sockets created by this Context

        .. versionadded:: 13.0
        """
        return self.sockopts[opt]

    def _set_attr_opt(self, name, opt, value):
        """set default sockopts as attributes"""
        # Context options go to the context itself; anything else is
        # stored as a default socket option.
        if name in constants.ctx_opt_names:
            return self.set(opt, value)
        else:
            self.sockopts[opt] = value

    def _get_attr_opt(self, name, opt):
        """get default sockopts as attributes"""
        if name in constants.ctx_opt_names:
            return self.get(opt)
        else:
            if opt not in self.sockopts:
                raise AttributeError(name)
            else:
                return self.sockopts[opt]

    def __delattr__(self, key):
        """delete default sockopts as attributes"""
        # Attribute names are uppercased to match constants (e.g.
        # ``del ctx.linger`` removes the LINGER default).
        key = key.upper()
        try:
            opt = getattr(constants, key)
        except AttributeError:
            raise AttributeError("no such socket option: %s" % key)
        else:
            if opt not in self.sockopts:
                raise AttributeError(key)
            else:
                del self.sockopts[opt]
Example #34
0
class Signal(object):
    """Simple signal/slot mechanism.

    Example
    -------

    A simple example for a callback is
    >>> event = Signal()
    >>> event.connect(myfunc)
    >>> # raise the signal
    >>> event("hello")
    >>>
    >>> # functions can be disconnected
    >>> event.disconnect(myfunc)

    Slots are held only through weak references, so care has to be
    taken with bound methods of temporary objects:

    >>> obj = MyClass()
    >>> event.connect(obj.myfunc) # works
    >>> event.connect(MyClass().myfunc) # will not work

    The second case does not work because the instance only exists for
    the duration of the connect call; the weak reference becomes
    invalid as soon as it is garbage collected.
    """

    def __init__(self):
        # Plain functions live in a WeakSet.  Bound methods are split
        # into instance -> {underlying functions} so only the instance
        # is weakly referenced (bound-method objects are short-lived).
        self._functions = WeakSet()
        self._methods = WeakKeyDictionary()

    def __call__(self, *args, **kargs):
        """Raise the event, invoking every connected slot."""
        for callback in self._functions:
            callback(*args, **kargs)
        for receiver, callbacks in self._methods.items():
            for callback in callbacks:
                callback(receiver, *args, **kargs)

    def connect(self, slot):
        """Connect a function or a bound method to the signal."""
        if not inspect.ismethod(slot):
            self._functions.add(slot)
            return
        owner = slot.__self__
        if owner not in self._methods:
            self._methods[owner] = set()
        self._methods[owner].add(slot.__func__)

    def disconnect(self, slot):
        """Disconnect a previously connected slot from the signal."""
        if inspect.ismethod(slot):
            callbacks = self._methods.get(slot.__self__)
            if callbacks is not None:
                callbacks.remove(slot.__func__)
        else:
            self._functions.discard(slot)

    def clear(self):
        """Remove all callbacks from the signal."""
        self._functions.clear()
        self._methods.clear()
Example #35
0
class Camera(Instrument):
    """Generic class for representing cameras.

    This should always be subclassed in order to make a useful instrument.

    The minimum you should do is alter raw_snapshot to acquire and return a
    frame from the camera.  All other acquisition functions can come from that.
    If your camera also supports video streaming (for live view, for example)
    you should override the live_view property and _live_view_function.
    """

    video_priority = DumbNotifiedProperty(False)
    filename = DumbNotifiedProperty('snapshot_%d')
    """Set video_priority to True to avoid disturbing the video stream when
    taking images.  raw_snapshot may ignore the setting, but get_image and by
    extension rgb_image and gray_image will honour it."""

    # Placeholder for a parameters collection; populated by subclasses.
    parameters = None

    filter_function = None
    """This function is run on the image before it's displayed in live view.
    It should accept, and return, an RGB image as its argument."""

    def __init__(self):
        super(Camera, self).__init__()
        # Serialise access to the camera hardware between threads.
        self.acquisition_lock = threading.Lock()
        # Used to wake up threads waiting for a new frame in get_next_frame.
        self._latest_frame_update_condition = threading.Condition()
        self._live_view = False
        self._frame_counter = 0
        # Ensure camera parameters get saved in the metadata.  You may want to
        # override this in subclasses to remove junk (e.g. if some of the
        # parameters are meaningless).
        self.metadata_property_names = tuple(
            self.metadata_property_names) + tuple(
                self.camera_parameter_names())

    def __del__(self):
        self.close()

    def close(self):
        """Stop communication with the camera and allow it to be re-used.

        override in subclass if you want to shut down hardware."""
        self.live_view = False

    def get_next_frame(self,
                       timeout=60,
                       discard_frames=0,
                       assert_live_view=True,
                       raw=True):
        """Wait for the next frame to arrive and return it.

        This function is mostly intended for acquiring frames from a video
        stream that's running in live view - it returns a fresh frame without
        interrupting the video.  If called with timeout=None when live view is
        false, it may take a very long time to return.

        @param: timeout: Maximum length of time to wait for a new frame.  None
        waits forever, but this may be a bad idea (could hang your script).
        @param: discard_frames: Wait for this many new frames before returning
        one.  This option is useful if the camera buffers frames, so you must
        wait for several frames to be acquired before getting a "fresh" one.
        The default setting of 0 means the first new frame that arrives is
        returned.
        @param: assert_live_view: If True (default) raise an assertion error if
        live view is not enabled - this function is intended only to be used
        when that is the case.
        @param: raw: The default (True) returns a raw frame - False returns the
        frame after processing by the filter function if any.
        """
        if assert_live_view:
            assert self.live_view, """Can't wait for the next frame if live view is not enabled!"""
        with self._latest_frame_update_condition:
            # We use the Condition object to block until a new frame appears,
            # and the frame counter to check a new frame has actually arrived.
            # NB the current implementation may be vulnerable to dropped
            # frames, which will probably cause a timeout error.
            # We compare counters with != rather than >= because >= would be
            # vulnerable to overflow.
            target_frame = self._frame_counter + 1 + discard_frames
            # timeout=None means "wait forever", as documented above
            # (previously it crashed with a TypeError on the addition below).
            expiry_time = None if timeout is None else time.time() + timeout
            while self._frame_counter != target_frame and (
                    expiry_time is None or time.time() < expiry_time):
                self._latest_frame_update_condition.wait(
                    timeout)  # wait for a new frame
            # Only raise if the frame genuinely didn't arrive: previously this
            # raised whenever the deadline had passed, even when the awaited
            # frame turned up just in time.
            if self._frame_counter != target_frame:
                raise IOError(
                    "Timed out waiting for a fresh frame from the video stream."
                )
            if raw:
                return self.latest_raw_frame
            else:
                return self.latest_frame

    def raw_snapshot(self):
        """Take a snapshot and return it.  No filtering or conversion.

        Subclasses must override this.  It should return a 2-tuple of a
        success flag and the image array, e.g.
        ``(True, np.zeros((480, 640, 3), dtype=np.uint8))``.
        """
        raise NotImplementedError("Cameras must subclass raw_snapshot!")

    def get_image(self):
        # Deprecated alias kept for backwards compatibility.
        print("Warning: get_image is deprecated, use raw_image() instead.")
        return self.raw_image()

    def raw_image(self, bundle_metadata=False, update_latest_frame=False):
        """Take an image from the camera, respecting video priority.

        If live view is enabled and video_priority is true, return the next
        frame in the video stream.  Otherwise, return a specially-acquired
        image from raw_snapshot.
        """
        frame = None
        if self.live_view and self.video_priority:
            frame = self.get_next_frame(raw=True)
        else:
            status, frame = self.raw_snapshot()
        if update_latest_frame:
            self.latest_raw_frame = frame
        # return it as an ArrayWithAttrs including self.metadata, if requested
        return self.bundle_metadata(frame, bundle_metadata)

    def color_image(self, **kwargs):
        """Get a colour image (bypass filtering, etc.)

        Additional keyword arguments are passed to raw_image."""
        frame = self.raw_image(**kwargs)
        try:
            assert frame.shape[2] == 3
            return frame
        except Exception:
            try:
                assert len(frame.shape) == 2
                # Turn grayscale into colour by duplicating the image along a
                # new third (channel) axis.  NB np.vstack, used previously,
                # concatenates rows and yields a (3N, M) array rather than the
                # (N, M, 3) RGB image we want - dstack stacks depth-wise.
                gray_frame = np.dstack((frame, ) * 3)
                if hasattr(frame, "attrs"):
                    return ArrayWithAttrs(gray_frame, attrs=frame.attrs)
                else:
                    return gray_frame
            except Exception:
                raise Exception(
                    "Couldn't convert the camera's raw image to colour.")

    def gray_image(self, **kwargs):
        """Get a grayscale image (bypass filtering, etc.)

        Additional keyword arguments are passed to raw_image."""
        frame = self.raw_image(**kwargs)
        try:
            assert len(frame.shape) == 2
            return frame
        except Exception:
            try:
                assert frame.shape[2] == 3
                # Average the colour channels, keeping the original dtype.
                return np.mean(frame, axis=2, dtype=frame.dtype)
            except Exception:
                raise Exception(
                    "Couldn't convert the camera's raw image to grayscale.")

    def save_raw_image(self, update_latest_frame=True, attrs=None):
        """Save an image to the default place in the default HDF5 file.

        @param: update_latest_frame: also store the frame as latest_raw_frame.
        @param: attrs: optional dict of extra attributes for the dataset.
        """
        d = self.create_dataset(self.filename,
                                data=self.raw_image(
                                    bundle_metadata=True,
                                    update_latest_frame=update_latest_frame))
        # NB the default used to be a mutable {} shared between calls.
        d.attrs.update(attrs or {})

    _latest_raw_frame = None

    @NotifiedProperty
    def latest_raw_frame(self):
        """The last frame acquired by the camera.

        This property is particularly useful when
        live_view is enabled.  This is before processing by any filter function
        that may be in effect.  May be NxMx3 or NxM for monochrome.  To get a
        fresh frame, use raw_image().  Setting this property will update any
        preview widgets that are in use."""
        return self._latest_raw_frame

    @latest_raw_frame.setter
    def latest_raw_frame(self, frame):
        """Set the latest raw frame, and update the preview widget if any."""
        with self._latest_frame_update_condition:
            self._latest_raw_frame = frame
            self._frame_counter += 1
            # Wake up anything blocked in get_next_frame().
            self._latest_frame_update_condition.notify_all()

        # TODO: use the NotifiedProperty to do this with less code?
        if self._preview_widgets is not None:
            for w in self._preview_widgets:
                try:
                    w.update_image(self.latest_frame)
                except Exception as e:
                    print("something went wrong updating the preview widget")
                    print(e)

    @property
    def latest_frame(self):
        """The last frame acquired (in live view/from GUI), after filtering."""
        if self.filter_function is not None:
            return self.filter_function(self.latest_raw_frame)
        else:
            return self.latest_raw_frame

    def update_latest_frame(self, frame=None):
        """Take a new frame and store it as the "latest frame"

        Returns the image as displayed, including filters, etc.
        This should rarely be used - raw_image, color_image and gray_image are
        the preferred way of acquiring data.  If you supply an image, it will
        use that image as if it was the most recent colour image to be
        acquired.

        Unless you need the filtered image, you should probably use
        raw_image, color_image or gray_image.
        """
        if frame is None:
            frame = self.color_image()
        if frame is not None:
            self.latest_raw_frame = frame

            return self.latest_frame
        else:
            print("Failed to get an image from the camera")

    def camera_parameter_names(self):
        """Return a list of names of parameters that may be set/read.

        This will list the names of all the members of this class that are
        `CameraParameter`s - you should define one of these for each of the
        properties of the camera you'd like to expose.

        If you need to support dynamic properties, I suggest you use a class
        factory, and add CameraParameters at runtime.  You could do this from
        within the class, but that's a courageous move.

        If you need more sophisticated control, I suggest subclassing
        `CameraParameter`, though I can't currently see how that would help...
        """
        # first, identify all the CameraParameter properties we've got
        p_list = []
        for p in dir(self.__class__):
            try:
                if isinstance(getattr(self.__class__, p), CameraParameter):
                    getattr(self, p)  # check the parameter can be read
                    p_list.append(p)
            except Exception:
                # Unreadable parameters are simply omitted.  NB this used to
                # delattr() the offending attribute, destructively mutating
                # the class for every instance (and raising AttributeError
                # itself for attributes inherited from a base class).
                pass
        return p_list

    def get_camera_parameter(self, parameter_name):
        """Return the named parameter from the camera"""
        raise NotImplementedError(
            "You must override get_camera_parameter to use it")

    def set_camera_parameter(self, parameter_name, value):
        """Set the named parameter on the camera"""
        raise NotImplementedError(
            "You must override set_camera_parameter to use it")

    _live_view = False

    @NotifiedProperty
    def live_view(self):
        """Whether the camera is currently streaming and displaying video"""
        return self._live_view

    @live_view.setter
    def live_view(self, live_view):
        """Turn live view on and off.

        This is used to start and stop streaming of the camera feed.  The
        default implementation just repeatedly takes snapshots, but subclasses
        are encouraged to override that behaviour by starting/stopping a stream
        and using a callback function to update self.latest_raw_frame."""
        if live_view:
            if self._live_view:
                return  # do nothing if it's going already.
            print("starting live view thread")
            try:
                self._frame_counter = 0
                self._live_view_stop_event = threading.Event()
                self._live_view_thread = threading.Thread(
                    target=self._live_view_function)
                self._live_view_thread.start()
                self._live_view = True
            except AttributeError as e:  # if any of the attributes aren't there
                print("Error: %s" % e)
        else:
            if not self._live_view:
                return  # do nothing if it's not running.
            print("stopping live view thread")
            try:
                self._live_view_stop_event.set()
                self._live_view_thread.join()
                del (self._live_view_stop_event, self._live_view_thread)
                self._live_view = False
            except AttributeError:
                raise Exception(
                    "Tried to stop live view but it doesn't appear to be running!"
                )

    def _live_view_function(self):
        """This function should only EVER be executed by _live_view_changed.

        Loop until the event tells us to stop, constantly taking snapshots.
        Ideally you should override live_view to start and stop streaming
        from the camera, using a callback function to update latest_raw_frame.
        """
        while not self._live_view_stop_event.wait(timeout=0.1):
            success, frame = self.raw_snapshot()
            self.update_latest_frame(frame)

    legacy_click_callback = None

    def set_legacy_click_callback(self, function):
        """Set a function to be called when the image is clicked.

        Warning: this is only for compatibility with old code and will be removed
        once camera_stage_mapper is updated!
        """
        self.legacy_click_callback = function
        if self._preview_widgets is not None:
            for w in self._preview_widgets:
                w.add_legacy_click_callback(self.legacy_click_callback)

    _preview_widgets = None

    def get_preview_widget(self):
        """A Qt Widget that can be used as a viewfinder for the camera.

        In live mode, this is continuously updated.  It's also updated whenever
        a snapshot is taken using update_latest_frame.  Currently this returns
        a single widget instance - in future it might be able to generate (and
        keep updated) multiple widgets."""
        if self._preview_widgets is None:
            self._preview_widgets = WeakSet()
        new_widget = CameraPreviewWidget()
        self._preview_widgets.add(new_widget)
        if self.legacy_click_callback is not None:
            new_widget.add_legacy_click_callback(self.legacy_click_callback)
        return new_widget

    def get_control_widget(self):
        """Return a widget that contains the camera controls but no image."""
        return CameraControlWidget(self)

    def get_parameters_widget(self):
        """Return a widget that controls the camera's settings."""
        return CameraParametersWidget(self)

    def get_qt_ui(self, control_only=False, parameters_only=False):
        """Create a QWidget that controls the camera.

        Specifying control_only=True returns just the controls for the camera.
        Otherwise, you get both the controls and a preview window.
        """
        if control_only:
            return self.get_control_widget()
        elif parameters_only:
            # NB this previously passed self as an extra positional argument,
            # raising TypeError whenever parameters_only=True was used.
            return self.get_parameters_widget()
        else:
            return CameraUI(self)
Example #36
0
class TileableData(EntityData, Tileable):
    # Per-instance attributes that are not serializable fields.
    __slots__ = '_cix', '_entities'
    # '_cix' is a derived cache, so it is excluded when copying.
    _no_copy_attrs_ = SerializableWithKey._no_copy_attrs_ | {'_cix'}

    # optional fields
    # `nsplits` means the sizes of chunks for each dimension
    _nsplits = TupleField('nsplits',
                          ValueType.tuple(ValueType.uint64),
                          on_serialize=on_serialize_nsplits)

    def __init__(self, *args, **kwargs):
        """Initialize the tileable data.

        Normalizes any '_nsplits' keyword to a tuple of tuples, sorts the
        chunks by index if present, and sets up the weak set of entities.
        """
        if kwargs.get('_nsplits', None) is not None:
            kwargs['_nsplits'] = tuple(tuple(s) for s in kwargs['_nsplits'])

        super().__init__(*args, **kwargs)

        if hasattr(self, '_chunks') and self._chunks:
            # Keep chunks in a deterministic order, sorted by their index.
            self._chunks = sorted(self._chunks, key=attrgetter('index'))

        # Entities wrapping this data; weak refs so they can be GC'd freely.
        self._entities = WeakSet()

    @property
    def chunk_shape(self):
        """Number of chunks along each dimension (None if nsplits is unset)."""
        if hasattr(self, '_nsplits') and self._nsplits is not None:
            return tuple(map(len, self._nsplits))

    @property
    def chunks(self) -> List["Chunk"]:
        """The list of chunks, or None if this tileable has none yet."""
        return getattr(self, '_chunks', None)

    @property
    def nsplits(self):
        """Sizes of chunks for each dimension, or None if unset."""
        return getattr(self, '_nsplits', None)

    @nsplits.setter
    def nsplits(self, new_nsplits):
        self._nsplits = new_nsplits

    @property
    def params(self) -> dict:
        # params return the properties which useful to rebuild a new tileable object
        return dict()

    @property
    def cix(self):
        """Chunk indexer, lazily created and cached in the '_cix' slot.

        If reading or writing the cache slot fails with TypeError or
        ValueError, fall back to returning a fresh indexer instead.
        """
        if self.ndim == 0:
            return ChunksIndexer(self)

        try:
            if getattr(self, '_cix', None) is None:
                self._cix = ChunksIndexer(self)
            return self._cix
        except (TypeError, ValueError):
            return ChunksIndexer(self)

    @property
    def entities(self):
        """The weak set of entities currently attached to this data."""
        return self._entities

    def is_coarse(self):
        """Return True if this tileable has no chunks (i.e. is not yet tiled)."""
        return not hasattr(self, '_chunks') or self._chunks is None or len(
            self._chunks) == 0

    @enter_build_mode
    def attach(self, entity):
        """Register *entity* as a wrapper of this data."""
        self._entities.add(entity)

    @enter_build_mode
    def detach(self, entity):
        """Unregister *entity*; a no-op if it was never attached."""
        self._entities.discard(entity)

    def execute(self, session=None, **kw):
        """Run this tileable in *session* (the default/local session if None)."""
        from .session import Session

        if session is None:
            session = Session.default_or_local()
        return session.run(self, **kw)

    def fetch(self, session=None, **kw):
        """Fetch the computed result from *session* (default/local if None)."""
        from .session import Session

        if session is None:
            session = Session.default_or_local()
        return session.fetch(self, **kw)

    def _set_execute_session(self, session):
        # Register with the cleaner so session resources are released when
        # this object is garbage collected.
        _cleaner.register(self, session)

    # Write-only property: assigning to it registers the execute session.
    _execute_session = property(fset=_set_execute_session)
Example #37
0
class Signal:
    ##  Signal types.
    #   These indicate the type of a signal, that is, how the signal handles calling the connected
    #   slots.
    #   - Direct connections immediately call the connected slots from the thread that called emit().
    #   - Auto connections will push the call onto the event loop if the current thread is
    #     not the main thread, but make a direct call if it is.
    #   - Queued connections will always push
    #     the call on to the event loop.
    Direct = 1
    Auto = 2
    Queued = 3

    ##  Initialize the instance.
    #
    #   \param kwargs Keyword arguments.
    #                 Possible keywords:
    #                 - type: The signal type. Defaults to Auto.
    def __init__(self, **kwargs):
        # Weak containers: a garbage-collected listener drops off silently.
        self.__functions = WeakSet()
        self.__methods = WeakKeyDictionary()
        self.__signals = WeakSet()
        self.__type = kwargs.get("type", Signal.Auto)

        # While emitting, connect/disconnect requests are deferred into these
        # queues so we never mutate a container we are iterating over.
        self.__emitting = False
        self.__connect_queue = []
        self.__disconnect_queue = []

    ##  \exception NotImplementedError
    def __call__(self):
        raise NotImplementedError("Call emit() to emit a signal")

    ##  Get type of the signal
    #   \return \type{int} Direct(1), Auto(2) or Queued(3)
    def getType(self):
        return self.__type

    ##  Emit the signal which indirectly calls all of the connected slots.
    #
    #   \param args The positional arguments to pass along.
    #   \param kargs The keyword arguments to pass along.
    #
    #   \note If the Signal type is Queued and this is not called from the application thread
    #   the call will be posted as an event to the application main thread, which means the
    #   function will be called on the next application event loop tick.
    def emit(self, *args, **kargs):
        try:
            if self.__type == Signal.Queued:
                Signal._app.functionEvent(CallFunctionEvent(self.emit, args, kargs))
                return

            if self.__type == Signal.Auto:
                if threading.current_thread() is not Signal._app.getMainThread():
                    Signal._app.functionEvent(CallFunctionEvent(self.emit, args, kargs))
                    return
        except AttributeError: # If Signal._app is not set
            return

        self.__emitting = True

        # Call handler functions
        for func in self.__functions:
            func(*args, **kargs)

        # Call handler methods
        for dest, funcs in self.__methods.items():
            for func in funcs:
                func(dest, *args, **kargs)

        # Emit connected signals
        for signal in self.__signals:
            signal.emit(*args, **kargs)

        # Flush the connect/disconnect requests deferred during emission.
        self.__emitting = False
        for connector in self.__connect_queue:
            self.connect(connector)
        self.__connect_queue.clear()
        for connector in self.__disconnect_queue:
            self.disconnect(connector)
        # BUG FIX: this previously cleared __connect_queue a second time,
        # leaving stale entries in __disconnect_queue that were silently
        # re-applied on every subsequent emit (disconnecting slots that had
        # since been reconnected).
        self.__disconnect_queue.clear()

    ##  Connect to this signal.
    #   \param connector The signal or slot (function) to connect.
    def connect(self, connector):
        if self.__emitting:
            # When we try to connect to a signal we change the dictionary of connectors.
            # This will cause an Exception since we are iterating over a dictionary that changed.
            # So instead, defer the connections until after we are done emitting.
            self.__connect_queue.append(connector)
            return

        # isinstance (rather than an exact type check) so Signal subclasses
        # are chained as signals too; previously a subclass instance fell
        # through to __functions and emitting it raised NotImplementedError
        # via __call__.
        if isinstance(connector, Signal):
            if connector is self:
                return
            self.__signals.add(connector)
        elif inspect.ismethod(connector):
            if connector.__self__ not in self.__methods:
                self.__methods[connector.__self__] = set()

            self.__methods[connector.__self__].add(connector.__func__)
        else:
            self.__functions.add(connector)

    ##  Disconnect from this signal.
    #   \param connector The signal or slot (function) to disconnect.
    def disconnect(self, connector):
        if self.__emitting:
            # See above.
            self.__disconnect_queue.append(connector)
            return

        try:
            if connector in self.__signals:
                self.__signals.remove(connector)
            elif inspect.ismethod(connector) and connector.__self__ in self.__methods:
                self.__methods[connector.__self__].remove(connector.__func__)
            else:
                if connector in self.__functions:
                    self.__functions.remove(connector)

        except KeyError: #Ignore errors when connector is not connected to this signal.
            pass

    ##  Disconnect all connected slots.
    def disconnectAll(self):
        if self.__emitting:
            raise RuntimeError("Tried to disconnect signal while signal is being emitted")

        self.__functions.clear()
        self.__methods.clear()
        self.__signals.clear()

    ##  private:

    #   To avoid circular references when importing Application, this should be
    #   set by the Application instance.
    _app = None
Example #38
0
class TestWeakSet(unittest.TestCase):
    def setUp(self):
        # Build the shared fixtures.  Strong references to every element are
        # kept on self so the weak sets' contents survive until a test
        # explicitly deletes them.
        # need to keep references to them
        self.items = [ustr(c) for c in ('a', 'b', 'c')]
        self.items2 = [ustr(c) for c in ('x', 'y', 'z')]
        self.ab_items = [ustr(c) for c in 'ab']
        self.abcde_items = [ustr(c) for c in 'abcde']
        self.def_items = [ustr(c) for c in 'def']
        self.ab_weakset = WeakSet(self.ab_items)
        self.abcde_weakset = WeakSet(self.abcde_items)
        self.def_weakset = WeakSet(self.def_items)
        self.letters = [ustr(c) for c in string.ascii_letters]
        self.s = WeakSet(self.items)
        self.d = dict.fromkeys(self.items)
        self.obj = ustr('F')
        self.fs = WeakSet([self.obj])

    def test_methods(self):
        # WeakSet should expose every public method that set has.
        weaksetmethods = dir(WeakSet)
        for method in dir(set):
            if method == 'test_c_api' or method.startswith('_'):
                continue
            self.assertIn(method, weaksetmethods,
                          "WeakSet missing method " + method)

    def test_new_or_init(self):
        # The constructor accepts at most one iterable argument.
        self.assertRaises(TypeError, WeakSet, [], 2)

    def test_len(self):
        # len() tracks live references; dropping the last strong reference
        # shrinks the set.
        self.assertEqual(len(self.s), len(self.d))
        self.assertEqual(len(self.fs), 1)
        del self.obj
        self.assertEqual(len(self.fs), 0)

    def test_contains(self):
        # Membership matches a plain dict of the same keys, tolerates
        # non-weakrefable values, and fails after the referent dies.
        for c in self.letters:
            self.assertEqual(c in self.s, c in self.d)
        # 1 is not weakref'able, but that TypeError is caught by __contains__
        self.assertNotIn(1, self.s)
        self.assertIn(self.obj, self.fs)
        del self.obj
        self.assertNotIn(ustr('F'), self.fs)

    def test_union(self):
        # union() returns a new WeakSet, leaves the receiver untouched,
        # accepts any iterable of weakrefable items, and the result holds
        # only weak references (elements stay only while items2 keeps them).
        u = self.s.union(self.items2)
        for c in self.letters:
            self.assertEqual(c in u, c in self.d or c in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(u), WeakSet)
        self.assertRaises(TypeError, self.s.union, [[]])
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet(self.items + self.items2)
            c = C(self.items2)
            self.assertEqual(self.s.union(c), x)
            del c
        self.assertEqual(len(u), len(self.items) + len(self.items2))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(u), len(self.items) + len(self.items2))

    def test_or(self):
        # The | operator agrees with union() for set and frozenset operands.
        i = self.s.union(self.items2)
        self.assertEqual(self.s | set(self.items2), i)
        self.assertEqual(self.s | frozenset(self.items2), i)

    def test_intersection(self):
        # intersection() returns a new WeakSet, leaves the receiver
        # untouched, and the result tracks its referents' lifetimes.
        s = WeakSet(self.letters)
        i = s.intersection(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.items2 and c in self.letters)
        self.assertEqual(s, WeakSet(self.letters))
        self.assertEqual(type(i), WeakSet)
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet([])
            self.assertEqual(i.intersection(C(self.items)), x)
        self.assertEqual(len(i), len(self.items2))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(i), len(self.items2))

    def test_isdisjoint(self):
        # self.items2 shares no elements with self.s; self.letters does.
        self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
        self.assertTrue(not self.s.isdisjoint(WeakSet(self.letters)))

    def test_and(self):
        # The & operator agrees with intersection() for set and frozenset.
        i = self.s.intersection(self.items2)
        self.assertEqual(self.s & set(self.items2), i)
        self.assertEqual(self.s & frozenset(self.items2), i)

    def test_difference(self):
        # difference() returns a new WeakSet, leaves the receiver untouched,
        # and rejects non-weakrefable elements in the operand.
        i = self.s.difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c not in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.difference, [[]])

    def test_sub(self):
        # The - operator agrees with difference() for set and frozenset.
        i = self.s.difference(self.items2)
        self.assertEqual(self.s - set(self.items2), i)
        self.assertEqual(self.s - frozenset(self.items2), i)

    def test_symmetric_difference(self):
        # symmetric_difference() returns a new WeakSet whose elements are in
        # exactly one operand, and whose contents track referent lifetimes.
        i = self.s.symmetric_difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, (c in self.d) ^ (c in self.items2))
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
        self.assertEqual(len(i), len(self.items) + len(self.items2))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(i), len(self.items) + len(self.items2))

    def test_xor(self):
        # The ^ operator agrees with symmetric_difference().
        i = self.s.symmetric_difference(self.items2)
        self.assertEqual(self.s ^ set(self.items2), i)
        self.assertEqual(self.s ^ frozenset(self.items2), i)

    def test_sub_and_super(self):
        # Subset/superset comparisons between WeakSets, plus the plain-set
        # issubset/issuperset behaviour for reference.
        self.assertTrue(self.ab_weakset <= self.abcde_weakset)
        self.assertTrue(self.abcde_weakset <= self.abcde_weakset)
        self.assertTrue(self.abcde_weakset >= self.ab_weakset)
        self.assertFalse(self.abcde_weakset <= self.def_weakset)
        self.assertFalse(self.abcde_weakset >= self.def_weakset)
        self.assertTrue(set('a').issubset('abc'))
        self.assertTrue(set('abc').issuperset('a'))
        self.assertFalse(set('a').issubset('cbs'))
        self.assertFalse(set('cbs').issuperset('a'))

    def test_lt(self):
        # < is a strict subset test: false for equal sets and empty vs empty.
        self.assertTrue(self.ab_weakset < self.abcde_weakset)
        self.assertFalse(self.abcde_weakset < self.def_weakset)
        self.assertFalse(self.ab_weakset < self.ab_weakset)
        self.assertFalse(WeakSet() < WeakSet())

    def test_gt(self):
        # > is a strict superset test: false for equal sets and empty vs empty.
        self.assertTrue(self.abcde_weakset > self.ab_weakset)
        self.assertFalse(self.abcde_weakset > self.def_weakset)
        self.assertFalse(self.ab_weakset > self.ab_weakset)
        self.assertFalse(WeakSet() > WeakSet())

    def test_gc(self):
        # Create a nest of cycles to exercise overall ref count check
        s = WeakSet(Foo() for i in range(1000))
        for elem in s:
            elem.cycle = s
            elem.sub = elem
            elem.set = WeakSet([elem])

    def test_subclass_with_custom_hash(self):
        # Bug #1257731: a WeakSet subclass with a custom __hash__ must be
        # usable as an element of an ordinary set.
        class H(WeakSet):
            def __hash__(self):
                return int(id(self) & 0x7fffffff)

        s = H()
        f = set()
        f.add(s)
        self.assertIn(s, f)
        f.remove(s)
        f.add(s)
        f.discard(s)

    def test_init(self):
        # Calling __init__ again replaces the contents; bad arguments raise.
        s = WeakSet()
        s.__init__(self.items)
        self.assertEqual(s, self.s)
        s.__init__(self.items2)
        self.assertEqual(s, WeakSet(self.items2))
        self.assertRaises(TypeError, s.__init__, s, 2)
        self.assertRaises(TypeError, s.__init__, 1)

    def test_constructor_identity(self):
        # Constructing from another WeakSet copies it rather than aliasing it.
        s = WeakSet(self.items)
        t = WeakSet(s)
        self.assertNotEqual(id(s), id(t))

    def test_hash(self):
        # WeakSets are mutable and therefore unhashable.
        self.assertRaises(TypeError, hash, self.s)

    def test_clear(self):
        # clear() empties the set in place.
        self.s.clear()
        self.assertEqual(self.s, WeakSet([]))
        self.assertEqual(len(self.s), 0)

    def test_copy(self):
        # copy() returns an equal but distinct object.
        dup = self.s.copy()
        self.assertEqual(self.s, dup)
        self.assertNotEqual(id(self.s), id(dup))

    def test_add(self):
        # add() is idempotent, rejects non-weakrefable elements, and treats
        # equal objects as duplicates.
        x = ustr('Q')
        self.s.add(x)
        self.assertIn(x, self.s)
        dup = self.s.copy()
        self.s.add(x)
        self.assertEqual(self.s, dup)
        self.assertRaises(TypeError, self.s.add, [])
        self.fs.add(Foo())
        self.assertTrue(len(self.fs) == 1)
        self.fs.add(self.obj)
        self.assertTrue(len(self.fs) == 1)

    def test_remove(self):
        # remove() deletes a present element, raises KeyError for an absent
        # one and TypeError for a non-weakrefable one.
        x = ustr('a')
        self.s.remove(x)
        self.assertNotIn(x, self.s)
        self.assertRaises(KeyError, self.s.remove, x)
        self.assertRaises(TypeError, self.s.remove, [])

    def test_discard(self):
        # discard() removes a present element and ignores an absent one, but
        # still rejects non-weakrefable arguments.
        a, q = ustr('a'), ustr('Q')
        self.s.discard(a)
        self.assertNotIn(a, self.s)
        self.s.discard(q)
        self.assertRaises(TypeError, self.s.discard, [])

    def test_pop(self):
        # pop() removes and returns arbitrary elements until the set is
        # empty, then raises KeyError.
        for i in range(len(self.s)):
            elem = self.s.pop()
            self.assertNotIn(elem, self.s)
        self.assertRaises(KeyError, self.s.pop)

    def test_update(self):
        # update() mutates in place, returns None, and rejects
        # non-weakrefable elements.
        retval = self.s.update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)
        self.assertRaises(TypeError, self.s.update, [[]])

    def test_update_set(self):
        # update() also accepts a real set as its argument.
        self.s.update(set(self.items2))
        for elem in self.items + self.items2:
            self.assertIn(elem, self.s)

    def test_ior(self):
        # |= performs an in-place union.
        self.s |= set(self.items2)
        for elem in self.items + self.items2:
            self.assertIn(elem, self.s)

    def test_intersection_update(self):
        # intersection_update() mutates in place and returns None.
        self.assertIsNone(self.s.intersection_update(self.items2))
        for elem in self.items + self.items2:
            # Only elements present in both inputs survive.
            survives = elem in self.items and elem in self.items2
            (self.assertIn if survives else self.assertNotIn)(elem, self.s)
        self.assertRaises(TypeError, self.s.intersection_update, [[]])

    def test_iand(self):
        self.s &= set(self.items2)
        for elem in self.items + self.items2:
            # &= keeps only elements present in both inputs.
            survives = elem in self.items and elem in self.items2
            (self.assertIn if survives else self.assertNotIn)(elem, self.s)

    def test_difference_update(self):
        # difference_update() mutates in place and returns None.
        self.assertIsNone(self.s.difference_update(self.items2))
        for elem in self.items + self.items2:
            # Only elements unique to the original set survive.
            kept = elem in self.items and elem not in self.items2
            (self.assertIn if kept else self.assertNotIn)(elem, self.s)
        self.assertRaises(TypeError, self.s.difference_update, [[]])
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_isub(self):
        self.s -= set(self.items2)
        for elem in self.items + self.items2:
            # -= removes everything that appears in the right operand.
            kept = elem in self.items and elem not in self.items2
            (self.assertIn if kept else self.assertNotIn)(elem, self.s)

    def test_symmetric_difference_update(self):
        # symmetric_difference_update() mutates in place and returns None.
        self.assertIsNone(self.s.symmetric_difference_update(self.items2))
        for elem in self.items + self.items2:
            # Exactly the one-sided elements remain.
            one_sided = (elem in self.items) ^ (elem in self.items2)
            (self.assertIn if one_sided else self.assertNotIn)(elem, self.s)
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_ixor(self):
        self.s ^= set(self.items2)
        for elem in self.items + self.items2:
            # ^= keeps exactly the one-sided elements.
            one_sided = (elem in self.items) ^ (elem in self.items2)
            (self.assertIn if one_sided else self.assertNotIn)(elem, self.s)

    def test_inplace_on_self(self):
        working = self.s.copy()
        # Union and intersection with itself leave the set unchanged.
        working |= working
        self.assertEqual(working, self.s)
        working &= working
        self.assertEqual(working, self.s)
        # Difference with itself empties it.
        working -= working
        self.assertEqual(working, WeakSet())
        # Symmetric difference with itself also empties it.
        working = self.s.copy()
        working ^= working
        self.assertEqual(working, WeakSet())

    def test_eq(self):
        # issue 5964: the == operator itself is under test, so assertTrue /
        # assertFalse are used rather than assertEqual.
        self.assertTrue(self.s == self.s)
        self.assertTrue(self.s == WeakSet(self.items))
        # == is type-strict: equal contents in another container type,
        # a different WeakSet, or an unrelated value never compare equal.
        for other in (set(self.items), list(self.items),
                      tuple(self.items), WeakSet([Foo]), 1):
            self.assertFalse(self.s == other)

    def test_ne(self):
        # A WeakSet never equals a plain set, even with the same elements.
        self.assertTrue(self.s != set(self.items))
        # Two empty WeakSets are equal, so != must be false.
        empty_a = WeakSet()
        empty_b = WeakSet()
        self.assertFalse(empty_a != empty_b)

    def test_weak_destroy_while_iterating(self):
        """An element dying mid-iteration must not crash the iterator.

        Regression test for issue #7105.
        """
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        # Create new items to be sure no-one else holds a reference
        items = [ustr(c) for c in ('a', 'b', 'c')]
        s = WeakSet(items)
        it = iter(s)
        next(it)  # Trigger internal iteration
        # Destroy an item
        del items[-1]
        gc.collect()  # just in case
        # We have removed either the first consumed items, or another one
        self.assertIn(len(list(it)), [len(items), len(items) - 1])
        del it
        # The removal has been committed
        self.assertEqual(len(s), len(items))

    def test_weak_destroy_and_mutate_while_iterating(self):
        """Mutating the set while an element dies mid-iteration must be safe.

        Regression test for issue #7105; the iterator-reference special
        case is issue #20006.
        """
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        items = [ustr(c) for c in string.ascii_letters]
        s = WeakSet(items)

        @contextlib.contextmanager
        def testcontext():
            # Yields a value scheduled for weak removal while an iterator
            # over the set is live; the finally clause drops the iterator
            # so pending removals are committed.
            try:
                it = iter(s)
                # Start iterator
                yielded = ustr(str(next(it)))
                # Schedule an item for removal and recreate it
                u = ustr(str(items.pop()))
                if yielded == u:
                    # The iterator still has a reference to the removed item,
                    # advance it (issue #20006).
                    next(it)
                gc.collect()  # just in case
                yield u
            finally:
                it = None  # should commit all removals

        with testcontext() as u:
            self.assertNotIn(u, s)
        with testcontext() as u:
            self.assertRaises(KeyError, s.remove, u)
        self.assertNotIn(u, s)
        with testcontext() as u:
            s.add(u)
        self.assertIn(u, s)
        t = s.copy()
        with testcontext() as u:
            s.update(t)
        self.assertEqual(len(s), len(t))
        with testcontext() as u:
            s.clear()
        self.assertEqual(len(s), 0)

    def test_len_cycles(self):
        """len() must cope with elements kept alive only by reference cycles."""
        N = 20
        items = [RefCycle() for i in range(N)]
        s = WeakSet(items)
        del items
        # Hold an iterator so at most one element stays pinned by it.
        it = iter(s)
        try:
            next(it)
        except StopIteration:
            pass
        gc.collect()
        n1 = len(s)
        del it
        gc.collect()
        n2 = len(s)
        # one item may be kept alive inside the iterator
        self.assertIn(n1, (0, 1))
        self.assertEqual(n2, 0)

    def test_len_race(self):
        """len() stays within bounds while cyclic GC races with iteration."""
        # Extended sanity checks for len() in the face of cyclic collection
        self.addCleanup(gc.set_threshold, *gc.get_threshold())
        # Sweep GC thresholds so collection fires at varying points.
        for th in range(1, 100):
            N = 20
            gc.collect(0)
            gc.set_threshold(th, th, th)
            items = [RefCycle() for i in range(N)]
            s = WeakSet(items)
            del items
            # All items will be collected at next garbage collection pass
            it = iter(s)
            try:
                next(it)
            except StopIteration:
                pass
            n1 = len(s)
            del it
            n2 = len(s)
            # len() may lag behind collection, but never exceeds N and
            # never grows once the iterator is gone.
            self.assertGreaterEqual(n1, 0)
            self.assertLessEqual(n1, N)
            self.assertGreaterEqual(n2, 0)
            self.assertLessEqual(n2, n1)

    def test_repr(self):
        """repr() of a WeakSet delegates to its underlying data set."""
        # Use a unittest assertion instead of a bare `assert`, which is
        # silently stripped when Python runs with -O.
        self.assertEqual(repr(self.s), repr(self.s.data))

    def test_abc(self):
        """WeakSet registers with the collections ABCs (Set, MutableSet)."""
        self.assertIsInstance(self.s, Set)
        self.assertIsInstance(self.s, MutableSet)
Example #39
0
class Publisher(object):
    """The publish half of a single-topic broadcast ("pub-sub") system.

    Despite the pub-sub name there is only one topic, so every message is
    broadcast to all subscribers. This class accepts messages, keeps a
    bounded cache of recent ones, hands out subscribers (each with its own
    queue), and pushes every new message into all live queues.

    """
    def __init__(self, size):
        """Instantiate a new publisher.

        size (int): the number of messages to keep in cache.

        """
        # A bounded deque: appending beyond maxlen silently drops the
        # oldest cached message.
        self._cache = deque(maxlen=size)
        # A WeakSet, so a queue disappears automatically as soon as no
        # subscriber is fetching from it any more.
        self._sub_queues = WeakSet()

    def put(self, event, data):
        """Dispatch a new item to all subscribers.

        See format_event for details about the parameters.

        event (unicode): the type of event the client will receive.
        data (unicode): the associated data.

        """
        # Microseconds since epoch double as the message's event ID.
        key = int(time.time() * 1000000)
        msg = format_event("%x" % key, event, data)
        # Cache it, then fan out to every live subscriber queue.
        self._cache.append((key, msg))
        for sub_queue in self._sub_queues:
            sub_queue.put(msg)

    def get_subscriber(self, last_event_id=None):
        """Obtain a new subscriber.

        The returned subscriber will receive all messages after the one
        with the given index (if they are still in the cache).

        last_event_id (unicode|None): the ID of the last message the
            client did receive, to request the one generated since
            then to be sent again. If not given no past message will
            be sent.

        return (Subscriber): a new subscriber instance.

        """
        sub_queue = Queue()
        has_valid_id = (last_event_id is not None
                        and re.match("^[0-9A-Fa-f]+$", last_event_id))
        if has_valid_id:
            last_event_key = int(last_event_id, 16)
            cache_covers_it = (len(self._cache) > 0
                               and last_event_key >= self._cache[0][0])
            if cache_covers_it:
                # Replay every cached event newer than the given one.
                for key, msg in self._cache:
                    if key > last_event_key:
                        sub_queue.put(msg)
            else:
                # Some events may already have been dropped from the
                # cache: tell the client to reinitialize.
                sub_queue.put(b"event:reinit\n\n")
        # Keep (a weak reference to) the queue and hand out a subscriber
        # bound to it.
        self._sub_queues.add(sub_queue)
        return Subscriber(sub_queue)
Example #40
0
class VersioningManager(object):
    """Wire PostgreSQL-based audit versioning into SQLAlchemy.

    Tracks versioned model classes as they are instrumented, installs the
    audit tables/triggers rendered from the bundled SQL templates, and
    records transaction metadata before each flush that touches a
    versioned entity.
    """

    # Default actor class; may be overridden per instance via __init__.
    _actor_cls = None

    def __init__(
        self,
        actor_cls=None,
        schema_name=None,
        use_statement_level_triggers=True
    ):
        """Create a manager.

        :param actor_cls: class (or its declarative-registry name) acting
            as the "actor" of audited activities; defaults to the class
            attribute when None.
        :param schema_name: optional schema for the audit tables.
        :param use_statement_level_triggers: prefer statement-level
            triggers when the server supports them (PostgreSQL >= 10).
        """
        if actor_cls is not None:
            self._actor_cls = actor_cls
        # Extra values merged into every audit transaction row.
        self.values = {}
        # (target, event name, handler) triples attached/detached as a
        # group by attach_listeners() / remove_listeners().
        self.listeners = (
            (
                orm.mapper,
                'instrument_class',
                self.instrument_versioned_classes
            ),
            (
                orm.mapper,
                'after_configured',
                self.configure_versioned_classes
            ),
            (
                orm.session.Session,
                'before_flush',
                self.receive_before_flush,
            ),
        )
        self.schema_name = schema_name
        self.table_listeners = self.get_table_listeners()
        # Versioned classes collected during instrumentation, awaiting
        # configuration; weakly referenced so dropped classes can be GC'd.
        self.pending_classes = WeakSet()
        self.cached_ddls = {}
        self.use_statement_level_triggers = use_statement_level_triggers

    def get_transaction_values(self):
        """Return the values recorded on each audit transaction row."""
        return self.values

    @contextmanager
    def disable(self, session):
        """Context manager: turn versioning off for *session*'s transaction."""
        session.execute(
            "SET LOCAL postgresql_audit.enable_versioning = 'false'"
        )
        try:
            yield
        finally:
            # Always re-enable, even if the body raised.
            session.execute(
                "SET LOCAL postgresql_audit.enable_versioning = 'true'"
            )

    def render_tmpl(self, tmpl_name):
        """Render SQL template *tmpl_name* with this manager's schema context.

        '%' and '$$' are escaped so the output safely survives later
        %-interpolation and dollar-quoting stages.
        """
        file_contents = read_file(
            'templates/{}'.format(tmpl_name)
        ).replace('%', '%%').replace('$$', '$$$$')
        tmpl = string.Template(file_contents)
        context = dict(schema_name=self.schema_name)

        if self.schema_name is None:
            context['schema_prefix'] = ''
            context['revoke_cmd'] = ''
        else:
            context['schema_prefix'] = '{}.'.format(self.schema_name)
            context['revoke_cmd'] = (
                'REVOKE ALL ON {schema_prefix}activity FROM public;'
            ).format(**context)

        temp = tmpl.substitute(**context)
        return temp

    def create_operators(self, target, bind, **kwargs):
        """Install jsonb helper operators, with shims for servers older
        than 9.5 / 9.6 / 10."""
        if bind.dialect.server_version_info < (9, 5, 0):
            StatementExecutor(self.render_tmpl('operators_pre95.sql'))(
                target, bind, **kwargs
            )
        if bind.dialect.server_version_info < (9, 6, 0):
            StatementExecutor(self.render_tmpl('operators_pre96.sql'))(
                target, bind, **kwargs
            )
        if bind.dialect.server_version_info < (10, 0):
            operators_template = self.render_tmpl('operators_pre100.sql')
            StatementExecutor(operators_template)(target, bind, **kwargs)
        operators_template = self.render_tmpl('operators.sql')
        StatementExecutor(operators_template)(target, bind, **kwargs)

    def create_audit_table(self, target, bind, **kwargs):
        """Create the activity table and triggers, using statement-level
        triggers when enabled and supported (PostgreSQL >= 10)."""
        sql = ''
        if (
            self.use_statement_level_triggers and
            bind.dialect.server_version_info >= (10, 0)
        ):
            sql += self.render_tmpl('create_activity_stmt_level.sql')
            sql += self.render_tmpl('audit_table_stmt_level.sql')
        else:
            sql += self.render_tmpl('create_activity_row_level.sql')
            sql += self.render_tmpl('audit_table_row_level.sql')
        StatementExecutor(sql)(target, bind, **kwargs)

    def get_table_listeners(self):
        """Build the DDL listeners for the audit tables.

        Schema create/drop hooks are only included when a schema name is
        configured.
        """
        listeners = {'audit_transactions': []}

        listeners['audit_activities'] = [
            ('after_create', sa.schema.DDL(
                self.render_tmpl('jsonb_change_key_name.sql')
            )),
            ('after_create', self.create_audit_table),
            ('after_create', self.create_operators)
        ]
        if self.schema_name is not None:
            listeners['audit_transactions'] = [
                ('before_create', sa.schema.DDL(
                    self.render_tmpl('create_schema.sql')
                )),
                ('after_drop', sa.schema.DDL(
                    self.render_tmpl('drop_schema.sql')
                )),
            ]
        return listeners

    def audit_table(self, table, exclude_columns=None):
        """Register *table* for auditing via the audit_table() SQL function.

        :param table: SQLAlchemy table to audit.
        :param exclude_columns: iterable of column names to exclude.
        :raises ImproperlyConfigured: if an excluded column does not exist.
        """
        args = [table.name]
        if exclude_columns:
            for column in exclude_columns:
                if column not in table.c:
                    # Fixed: error message previously contained a doubled
                    # quote after the table name ("Table '{}''").
                    raise ImproperlyConfigured(
                        "Could not configure versioning. Table '{}' does "
                        "not have a column named '{}'.".format(
                            table.name, column
                        )
                    )
            args.append(array(exclude_columns))

        if self.schema_name is None:
            func = sa.func.audit_table
        else:
            func = getattr(getattr(sa.func, self.schema_name), 'audit_table')
        query = sa.select([func(*args)])
        # Reuse executors so the same statement is never registered twice.
        if query not in cached_statements:
            cached_statements[query] = StatementExecutor(query)
        listener = (table, 'after_create', cached_statements[query])
        if not sa.event.contains(*listener):
            sa.event.listen(*listener)

    def set_activity_values(self, session):
        """Insert the audit transaction row for *session*'s transaction.

        Warns and does nothing on non-PostgreSQL dialects.
        """
        dialect = session.bind.engine.dialect
        table = self.transaction_cls.__table__

        if not isinstance(dialect, PGDialect):
            warnings.warn(
                '"{0}" is not a PostgreSQL dialect. No versioning data will '
                'be saved.'.format(dialect.__class__),
                RuntimeWarning
            )
            return

        values = convert_callables(self.get_transaction_values())
        if values:
            values['native_transaction_id'] = sa.func.txid_current()
            values['issued_at'] = sa.text("now() AT TIME ZONE 'UTC'")
            # Upsert-style: a transaction row may already exist for this
            # native transaction id.
            stmt = (
                insert(table)
                .values(**values)
                .on_conflict_do_nothing(
                    constraint='audit_transactions_unique_native_tx_id'
                )
            )
            session.execute(stmt)

    def modified_columns(self, obj):
        """Return the set of columns of *obj* that have pending changes.

        Synonyms are skipped; relationship properties contribute their
        local foreign-key columns.
        """
        columns = set()
        mapper = sa.inspect(obj.__class__)
        for key, attr in sa.inspect(obj).attrs.items():
            if key in mapper.synonyms.keys():
                continue
            prop = getattr(obj.__class__, key).property
            if attr.history.has_changes():
                columns |= set(
                    prop.columns
                    if isinstance(prop, sa.orm.ColumnProperty)
                    else
                    [local for local, remote in prop.local_remote_pairs]
                )
        return columns

    def is_modified(self, obj_or_session):
        """Return True if a versioned entity (or any entity in a session)
        has non-excluded modifications.

        Accepts either a mapped object or a session-like iterable.

        :raises ClassNotVersioned: for a mapped object without
            ``__versioned__``.
        """
        if hasattr(obj_or_session, '__mapper__'):
            if not hasattr(obj_or_session, '__versioned__'):
                raise ClassNotVersioned(obj_or_session.__class__.__name__)
            excluded = obj_or_session.__versioned__.get('exclude', [])
            return bool(
                set([
                    column.name
                    for column in self.modified_columns(obj_or_session)
                ]) - set(excluded)
            )
        else:
            # Deleted entities count as modified even without column changes.
            return any(
                self.is_modified(entity) or entity in obj_or_session.deleted
                for entity in obj_or_session
                if hasattr(entity, '__versioned__')
            )

    def receive_before_flush(self, session, flush_context, instances):
        """Session 'before_flush' hook: record transaction metadata when
        something versioned changed."""
        if self.is_modified(session):
            self.set_activity_values(session)

    def instrument_versioned_classes(self, mapper, cls):
        """
        Collect versioned class and add it to pending_classes list.

        :mapper mapper: SQLAlchemy mapper object
        :cls cls: SQLAlchemy declarative class
        """
        if hasattr(cls, '__versioned__') and cls not in self.pending_classes:
            self.pending_classes.add(cls)

    def configure_versioned_classes(self):
        """
        Configures all versioned classes that were collected during
        instrumentation process.
        """
        for cls in self.pending_classes:
            self.audit_table(cls.__table__, cls.__versioned__.get('exclude'))
        assign_actor(self.base, self.transaction_cls, self.actor_cls)

    def attach_table_listeners(self):
        """Attach the DDL listeners to the audit tables."""
        for values in self.table_listeners['audit_transactions']:
            sa.event.listen(self.transaction_cls.__table__, *values)
        for values in self.table_listeners['audit_activities']:
            sa.event.listen(self.activity_cls.__table__, *values)

    def remove_table_listeners(self):
        """Detach the DDL listeners from the audit tables."""
        for values in self.table_listeners['audit_transactions']:
            sa.event.remove(self.transaction_cls.__table__, *values)
        for values in self.table_listeners['audit_activities']:
            sa.event.remove(self.activity_cls.__table__, *values)

    @property
    def actor_cls(self):
        """Resolve the configured actor class.

        A string is looked up in the declarative class registry, which
        requires init() to have been called first.

        :raises ImproperlyConfigured: when the base is missing or the name
            cannot be resolved.
        """
        if isinstance(self._actor_cls, str):
            if not self.base:
                raise ImproperlyConfigured(
                    'This manager does not have declarative base set up yet. '
                    'Call init method to set up this manager.'
                )
            registry = self.base._decl_class_registry
            try:
                return registry[self._actor_cls]
            except KeyError:
                raise ImproperlyConfigured(
                    'Could not build relationship between Activity'
                    ' and %s. %s was not found in declarative class '
                    'registry. Either configure VersioningManager to '
                    'use different actor class or disable this '
                    'relationship by setting it to None.' % (
                        self._actor_cls,
                        self._actor_cls
                    )
                )
        return self._actor_cls

    def attach_listeners(self):
        """Attach both table-level DDL and ORM-level event listeners."""
        self.attach_table_listeners()
        for listener in self.listeners:
            sa.event.listen(*listener)

    def remove_listeners(self):
        """Detach both table-level DDL and ORM-level event listeners."""
        self.remove_table_listeners()
        for listener in self.listeners:
            sa.event.remove(*listener)

    def activity_model_factory(self, base, transaction_cls):
        """Build the AuditActivity model bound to *base*."""
        class AuditActivity(activity_base(base, self.schema_name, transaction_cls)):
            __tablename__ = 'audit_activities'

        return AuditActivity

    def transaction_model_factory(self, base):
        """Build the AuditTransaction model bound to *base*."""
        class AuditTransaction(transaction_base(base, self.schema_name)):
            __tablename__ = 'audit_transactions'

        return AuditTransaction

    def init(self, base):
        """Bind the manager to declarative *base*: create the audit models
        and attach all listeners."""
        self.base = base
        self.transaction_cls = self.transaction_model_factory(base)
        self.activity_cls = self.activity_model_factory(
            base,
            self.transaction_cls
        )
        self.attach_listeners()
Example #41
0
class Consumer(Service, ConsumerT):
    """Base Consumer."""

    app: AppT

    logger = logger

    #: Tuple of exception types that may be raised when the
    #: underlying consumer driver is stopped.
    consumer_stopped_errors: ClassVar[Tuple[Type[BaseException], ...]] = ()

    # Mapping of TP to list of acked offsets.
    _acked: MutableMapping[TP, List[int]]

    #: Fast lookup to see if tp+offset was acked.
    _acked_index: MutableMapping[TP, Set[int]]

    #: Keeps track of the currently read offset in each TP
    _read_offset: MutableMapping[TP, Optional[int]]

    #: Keeps track of the currently commited offset in each TP.
    _committed_offset: MutableMapping[TP, Optional[int]]

    #: The consumer.wait_empty() method will set this to be notified
    #: when something acks a message.
    _waiting_for_ack: Optional[asyncio.Future] = None

    #: Used by .commit to ensure only one thread is comitting at a time.
    #: Other thread starting to commit while a commit is already active,
    #: will wait for the original request to finish, and do nothing.
    _commit_fut: Optional[asyncio.Future] = None

    #: Set of unacked messages: that is messages that we started processing
    #: and that we MUST attempt to complete processing of, before
    #: shutting down or resuming a rebalance.
    _unacked_messages: MutableSet[Message]

    #: Time of last record batch received.
    #: Set only when not set, and reset by commit() so actually
    #: tracks how long it ago it was since we received a record that
    #: was never committed.
    _last_batch: Optional[float]

    #: Time of when the consumer was started.
    _time_start: float

    # How often to poll and track log end offsets.
    _end_offset_monitor_interval: float

    _commit_every: Optional[int]
    _n_acked: int = 0

    def __init__(self,
                 transport: TransportT,
                 callback: ConsumerCallback,
                 on_partitions_revoked: PartitionsRevokedCallback,
                 on_partitions_assigned: PartitionsAssignedCallback,
                 *,
                 commit_interval: float = None,
                 commit_livelock_soft_timeout: float = None,
                 loop: asyncio.AbstractEventLoop = None,
                 **kwargs: Any) -> None:
        """Set up consumer state and wire callbacks and sensors.

        The commit intervals fall back to the app configuration when not
        given explicitly.
        """
        assert callback is not None
        self.transport = transport
        self.app = self.transport.app
        self.callback = callback
        self._on_message_in = self.app.sensors.on_message_in
        self._on_partitions_revoked = on_partitions_revoked
        self._on_partitions_assigned = on_partitions_assigned
        self._commit_every = self.app.conf.broker_commit_every
        self.commit_interval = (commit_interval
                                or self.app.conf.broker_commit_interval)
        self.commit_livelock_soft_timeout = (
            commit_livelock_soft_timeout
            or self.app.conf.broker_commit_livelock_soft_timeout)
        # Per-TP ack bookkeeping: a list of acked offsets plus a set index
        # so duplicate acks can be rejected in O(1).
        self._acked = defaultdict(list)
        self._acked_index = defaultdict(set)
        # defaultdict(lambda: None): "offset unknown" until first seen.
        self._read_offset = defaultdict(lambda: None)
        self._committed_offset = defaultdict(lambda: None)
        # WeakSet so fully-released messages can drop out on their own.
        self._unacked_messages = WeakSet()
        self._waiting_for_ack = None
        self._time_start = monotonic()
        self._last_batch = None
        self._end_offset_monitor_interval = self.commit_interval * 2
        self.randomly_assigned_topics = set()
        super().__init__(loop=loop or self.transport.loop, **kwargs)

    @abc.abstractmethod
    async def _commit(
            self,
            offsets: Mapping[TP, Tuple[int, str]]) -> bool:  # pragma: no cover
        """Driver hook: commit (offset, metadata) pairs; True on success."""
        ...

    @abc.abstractmethod
    def _new_topicpartition(self, topic: str,
                            partition: int) -> TP:  # pragma: no cover
        """Driver hook: build a TP from *topic* and *partition*."""
        ...

    def _is_changelog_tp(self, tp: TP) -> bool:
        """Return True when *tp* belongs to one of the app's changelog topics."""
        changelog_topics = self.app.tables.changelog_topics
        return tp.topic in changelog_topics

    @Service.transitions_to(CONSUMER_PARTITIONS_ASSIGNED)
    async def on_partitions_assigned(self, assigned: Set[TP]) -> None:
        """Forward a partition-assignment event to the injected callback."""
        await self._on_partitions_assigned(assigned)

    @Service.transitions_to(CONSUMER_PARTITIONS_REVOKED)
    async def on_partitions_revoked(self, revoked: Set[TP]) -> None:
        """Forward a partition-revocation event to the injected callback."""
        await self._on_partitions_revoked(revoked)

    def track_message(self, message: Message) -> None:
        """Register *message* as in-flight so shutdown can wait for its ack."""
        # add to set of pending messages that must be acked for graceful
        # shutdown.  This is called by transport.Conductor,
        # before delivering messages to streams.
        self._unacked_messages.add(message)
        # call sensors
        self._on_message_in(message.tp, message.offset, message)

    def ack(self, message: Message) -> bool:
        """Mark *message* as processed.

        Returns True only the first time a not-yet-committed offset is
        acked for its partition; repeated acks, acks on already-committed
        offsets, and topics with acks disabled all return False.
        """
        if not message.acked:
            message.acked = True
            tp = message.tp
            offset = message.offset
            if self.app.topics.acks_enabled_for(message.topic):
                committed = self._committed_offset[tp]
                try:
                    if committed is None or offset > committed:
                        acked_index = self._acked_index[tp]
                        if offset not in acked_index:
                            self._unacked_messages.discard(message)
                            acked_index.add(offset)
                            acked_for_tp = self._acked[tp]
                            acked_for_tp.append(offset)
                            self._n_acked += 1
                            return True
                finally:
                    # Always wake anyone blocked in wait_empty(), even when
                    # the ack turned out to be a no-op.
                    notify(self._waiting_for_ack)
        return False

    async def _wait_for_ack(self, timeout: float) -> None:
        """Block until `ack` notifies us, or *timeout* seconds elapse.

        Timeouts and cancellation are swallowed on purpose: callers only
        want to re-check their own condition afterwards.
        """
        # arm future so that `ack()` can wake us up
        self._waiting_for_ack = asyncio.Future(loop=self.loop)
        try:
            # wait for `ack()` to wake us up
            # Bug fix: the timeout was previously hard-coded to 1, silently
            # ignoring the `timeout` parameter.
            await asyncio.wait_for(self._waiting_for_ack,
                                   loop=self.loop,
                                   timeout=timeout)
        except (asyncio.TimeoutError,
                asyncio.CancelledError):  # pragma: no cover
            pass
        finally:
            self._waiting_for_ack = None

    @Service.transitions_to(CONSUMER_WAIT_EMPTY)
    async def wait_empty(self) -> None:
        """Wait for all messages that started processing to be acked."""
        wait_count = 0
        while not self.should_stop and self._unacked_messages:
            wait_count += 1
            if not wait_count % 100_000:  # pragma: no cover
                # Periodic diagnostic: dump what is still unacked.
                remaining = [(m.refcount, m) for m in self._unacked_messages]
                self.log.warn(f'Waiting for {remaining}')
            self.log.dev('STILL WAITING FOR ALL STREAMS TO FINISH')
            self.log.dev('WAITING FOR %r EVENTS', len(self._unacked_messages))
            # Collect garbage so weakly-referenced messages drop out of
            # _unacked_messages before re-checking.
            gc.collect()
            await self.commit()
            if not self._unacked_messages:
                break
            # Sleep until ack() wakes us up, or one second passes.
            await self._wait_for_ack(timeout=1)

        self.log.dev('COMMITTING AGAIN AFTER STREAMS DONE')
        await self.commit()

    async def on_stop(self) -> None:
        """Service stop hook: optionally drain pending acks, reset batch clock."""
        if self.app.conf.stream_wait_empty:
            await self.wait_empty()
        self._last_batch = None

    @Service.task
    async def _commit_handler(self) -> None:
        """Background task: commit once every ``commit_interval`` seconds."""
        await self.sleep(self.commit_interval)
        while not self.should_stop:
            await self.commit()
            await self.sleep(self.commit_interval)

    @Service.task
    async def _commit_livelock_detector(self) -> None:  # pragma: no cover
        """Background task: warn when the commit offset stops advancing."""
        soft_timeout = self.commit_livelock_soft_timeout
        interval: float = self.commit_interval * 2.5
        await self.sleep(interval)
        while not self.should_stop:
            if self._last_batch is not None:
                # _last_batch is reset on commit, so a large age means we
                # have received records but failed to commit them.
                s_since_batch = monotonic() - self._last_batch
                if s_since_batch > soft_timeout:
                    self.log.warn(
                        'Possible livelock: COMMIT OFFSET NOT ADVANCING')
            await self.sleep(interval)

    async def commit(self,
                     topics: TPorTopicSet = None) -> bool:  # pragma: no cover
        """Maybe commit the offset for all or specific topics.

        Arguments:
            topics: Set containing topics and/or TopicPartitions to commit.
        """
        if await self.maybe_wait_for_commit_to_finish():
            # original commit finished, return False as we did not commit
            return False

        # Become the active committer: followers will await this future.
        self._commit_fut = asyncio.Future(loop=self.loop)
        try:
            return await self.force_commit(topics)
        finally:
            # set commit_fut to None so that next call will commit.
            fut, self._commit_fut = self._commit_fut, None
            # notify followers that the commit is done.
            if fut is not None and not fut.done():
                fut.set_result(None)

    async def maybe_wait_for_commit_to_finish(self) -> bool:
        """Await any in-progress commit; True if one completed on our behalf."""
        # Only one coroutine allowed to commit at a time,
        # and other coroutines should wait for the original commit to finish
        # then do nothing.
        if self._commit_fut is not None:
            # something is already committing so wait for that future.
            try:
                await self._commit_fut
            except asyncio.CancelledError:
                # if future is cancelled we have to start new commit
                pass
            else:
                return True
        return False

    @Service.transitions_to(CONSUMER_COMMITTING)
    async def force_commit(self, topics: TPorTopicSet = None) -> bool:
        """Commit pending acks now, bypassing the single-committer guard.

        Returns True when offsets were actually committed.
        """
        sensor_state = self.app.sensors.on_commit_initiated(self)

        # Go over the ack list in each topic/partition
        commit_tps = list(self._filter_tps_with_pending_acks(topics))
        did_commit = await self._commit_tps(commit_tps)

        self.app.sensors.on_commit_completed(self, sensor_state)
        return did_commit

    async def _commit_tps(self, tps: Iterable[TP]) -> bool:
        """Commit the committable offsets in *tps*; True if anything committed."""
        commit_offsets = self._filter_committable_offsets(tps)
        if commit_offsets:
            try:
                # send all messages attached to the new offset
                await self._handle_attached(commit_offsets)
            except ProducerSendError as exc:
                # Producer is irrecoverably broken: crash the service.
                await self.crash(exc)
            else:
                return await self._commit_offsets(commit_offsets)
        return False

    def _filter_committable_offsets(self, tps: Iterable[TP]) -> Dict[TP, int]:
        """Map each tp in *tps* to the newest offset that is safe to commit."""
        committable: Dict[TP, int] = {}
        for tp in tps:
            # Find the latest offset we can commit in this tp.
            candidate = self._new_offset(tp)
            if candidate is None:
                continue
            # Only keep it if it actually advances the committed position.
            if self._should_commit(tp, candidate):
                committable[tp] = candidate
        return committable

    async def _handle_attached(self, commit_offsets: Mapping[TP, int]) -> None:
        """Publish messages attached to each offset before it is committed."""
        for tp, offset in commit_offsets.items():
            app = cast(App, self.app)
            attachments = app._attachments
            producer = app.producer
            # Start publishing the messages and return a list of pending
            # futures.
            pending = await attachments.publish_for_tp_offset(tp, offset)
            # then we wait for either
            #  1) all the attached messages to be published, or
            #  2) the producer crashing
            #
            # If the producer crashes we will not be able to send any messages
            # and it only crashes when there's an irrecoverable error.
            #
            # If we cannot commit it means the events will be processed again,
            # so conforms to at-least-once semantics.
            if pending:
                await producer.wait_many(pending)

    async def _commit_offsets(self, commit_offsets: Mapping[TP, int]) -> bool:
        """Commit *commit_offsets* through the underlying consumer.

        Every offset is paired with an empty metadata string, as the
        commit API expects ``(offset, meta)`` tuples.
        """
        meta = ''
        request = {}
        for tp, offset in commit_offsets.items():
            request[tp] = (offset, meta)
        return await self._commit(request)

    def _filter_tps_with_pending_acks(self,
                                      topics: TPorTopicSet = None
                                      ) -> Iterator[TP]:
        """Yield acked topic partitions, optionally restricted to *topics*.

        *topics* may contain topic partitions and/or plain topic names;
        ``None`` disables filtering.
        """
        for tp in self._acked:
            if topics is None or tp in topics or tp.topic in topics:
                yield tp

    def _should_commit(self, tp: TP, offset: int) -> bool:
        """Return True if *offset* should be committed for *tp*."""
        committed = self._committed_offset[tp]
        if committed is None:
            # Nothing committed for this partition yet.
            return True
        # Zero/falsy offsets are never committed; otherwise commit only
        # when the offset advances past what was already committed.
        return bool(offset) and offset > committed

    def _new_offset(self, tp: TP) -> Optional[int]:
        """Return the next committable offset for *tp*, or None.

        Scans the acked offsets for this partition until the first gap
        and returns the offset just before it.  For example, if acked is::

            1 2 3 4 5 6 7 8 9        -> returns 9
            34 35 36 40 41 42 43 44  -> returns 36 (gap after 36)

        The consumed run of offsets is removed from the ack bookkeeping
        as a side effect.
        """
        acked = self._acked[tp]
        if not acked:
            return None
        acked.sort()
        # Note: acked is always kept sorted.
        # Take the first run of consecutive offsets...
        batch = next(consecutive_numbers(acked))
        # ...and drop it from both the list and the index to clean up.
        del acked[:len(batch)]
        self._acked_index[tp].difference_update(batch)
        # The highest offset in the run is the commit point.
        return batch[-1]

    async def on_task_error(self, exc: BaseException) -> None:
        """Commit the current offsets when a processing task fails."""
        await self.commit()

    async def _drain_messages(self,
                              fetcher: ServiceT) -> None:  # pragma: no cover
        """Background fetch loop: constantly read messages via ``getmany``.

        This is the background thread started by Fetcher, used to
        constantly read messages using Consumer.getmany.
        It takes Fetcher as argument, because we must be able to
        stop it using ``await Fetcher.stop()``.
        """
        # Bind frequently used attributes to locals: these are looked up
        # for every fetched message in the hot loop below.
        callback = self.callback
        getmany = self.getmany
        consumer_should_stop = self._stopped.is_set
        fetcher_should_stop = fetcher._stopped.is_set

        get_read_offset = self._read_offset.__getitem__
        set_read_offset = self._read_offset.__setitem__
        flag_consumer_fetching = CONSUMER_FETCHING
        set_flag = self.diag.set_flag
        unset_flag = self.diag.unset_flag
        commit_every = self._commit_every

        try:
            while not (consumer_should_stop() or fetcher_should_stop()):
                set_flag(flag_consumer_fetching)
                ait = cast(AsyncIterator, getmany(timeout=5.0))
                # Sleeping because sometimes getmany is called in a loop
                # never releasing to the event loop
                await self.sleep(0)
                if not self.should_stop:
                    async for tp, message in ait:
                        offset = message.offset
                        r_offset = get_read_offset(tp)
                        # Only process offsets not seen before for this
                        # partition; anything else is dropped below.
                        if r_offset is None or offset > r_offset:
                            # Commit periodically after `commit_every`
                            # acked messages (when configured).
                            if commit_every is not None:
                                if self._n_acked >= commit_every:
                                    self._n_acked = 0
                                    await self.commit()
                            await callback(message)
                            set_read_offset(tp, offset)
                        else:
                            self.log.dev('DROPPED MESSAGE ROFF %r: k=%r v=%r',
                                         offset, message.key, message.value)
                    unset_flag(flag_consumer_fetching)

        except self.consumer_stopped_errors:
            if self.transport.app.should_stop:
                # we're already stopping so ignore
                self.log.info('Broker stopped consumer, shutting down...')
                return
            raise
        except asyncio.CancelledError:
            if self.transport.app.should_stop:
                # we're already stopping so ignore
                self.log.info('Consumer shutting down for user cancel.')
                return
            raise
        except Exception as exc:
            self.log.exception('Drain messages raised: %r', exc)
            raise
        finally:
            # NOTE(review): also unset inside the loop above — this
            # double-unset looks idempotent, but confirm.
            unset_flag(flag_consumer_fetching)

    def close(self) -> None:
        """Close the consumer — no-op here; subclasses may override."""
        ...

    @property
    def unacked(self) -> Set[Message]:
        """The set of messages not yet acknowledged (read-only view)."""
        return cast(Set[Message], self._unacked_messages)
Example #42
0
class TileableData(SerializableWithKey, Tileable):
    """Data node of a tileable object (e.g. a tensor/dataframe-like).

    Holds the producing operand (``op``), shape/``nsplits`` metadata,
    the chunks produced by tiling, and a weak set of entity wrappers
    currently pointing at this data.
    """

    __slots__ = '__weakref__', '_siblings', '_cix', '_entities'
    # _cix is a derived chunk-indexer cache; do not carry it over on copy.
    _no_copy_attrs_ = SerializableWithKey._no_copy_attrs_ | {'_cix'}

    # required fields
    _op = KeyField('op')
    _shape = TupleField('shape', ValueType.int64,
                        on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)
    # optional fields
    # `nsplits` means the sizes of chunks for each dimension
    _nsplits = TupleField('nsplits', ValueType.tuple(ValueType.uint64),
                          on_serialize=on_serialize_nsplits)
    _extra_params = DictField('extra_params', key_type=ValueType.string, on_deserialize=AttributeDict)

    def __init__(self, *args, **kwargs):
        # Keyword arguments that do not match a slot are collected into
        # `_extra_params` instead of being rejected.
        extras = AttributeDict((k, kwargs.pop(k)) for k in set(kwargs) - set(self.__slots__))
        kwargs['_extra_params'] = kwargs.pop('_extra_params', extras)
        if kwargs.get('_nsplits', None) is not None:
            # Normalize nsplits into a tuple of tuples.
            kwargs['_nsplits'] = tuple(tuple(s) for s in kwargs['_nsplits'])

        super(TileableData, self).__init__(*args, **kwargs)

        if hasattr(self, '_chunks') and self._chunks:
            # Keep chunks ordered by index for deterministic access.
            self._chunks = sorted(self._chunks, key=attrgetter('index'))

        # Entities wrapping this data; weak refs so wrappers can be GC'ed.
        self._entities = WeakSet()

    @property
    def ndim(self):
        """Number of dimensions."""
        return len(self.shape)

    def __len__(self):
        try:
            return self.shape[0]
        except IndexError:
            # 0-d object: report length 0 while a graph is being built so
            # generic code does not fail during construction.
            if build_mode().is_build_mode:
                return 0
            raise TypeError('len() of unsized object')

    @property
    def shape(self):
        """Shape tuple; derived (and cached) from nsplits when unset."""
        if hasattr(self, '_shape') and self._shape is not None:
            return self._shape
        if hasattr(self, '_nsplits') and self._nsplits is not None:
            # Each dimension's size is the sum of its chunk sizes.
            self._shape = tuple(builtins.sum(nsplit) for nsplit in self._nsplits)
            return self._shape

    def _update_shape(self, new_shape):
        # Internal: overwrite the cached shape.
        self._shape = new_shape

    @property
    def chunk_shape(self):
        """Number of chunks along each dimension (None when untiled)."""
        if hasattr(self, '_nsplits') and self._nsplits is not None:
            return tuple(map(len, self._nsplits))

    @property
    def chunks(self):
        """The chunks produced by tiling, or None when not yet tiled."""
        return getattr(self, '_chunks', None)

    @property
    def op(self):
        """The operand that produced this data."""
        return getattr(self, '_op', None)

    @property
    def nsplits(self):
        """Chunk sizes for each dimension, or None when unknown."""
        return getattr(self, '_nsplits', None)

    @nsplits.setter
    def nsplits(self, new_nsplits):
        self._nsplits = new_nsplits

    @property
    def size(self):
        """Total number of elements, as a Python scalar."""
        return np.prod(self.shape).item()

    @property
    def inputs(self):
        """Inputs of the producing operand (empty list when None)."""
        return self.op.inputs or []

    @inputs.setter
    def inputs(self, new_inputs):
        self.op.inputs = new_inputs

    @property
    def params(self):
        # params return the properties which useful to rebuild a new tileable object
        return {
            'shape': self.shape
        }

    @property
    def extra_params(self):
        """Extra keyword parameters captured at construction time."""
        return self._extra_params

    @property
    def cix(self):
        """Chunk indexer; cached in ``_cix`` when caching succeeds."""
        if self.ndim == 0:
            return ChunksIndexer(self)

        try:
            if getattr(self, '_cix', None) is None:
                self._cix = ChunksIndexer(self)
            return self._cix
        except (TypeError, ValueError):
            # Fall back to an uncached indexer when caching fails.
            return ChunksIndexer(self)

    @property
    def entities(self):
        """Weak set of entities currently wrapping this data."""
        return self._entities

    def is_coarse(self):
        """Return True when this data has not been tiled into chunks."""
        return not hasattr(self, '_chunks') or self._chunks is None or len(self._chunks) == 0

    def to_coarse(self):
        """Return an untiled copy of this data (same id, chunks dropped)."""
        if self.is_coarse():
            return self
        new_entity = self.copy()
        new_entity._obj_set('_id', self._id)
        new_entity._chunks = None
        if self.inputs is None or len(self.inputs) == 0:
            # Record the original nsplits under 'raw_chunk_size'.
            new_entity.extra_params.update({'raw_chunk_size': self.nsplits})
        return new_entity

    def is_sparse(self):
        """Whether the producing operand yields sparse data."""
        return self.op.is_sparse()

    # alias of is_sparse
    issparse = is_sparse

    @enter_build_mode
    def attach(self, entity):
        # Register an entity wrapper for this data.
        self._entities.add(entity)

    @enter_build_mode
    def detach(self, entity):
        # Unregister an entity wrapper (no error if absent).
        self._entities.discard(entity)

    def tiles(self):
        """Tile this tileable (delegates to the tiles handler)."""
        return handler.tiles(self)

    def single_tiles(self):
        """Tile only this tileable (delegates to the tiles handler)."""
        return handler.single_tiles(self)

    @kernel_mode
    def build_graph(self, graph=None, cls=DAG, tiled=False, compose=True, executed_keys=None):
        """Build (or extend) the dependency graph rooted at this data.

        :param graph: existing graph to add to, or None to create ``cls()``
        :param cls: graph class used when *graph* is None
        :param tiled: build the chunk-level graph (tiling first if needed)
        :param compose: call ``graph.compose`` on the tiled graph
        :param executed_keys: keys already executed; such nodes are
            replaced by fetch nodes instead of being recomputed
        """
        from .utils import build_fetch

        executed_keys = set(executed_keys or [])
        if tiled and self.is_coarse():
            self.tiles()

        graph = graph if graph is not None else cls()
        keys = None

        if tiled:
            nodes = list(c.data for c in self.chunks)
            keys = list(c.key for c in self.chunks)
        else:
            nodes = list(self.op.outputs)

        # Cache of executed node -> fetch node, so each executed node
        # maps to exactly one fetch node in the graph.
        node_to_fetch = dict()

        def _generate_fetch_node(n):
            if n in node_to_fetch:
                return node_to_fetch[n]
            fn = build_fetch(n, coarse=True).data
            node_to_fetch[n] = fn
            return fn

        visited = set()
        while len(nodes) > 0:
            node = nodes.pop()

            # replace executed tensor/chunk by tensor/chunk with fetch op
            if node.key in executed_keys:
                node = _generate_fetch_node(node)

            visited.add(node)
            if not graph.contains(node):
                graph.add_node(node)
            children = node.inputs or []
            for c in children:
                if c.key in executed_keys:
                    visited.add(c)
                    c = _generate_fetch_node(c)
                if not graph.contains(c):
                    graph.add_node(c)
                if not graph.has_successor(c, node):
                    graph.add_edge(c, node)
            # Also walk every sibling output of each input's operand.
            nodes.extend([c for c in itertools.chain(*[inp.op.outputs for inp in node.inputs or []])
                          if c not in visited])
        if tiled and compose:
            graph.compose(keys=keys)

        if not tiled and any(not n.is_coarse() for n in graph):
            # Mixed granularity: normalize everything back to coarse.
            return self._to_coarse_graph(graph)

        return graph

    @staticmethod
    def _to_coarse_graph(graph):
        """Return a copy of *graph* with every node replaced by its
        coarse (untiled) counterpart, preserving edges."""
        new_graph = type(graph)()
        visited = dict()
        for n in graph:
            if n not in visited:
                new_node = n.to_coarse()
                visited[n] = new_node
                new_graph.add_node(new_node)
            for succ in graph.successors(n):
                if succ not in visited:
                    new_node = succ.to_coarse()
                    visited[succ] = new_node
                    new_graph.add_node(new_node)
                new_graph.add_edge(visited[n], visited[succ])
        return new_graph

    def visualize(self, graph_attrs=None, node_attrs=None, **kw):
        """Render the dependency graph with graphviz; returns a Source."""
        from graphviz import Source

        g = self.build_graph(**kw)
        dot = g.to_dot(graph_attrs=graph_attrs, node_attrs=node_attrs,
                       result_chunk_keys={c.key for c in self.chunks})

        return Source(dot)

    def execute(self, session=None, **kw):
        """Run this tileable in *session* (default session if None)."""
        from .session import Session

        if session is None:
            session = Session.default_or_local()
        return session.run(self, **kw)

    def fetch(self, session=None, **kw):
        """Fetch executed results from *session* (default if None)."""
        from .session import Session

        if session is None:
            session = Session.default_or_local()
        return session.fetch(self, **kw)

    def _set_execute_session(self, session):
        # Register with the cleaner so session-side resources are
        # released when this object is garbage collected.
        _cleaner.register(self, session)

    # Write-only property: assigning registers the executing session.
    _execute_session = property(fset=_set_execute_session)
Example #43
0
class TestWeakSet(unittest.TestCase):
    """Exercise WeakSet's full set API plus its weak-reference semantics
    (elements disappear when their last strong reference dies)."""

    def setUp(self):
        # need to keep references to them
        self.items = [ustr(c) for c in ('a', 'b', 'c')]
        self.items2 = [ustr(c) for c in ('x', 'y', 'z')]
        self.letters = [ustr(c) for c in string.ascii_letters]
        self.s = WeakSet(self.items)
        self.d = dict.fromkeys(self.items)
        self.obj = ustr('F')
        self.fs = WeakSet([self.obj])

    def test_methods(self):
        # WeakSet must expose every public method that set has.
        weaksetmethods = dir(WeakSet)
        for method in dir(set):
            if method == 'test_c_api' or method.startswith('_'):
                continue
            self.assertIn(method, weaksetmethods,
                          "WeakSet missing method " + method)

    def test_new_or_init(self):
        self.assertRaises(TypeError, WeakSet, [], 2)

    def test_len(self):
        self.assertEqual(len(self.s), len(self.d))
        self.assertEqual(len(self.fs), 1)
        # Dropping the only strong reference must shrink the set.
        del self.obj
        self.assertEqual(len(self.fs), 0)

    def test_contains(self):
        for c in self.letters:
            self.assertEqual(c in self.s, c in self.d)
        # 1 is not weakref'able, but that TypeError is caught by __contains__
        self.assertNotIn(1, self.s)
        self.assertIn(self.obj, self.fs)
        del self.obj
        self.assertNotIn(ustr('F'), self.fs)

    def test_union(self):
        u = self.s.union(self.items2)
        for c in self.letters:
            self.assertEqual(c in u, c in self.d or c in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(u), WeakSet)
        self.assertRaises(TypeError, self.s.union, [[]])
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet(self.items + self.items2)
            c = C(self.items2)
            self.assertEqual(self.s.union(c), x)

    def test_or(self):
        i = self.s.union(self.items2)
        self.assertEqual(self.s | set(self.items2), i)
        self.assertEqual(self.s | frozenset(self.items2), i)

    def test_intersection(self):
        i = self.s.intersection(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet([])
            self.assertEqual(self.s.intersection(C(self.items2)), x)

    def test_isdisjoint(self):
        self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
        self.assertTrue(not self.s.isdisjoint(WeakSet(self.letters)))

    def test_and(self):
        i = self.s.intersection(self.items2)
        self.assertEqual(self.s & set(self.items2), i)
        self.assertEqual(self.s & frozenset(self.items2), i)

    def test_difference(self):
        i = self.s.difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c not in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.difference, [[]])

    def test_sub(self):
        i = self.s.difference(self.items2)
        self.assertEqual(self.s - set(self.items2), i)
        self.assertEqual(self.s - frozenset(self.items2), i)

    def test_symmetric_difference(self):
        i = self.s.symmetric_difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, (c in self.d) ^ (c in self.items2))
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.symmetric_difference, [[]])

    def test_xor(self):
        i = self.s.symmetric_difference(self.items2)
        self.assertEqual(self.s ^ set(self.items2), i)
        self.assertEqual(self.s ^ frozenset(self.items2), i)

    def test_sub_and_super(self):
        # p < q < (overlaps) r; verify all ordering comparisons.
        pl, ql, rl = map(lambda s: [ustr(c) for c in s],
                         ['ab', 'abcde', 'def'])
        p, q, r = map(WeakSet, (pl, ql, rl))
        self.assertTrue(p < q)
        self.assertTrue(p <= q)
        self.assertTrue(q <= q)
        self.assertTrue(q > p)
        self.assertTrue(q >= p)
        self.assertFalse(q < r)
        self.assertFalse(q <= r)
        self.assertFalse(q > r)
        self.assertFalse(q >= r)
        self.assertTrue(set('a').issubset('abc'))
        self.assertTrue(set('abc').issuperset('a'))
        self.assertFalse(set('a').issubset('cbs'))
        self.assertFalse(set('cbs').issuperset('a'))

    def test_gc(self):
        # Create a nest of cycles to exercise overall ref count check
        s = WeakSet(Foo() for i in range(1000))
        for elem in s:
            elem.cycle = s
            elem.sub = elem
            elem.set = WeakSet([elem])

    def test_subclass_with_custom_hash(self):
        # Bug #1257731
        class H(WeakSet):
            def __hash__(self):
                return int(id(self) & 0x7fffffff)

        s = H()
        f = set()
        f.add(s)
        self.assertIn(s, f)
        f.remove(s)
        f.add(s)
        f.discard(s)

    def test_init(self):
        # Re-running __init__ replaces the contents.
        s = WeakSet()
        s.__init__(self.items)
        self.assertEqual(s, self.s)
        s.__init__(self.items2)
        self.assertEqual(s, WeakSet(self.items2))
        self.assertRaises(TypeError, s.__init__, s, 2)
        self.assertRaises(TypeError, s.__init__, 1)

    def test_constructor_identity(self):
        s = WeakSet(self.items)
        t = WeakSet(s)
        self.assertNotEqual(id(s), id(t))

    def test_hash(self):
        # WeakSets are mutable, hence unhashable.
        self.assertRaises(TypeError, hash, self.s)

    def test_clear(self):
        self.s.clear()
        self.assertEqual(self.s, WeakSet([]))
        self.assertEqual(len(self.s), 0)

    def test_copy(self):
        dup = self.s.copy()
        self.assertEqual(self.s, dup)
        self.assertNotEqual(id(self.s), id(dup))

    def test_add(self):
        x = ustr('Q')
        self.s.add(x)
        self.assertIn(x, self.s)
        dup = self.s.copy()
        self.s.add(x)
        self.assertEqual(self.s, dup)
        self.assertRaises(TypeError, self.s.add, [])
        self.fs.add(Foo())
        self.assertTrue(len(self.fs) == 1)
        self.fs.add(self.obj)
        self.assertTrue(len(self.fs) == 1)

    def test_remove(self):
        x = ustr('a')
        self.s.remove(x)
        self.assertNotIn(x, self.s)
        self.assertRaises(KeyError, self.s.remove, x)
        self.assertRaises(TypeError, self.s.remove, [])

    def test_discard(self):
        a, q = ustr('a'), ustr('Q')
        self.s.discard(a)
        self.assertNotIn(a, self.s)
        self.s.discard(q)
        self.assertRaises(TypeError, self.s.discard, [])

    def test_pop(self):
        for i in range(len(self.s)):
            elem = self.s.pop()
            self.assertNotIn(elem, self.s)
        self.assertRaises(KeyError, self.s.pop)

    def test_update(self):
        retval = self.s.update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)
        self.assertRaises(TypeError, self.s.update, [[]])

    def test_update_set(self):
        self.s.update(set(self.items2))
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_ior(self):
        self.s |= set(self.items2)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_intersection_update(self):
        retval = self.s.intersection_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.intersection_update, [[]])

    def test_iand(self):
        self.s &= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_difference_update(self):
        retval = self.s.difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.difference_update, [[]])
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_isub(self):
        self.s -= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_symmetric_difference_update(self):
        retval = self.s.symmetric_difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_ixor(self):
        self.s ^= set(self.items2)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_inplace_on_self(self):
        # In-place ops where both operands are the same object.
        t = self.s.copy()
        t |= t
        self.assertEqual(t, self.s)
        t &= t
        self.assertEqual(t, self.s)
        t -= t
        self.assertEqual(t, WeakSet())
        t = self.s.copy()
        t ^= t
        self.assertEqual(t, WeakSet())

    def test_eq(self):
        # issue 5964
        self.assertTrue(self.s == self.s)
        self.assertTrue(self.s == WeakSet(self.items))
        self.assertFalse(self.s == set(self.items))
        self.assertFalse(self.s == list(self.items))
        self.assertFalse(self.s == tuple(self.items))
        self.assertFalse(self.s == WeakSet([Foo]))
        self.assertFalse(self.s == 1)

    def test_weak_destroy_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        # Create new items to be sure no-one else holds a reference
        items = [ustr(c) for c in ('a', 'b', 'c')]
        s = WeakSet(items)
        it = iter(s)
        next(it)  # Trigger internal iteration
        # Destroy an item
        del items[-1]
        gc.collect()  # just in case
        # We have removed either the first consumed items, or another one
        self.assertIn(len(list(it)), [len(items), len(items) - 1])
        del it
        # The removal has been committed
        self.assertEqual(len(s), len(items))

    def test_weak_destroy_and_mutate_while_iterating(self):
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        items = [ustr(c) for c in string.ascii_letters]
        s = WeakSet(items)

        @contextlib.contextmanager
        def testcontext():
            try:
                it = iter(s)
                next(it)
                # Schedule an item for removal and recreate it
                u = ustr(str(items.pop()))
                gc.collect()  # just in case
                yield u
            finally:
                it = None  # should commit all removals

        with testcontext() as u:
            self.assertNotIn(u, s)
        with testcontext() as u:
            self.assertRaises(KeyError, s.remove, u)
        self.assertNotIn(u, s)
        with testcontext() as u:
            s.add(u)
        self.assertIn(u, s)
        t = s.copy()
        with testcontext() as u:
            s.update(t)
        self.assertEqual(len(s), len(t))
        with testcontext() as u:
            s.clear()
        self.assertEqual(len(s), 0)
Example #44
0
class DataSocket:
    """
    Wrapper for a ZMQ socket that sends and receives dictionaries.

    Messages are JSON-serialized.  Any ``bytes`` values are stripped out
    of the structure and sent as separate binary message parts, keyed by
    a random 32-bit identifier embedded in the JSON as an ``"@<id>"``
    placeholder.
    """

    def __init__(self, context, port, type, debug):
        """
        :param context: ZMQ context used to create the socket
        :param port: local TCP port to bind (PUSH) or connect to
        :param type: ZMQ socket type (e.g. ``zmq.PUSH``)
        :param debug: when truthy, print every sent/received message
        """
        # request reply socket
        self._socket = context.socket(type)
        self._debug = debug
        # store these as weakrefs so that circular refs don't prevent
        # garbage collection
        self._java_objects = WeakSet()
        if type == zmq.PUSH:
            if debug:
                print("binding {}".format(port))
            self._socket.bind("tcp://127.0.0.1:{}".format(port))
        else:
            if debug:
                print("connecting {}".format(port))
            self._socket.connect("tcp://127.0.0.1:{}".format(port))

    def _register_java_object(self, object):
        """Track a Java shadow object so it is closed before this socket."""
        self._java_objects.add(object)

    def __del__(self):
        # make sure all shadow objects have signaled to Java side to
        # release references before they shut down
        for java_object in self._java_objects:
            java_object._close()

    @staticmethod
    def _new_bytes_identifier():
        """Make up a random 32-bit int to key a binary message part."""
        return np.random.randint(-(2 ** 31), 2 ** 31 - 1, 1,
                                 dtype=np.int32)[0]

    def _convert_np_to_python(self, d):
        """
        Recursively search dictionary and convert any values from numpy
        floats/ints to python floats/ints so they can be json serialized.
        """
        if type(d) != dict:
            return
        for k, v in d.items():
            if isinstance(v, dict):
                self._convert_np_to_python(v)
            elif type(v) == list:
                # NOTE(review): only dicts nested in the list are
                # converted; bare numpy scalars inside a list are
                # left untouched — confirm this is intended.
                for e in v:
                    self._convert_np_to_python(e)
            elif np.issubdtype(type(v), np.floating):
                d[k] = float(v)
            elif np.issubdtype(type(v), np.integer):
                d[k] = int(v)

    def _remove_bytes(self, bytes_data, structure):
        """
        Recursively replace ``bytes`` values in *structure* (in place)
        with ``"@<id>"`` placeholders, appending ``(id, value)`` pairs
        to *bytes_data* so they can be sent as separate message parts.
        """
        if isinstance(structure, list):
            for i, entry in enumerate(structure):
                if isinstance(entry, bytes):
                    identifier = self._new_bytes_identifier()
                    structure[i] = "@" + str(int(identifier))
                    bytes_data.append((identifier, entry))
                elif isinstance(entry, (list, dict)):
                    self._remove_bytes(bytes_data, entry)
        elif isinstance(structure, dict):
            for key in structure.keys():
                value = structure[key]
                if isinstance(value, bytes):
                    identifier = self._new_bytes_identifier()
                    bytes_data.append((identifier, value))
                    structure[key] = "@" + str(int(identifier))
                elif isinstance(value, (list, dict)):
                    self._remove_bytes(bytes_data, value)

    def send(self, message, timeout=0):
        """
        Send *message* (a dict; None is sent as an empty dict).

        With ``timeout == 0`` the send blocks.  Otherwise it retries in
        non-blocking mode for up to *timeout* milliseconds, returning
        True on success and False on timeout.
        """
        if message is None:
            message = {}
        # make sure any np types convert to python types so they can be
        # json serialized
        self._convert_np_to_python(message)
        # Send binary data in separate messages so it doesn't need to be
        # json serialized
        bytes_data = []
        self._remove_bytes(bytes_data, message)
        message_string = json.dumps(message)
        if self._debug:
            print("DEBUG, sending: {}".format(message))
        # convert keys to byte array
        key_vals = [(identifier.tobytes(), value)
                    for identifier, value in bytes_data]
        message_parts = [bytes(message_string, "iso-8859-1")
                         ] + [item for keyval in key_vals for item in keyval]
        if timeout == 0:
            self._socket.send_multipart(message_parts)
        else:
            start = time.time()
            while 1000 * (time.time() - start) < timeout:
                try:
                    self._socket.send_multipart(message_parts,
                                                flags=zmq.NOBLOCK)
                    return True
                except zmq.ZMQError:
                    pass  # ignore, keep trying
            return False

    def _replace_bytes(self, dict_or_list, hash, value):
        """
        Replace placeholders for byte arrays in JSON message with their
        actual values.

        NOTE(review): placeholders are parsed here as hex (base 16),
        while ``_remove_bytes`` writes decimal identifiers — presumably
        the other side sends hex; confirm before unifying.
        """
        if isinstance(dict_or_list, dict):
            for key in dict_or_list:
                entry = dict_or_list[key]
                if isinstance(entry, str) and "@" in entry:
                    # interpret hex hash string
                    hash_in_message = int(entry.split("@")[1], 16)
                    if hash == hash_in_message:
                        dict_or_list[key] = value
                        return
                elif isinstance(entry, (list, dict)):
                    self._replace_bytes(entry, hash, value)
        elif isinstance(dict_or_list, list):
            for i, entry in enumerate(dict_or_list):
                # Bug fix: this previously tested ``dict_or_list[key]``,
                # where ``key`` is undefined in the list branch, raising
                # NameError for any string entry inside a list.
                if isinstance(entry, str) and "@" in entry:
                    # interpret hex hash string
                    hash_in_message = int(entry.split("@")[1], 16)
                    if hash == hash_in_message:
                        dict_or_list[i] = value
                        return
                elif isinstance(entry, (list, dict)):
                    self._replace_bytes(entry, hash, value)

    def receive(self, timeout=0):
        """
        Receive one multipart message and return the decoded dict.

        With ``timeout == 0`` the receive blocks.  Otherwise it polls in
        non-blocking mode for up to *timeout* milliseconds and returns
        None on timeout.  Binary parts are spliced back over their
        placeholders; raises if the message encodes an exception.
        """
        if timeout == 0:
            reply = self._socket.recv_multipart()
        else:
            start = time.time()
            reply = None
            while 1000 * (time.time() - start) < timeout:
                try:
                    reply = self._socket.recv_multipart(flags=zmq.NOBLOCK)
                    if reply is not None:
                        break
                except zmq.ZMQError:
                    pass  # ignore, keep trying
            if reply is None:
                return reply
        message = json.loads(reply[0].decode("iso-8859-1"))
        # replace any byte data placeholders with the byte data itself
        for i in np.arange(1, len(reply), 2):
            # messages come in pairs: first is hash, second it byte data
            identity_hash = int.from_bytes(reply[i], byteorder=sys.byteorder)
            value = reply[i + 1]
            self._replace_bytes(message, identity_hash, value)

        if self._debug:
            print("DEBUG, recieved: {}".format(message))
        self._check_exception(message)
        return message

    def _check_exception(self, response):
        """Raise if *response* encodes an exception from the other side."""
        if "type" in response and response["type"] == "exception":
            raise Exception(response["value"])

    def close(self):
        """Close the underlying ZMQ socket."""
        self._socket.close()
Example #45
0
class RobustConnection(Connection):
    """A connection that transparently reconnects after a connection loss.

    Channels opened through :meth:`channel` are tracked (weakly) and
    reopened after every successful reconnect.
    """

    # Channel implementation used by ``channel()``; assumed to support
    # ``reopen()`` after a reconnect -- TODO confirm against RobustChannel.
    CHANNEL_CLASS = RobustChannel
    # Extra kwargs parsed from the connection URL by the base class:
    # (name, parser, default-as-string).
    KWARGS_TYPES = (
        ('reconnect_interval', parse_int, '5'),
        ('fail_fast', parse_bool, '1'),
    )

    def __init__(self, url, loop=None, **kwargs):
        super().__init__(url=url, loop=loop, **kwargs)

        # kwargs passed to the first ``connect()`` call; reused verbatim for
        # every subsequent reconnect attempt.
        self.connect_kwargs = {}
        # Seconds to wait between reconnection attempts.
        self.reconnect_interval = self.kwargs['reconnect_interval']
        # When truthy, the very first failing connect() raises instead of
        # retrying (cleared after the first successful connect).
        self.fail_fast = self.kwargs['fail_fast']

        # Weak references so an abandoned channel can be garbage collected.
        self.__channels = WeakSet()
        self._reconnect_callbacks = CallbackCollection()
        # Held for the whole duration of connect(); doubles as the
        # "reconnect in progress" indicator (see ``reconnecting``).
        self._connect_lock = asyncio.Lock()
        self._closed = False
        # Set while a connection is established, cleared on loss.
        self.connected = asyncio.Event()

    @property
    def reconnecting(self) -> bool:
        # The connect lock is held while connect() runs, so "locked" means
        # a (re)connect is currently in progress.
        return self._connect_lock.locked()

    @property
    def reconnect_callbacks(self) -> CallbackCollection:
        # Callbacks fired after every successful reconnect.
        return self._reconnect_callbacks

    @property
    def _channels(self) -> dict:
        # Snapshot of the live channels keyed by channel number.
        return {ch.number: ch for ch in self.__channels}

    def __repr__(self):
        return '<{0}: "{1}" {2} channels>'.format(self.__class__.__name__,
                                                  str(self),
                                                  len(self.__channels))

    def _on_connection_close(self, connection, closing, *args, **kwargs):
        # Losses that happen during an explicit (re)connect are handled by
        # the retry loop inside connect(); don't schedule a second reconnect.
        if self.reconnecting:
            return

        self.connected.clear()
        self.connection = None

        super()._on_connection_close(connection, closing)

        log.info("Connection to %s closed. Reconnecting after %r seconds.",
                 self, self.reconnect_interval)
        # Schedule the reconnect on the loop rather than awaiting here:
        # this callback runs synchronously in the close path.
        self.loop.call_later(self.reconnect_interval,
                             lambda: self.loop.create_task(self.reconnect()))

    def add_reconnect_callback(self, callback: Callable[[], None]):
        """ Add callback which will be called after reconnect.

        :return: None
        """

        self._reconnect_callbacks.add(callback)

    async def __cleanup_connection(self, exc):
        # Tear down a half-open connection after a failed attempt; errors
        # raised while closing are swallowed (return_exceptions=True).
        if self.connection is None:
            return
        await asyncio.gather(
            self.connection.close(exc),
            return_exceptions=True,
        )
        self.connection = None

    async def connect(self, timeout: TimeoutType = None, **kwargs):
        """Connect, retrying every ``reconnect_interval`` seconds until it
        succeeds (unless ``fail_fast`` makes the first failure fatal)."""
        if self.is_closed:
            raise RuntimeError("{!r} connection closed".format(self))

        if kwargs:
            # Store connect kwargs for reconnects
            self.connect_kwargs = kwargs

        if self.reconnecting:
            log.warning(
                "Connect method called but connection %r is "
                "reconnecting right now.", self)

        async with self._connect_lock:
            while True:
                try:
                    result = await super().connect(timeout=timeout,
                                                   **self.connect_kwargs)

                    # Restore every channel that was open before the loss.
                    for channel in self._channels.values():
                        await channel.reopen()

                    # fail_fast only applies to the very first attempt;
                    # once connected, later failures always retry.
                    self.fail_fast = False
                    self.connected.set()
                    return result
                except CONNECTION_EXCEPTIONS as e:
                    if self.fail_fast:
                        raise

                    await self.__cleanup_connection(e)

                    log.warning(
                        "Connection attempt to \"%s\" failed. "
                        "Reconnecting after %r seconds.",
                        self,
                        self.reconnect_interval,
                        exc_info=True,
                    )
                except asyncio.CancelledError as e:
                    # Cancellation must propagate, but clean up any
                    # half-open connection first.
                    await self.__cleanup_connection(e)
                    raise

                await asyncio.sleep(self.reconnect_interval)

    async def reconnect(self):
        await self.connect()
        # Notify listeners registered via add_reconnect_callback().
        self._reconnect_callbacks(self)

    def channel(self,
                channel_number: int = None,
                publisher_confirms: bool = True,
                on_return_raises=False):
        """Open a new channel and register it for reopening on reconnect."""

        channel = super().channel(
            channel_number=channel_number,
            publisher_confirms=publisher_confirms,
            on_return_raises=on_return_raises,
        )

        self.__channels.add(channel)

        return channel

    @property
    def is_closed(self):
        """Whether this connection has been closed (locally or by the peer)."""
        return self._closed or super().is_closed

    async def close(self, exc=asyncio.CancelledError):
        """Permanently close the connection; no further reconnects happen."""
        if self.is_closed:
            return

        self._closed = True

        if self.connection is None:
            return
        return await super().close(exc)
Example #46
0
class CallbackCollection(Set):
    """A set-like container of callbacks bound to a *sender* object.

    Callbacks may be held strongly or weakly; calling the collection
    invokes every callback with the sender as its first argument.  The
    collection can be frozen to forbid further mutation.
    """

    __slots__ = "__sender", "__callbacks", "__weak_callbacks", "__lock"

    def __init__(self, sender):
        # Weak reference so the collection does not keep its owner alive.
        self.__sender = ref(sender)
        self.__callbacks = set()
        self.__weak_callbacks = WeakSet()
        self.__lock = Lock()

    def add(self, callback: Callable, weak=True):
        """Register *callback*; weakly by default, strongly if ``weak=False``."""
        if self.is_frozen:
            raise RuntimeError("Collection frozen")
        if not callable(callback):
            raise ValueError("Callback is not callable")

        with self.__lock:
            if weak:
                self.__weak_callbacks.add(callback)
            else:
                self.__callbacks.add(callback)

    def remove(self, callback: Callable):
        """Remove *callback*; raises ``KeyError`` if it is in neither set."""
        if self.is_frozen:
            raise RuntimeError("Collection frozen")

        with self.__lock:
            try:
                self.__callbacks.remove(callback)
            except KeyError:
                # Not a strong callback -- try the weak set (raises KeyError
                # itself if the callback is unknown).
                self.__weak_callbacks.remove(callback)

    def clear(self):
        """Drop every registered callback."""
        if self.is_frozen:
            raise RuntimeError("Collection frozen")

        with self.__lock:
            self.__callbacks.clear()
            self.__weak_callbacks.clear()

    @property
    def is_frozen(self) -> bool:
        # freeze() swaps the strong set for a frozenset; that swap is the
        # only marker of the frozen state.
        return isinstance(self.__callbacks, frozenset)

    def freeze(self):
        """Make the collection immutable (add/remove/clear will raise)."""
        if self.is_frozen:
            raise RuntimeError("Collection already frozen")

        with self.__lock:
            self.__callbacks = frozenset(self.__callbacks)
            # There is no frozen WeakSet; a fresh copy is used instead, so
            # weak members can still expire while frozen.
            self.__weak_callbacks = WeakSet(self.__weak_callbacks)

    def unfreeze(self):
        """Make a frozen collection mutable again."""
        if not self.is_frozen:
            raise RuntimeError("Collection is not frozen")

        with self.__lock:
            self.__callbacks = set(self.__callbacks)
            self.__weak_callbacks = WeakSet(self.__weak_callbacks)

    def __contains__(self, x: object) -> bool:
        return x in self.__callbacks or x in self.__weak_callbacks

    def __len__(self) -> int:
        return len(self.__callbacks) + len(self.__weak_callbacks)

    def __iter__(self) -> Iterable[Callable]:
        # Strong callbacks first, then the surviving weak ones.
        return iter(chain(self.__callbacks, self.__weak_callbacks))

    def __bool__(self):
        return bool(self.__callbacks) or bool(self.__weak_callbacks)

    def __copy__(self):
        # Rebuild a collection with the same sender and membership,
        # preserving the frozen state.
        instance = self.__class__(self.__sender())

        with self.__lock:
            for cb in self.__callbacks:
                instance.add(cb, weak=False)

            for cb in self.__weak_callbacks:
                instance.add(cb, weak=True)

        if self.is_frozen:
            instance.freeze()

        return instance

    def __call__(self, *args, **kwargs):
        # Invoke every callback with the sender prepended; a failing
        # callback is logged and does not stop the others.
        # NOTE(review): the lock is held while callbacks run -- if Lock is
        # non-reentrant, a callback mutating this collection would
        # deadlock; confirm intended.
        with self.__lock:
            for cb in self:
                try:
                    cb(self.__sender(), *args, **kwargs)
                except Exception:
                    log.exception("Callback error")
Example #47
0
class IOPubThread(object):
    """An object for sending IOPub messages in a background thread

    Prevents a blocking main thread from delaying output from threads.

    IOPubThread(pub_socket).background_socket is a Socket-API-providing object
    whose IO is always run in a thread.
    """

    def __init__(self, socket, pipe=False):
        """Create IOPub thread

        Parameters
        ----------
        socket : zmq.PUB Socket
            the socket on which messages will be sent.
        pipe : bool
            Whether this process should listen for IOPub messages
            piped from subprocesses.
        """
        self.socket = socket
        self.background_socket = BackgroundSocket(self)
        # Remember the creating pid so forked children can be detected later.
        self._master_pid = os.getpid()
        self._pipe_flag = pipe
        self.io_loop = IOLoop(make_current=False)
        if pipe:
            self._setup_pipe_in()
        self._local = threading.local()
        # Callables queued by schedule(); drained in _handle_event.
        self._events = deque()
        self._event_pipes = WeakSet()
        self._setup_event_pipe()
        self.thread = threading.Thread(target=self._thread_main)
        self.thread.daemon = True
        # Flags for the PyDev debugger: do not trace this IO thread.
        self.thread.pydev_do_not_trace = True
        self.thread.is_pydev_daemon_thread = True

    def _thread_main(self):
        """The inner loop that's actually run in a thread"""
        self.io_loop.make_current()
        # start() blocks until io_loop.stop() is called (see stop()).
        self.io_loop.start()
        self.io_loop.close(all_fds=True)

    def _setup_event_pipe(self):
        """Create the PULL socket listening for events that should fire in this thread."""
        ctx = self.socket.context
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        # Random inproc endpoint name to avoid collisions within the process.
        _uuid = b2a_hex(os.urandom(16)).decode('ascii')
        iface = self._event_interface = 'inproc://%s' % _uuid
        pipe_in.bind(iface)
        self._event_puller = ZMQStream(pipe_in, self.io_loop)
        self._event_puller.on_recv(self._handle_event)

    @property
    def _event_pipe(self):
        """thread-local event pipe for signaling events that should be processed in the thread"""
        try:
            event_pipe = self._local.event_pipe
        except AttributeError:
            # new thread, new event pipe
            ctx = self.socket.context
            event_pipe = ctx.socket(zmq.PUSH)
            event_pipe.linger = 0
            event_pipe.connect(self._event_interface)
            self._local.event_pipe = event_pipe
            # WeakSet so that event pipes will be closed by garbage collection
            # when their threads are terminated
            self._event_pipes.add(event_pipe)
        return event_pipe

    def _handle_event(self, msg):
        """Handle an event on the event pipe

        Content of the message is ignored.

        Whenever *an* event arrives on the event stream,
        *all* waiting events are processed in order.
        """
        # freeze event count so new writes don't extend the queue
        # while we are processing
        n_events = len(self._events)
        for i in range(n_events):
            event_f = self._events.popleft()
            event_f()

    def _setup_pipe_in(self):
        """setup listening pipe for IOPub from forked subprocesses"""
        ctx = self.socket.context

        # use UUID to authenticate pipe messages
        self._pipe_uuid = os.urandom(16)

        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        try:
            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            # Best effort: without the pipe we lose subprocess output but
            # the thread itself still works.
            warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
                "\nsubprocess output will be unavailable."
            )
            self._pipe_flag = False
            pipe_in.close()
            return
        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
        self._pipe_in.on_recv(self._handle_pipe_msg)

    def _handle_pipe_msg(self, msg):
        """handle a pipe message from a subprocess"""
        if not self._pipe_flag or not self._is_master_process():
            return
        # First frame must match the per-process UUID set in _setup_pipe_in.
        if msg[0] != self._pipe_uuid:
            print("Bad pipe message: %s", msg, file=sys.__stderr__)
            return
        self.send_multipart(msg[1:])

    def _setup_pipe_out(self):
        # must be new context after fork
        ctx = zmq.Context()
        pipe_out = ctx.socket(zmq.PUSH)
        pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message
        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
        return ctx, pipe_out

    def _is_master_process(self):
        # True when running in the process that created this thread.
        return os.getpid() == self._master_pid

    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            return CHILD

    def start(self):
        """Start the IOPub thread"""
        self.thread.start()
        # make sure we don't prevent process exit
        # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
        atexit.register(self.stop)

    def stop(self):
        """Stop the IOPub thread"""
        if not self.thread.is_alive():
            return
        self.io_loop.add_callback(self.io_loop.stop)
        self.thread.join()
        # close *all* event pipes, created in any thread
        # event pipes can only be used from other threads while self.thread.is_alive()
        # so after thread.join, this should be safe
        for event_pipe in self._event_pipes:
            event_pipe.close()

    def close(self):
        # Idempotent: closing twice is a no-op.
        if self.closed:
            return
        self.socket.close()
        self.socket = None

    @property
    def closed(self):
        # The socket attribute doubles as the "closed" flag (see close()).
        return self.socket is None

    def schedule(self, f):
        """Schedule a function to be called in our IO thread.

        If the thread is not running, call immediately.
        """
        if self.thread.is_alive():
            self._events.append(f)
            # wake event thread (message content is ignored)
            self._event_pipe.send(b'')
        else:
            f()

    def send_multipart(self, *args, **kwargs):
        """send_multipart schedules actual zmq send in my thread.

        If my thread isn't running (e.g. forked process), send immediately.
        """
        self.schedule(lambda : self._really_send(*args, **kwargs))

    def _really_send(self, msg, *args, **kwargs):
        """The callback that actually sends messages"""
        mp_mode = self._check_mp_mode()

        if mp_mode != CHILD:
            # we are master, do a regular send
            self.socket.send_multipart(msg, *args, **kwargs)
        else:
            # we are a child, pipe to master
            # new context/socket for every pipe-out
            # since forks don't teardown politely, use ctx.term to ensure send has completed
            ctx, pipe_out = self._setup_pipe_out()
            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
            pipe_out.close()
            ctx.term()
Example #48
0
class StudIPSession:
    """An asynchronous scraping session against a Stud.IP instance.

    Logs in via Shibboleth SSO and extracts semester, course and file
    information from the "my courses" pages.  Selecting a semester/ansicht
    mutates server-side user state, so the session remembers the user's
    original selection and schedules a reset after temporary changes.
    """

    _sso_base = attr.ib()  # type: str
    _studip_base = attr.ib()  # type: str
    _http_args = attr.ib()  # type: dict
    _loop = attr.ib()  # type: asyncio.AbstractEventLoop

    def __attrs_post_init__(self):
        # Server-side selections observed for the logged-in user; filled in
        # lazily by get_semesters().
        self._user_selected_semester = None  # type: Semester
        self._user_selected_ansicht = None  # type: str
        # Either False (no reset pending) or a loop-time deadline after
        # which __reset_selections should run.
        self._needs_reset_at = False  # type: int
        self._semester_select_lock = asyncio.Lock()
        self._background_tasks = WeakSet(
        )  # TODO better management of (failing of) background tasks
        if not self._loop:
            self._loop = asyncio.get_event_loop()

        # Pop each known key; anything left over is an unknown argument.
        http_args = dict(self._http_args)
        connector = aiohttp.TCPConnector(
            loop=self._loop,
            limit=http_args.pop("limit"),
            keepalive_timeout=http_args.pop("keepalive_timeout"),
            force_close=http_args.pop("force_close"))
        self.ahttp = aiohttp.ClientSession(
            connector=connector,
            loop=self._loop,
            read_timeout=http_args.pop("read_timeout"),
            conn_timeout=http_args.pop("conn_timeout"))
        if http_args:
            raise ValueError("Unknown http_args %s", http_args)

    async def close(self):
        """Cancel background tasks, restore server-side selections and
        close the HTTP session."""
        try:
            for task in self._background_tasks:
                task.cancel()
            await self.__reset_selections(force=True)
        finally:
            if self.ahttp:
                await self.ahttp.close()

    def _sso_url(self, url):
        # Absolute URL on the SSO host.
        return self._sso_base + url

    def _studip_url(self, url):
        # Absolute URL on the Stud.IP host.
        return self._studip_base + url

    async def do_login(self, user_name, password):
        """Perform the three-step Shibboleth SSO login.

        Raises:
            LoginError: If any step of the SSO handshake fails.
        """
        # Step 1: let Stud.IP redirect us into the SSO flow.
        try:
            async with self.ahttp.get(
                    self._studip_url(
                        "/studip/index.php?again=yes&sso=shib")) as r:
                post_url = parse_login_form(await r.text())
        except (ClientError, ParserError) as e:
            raise LoginError(
                "Could not initialize Shibboleth SSO login") from e

        # Step 2: post the credentials to the identity provider.
        try:
            async with self.ahttp.post(self._sso_url(post_url),
                                       data={
                                           "j_username": user_name,
                                           "j_password": password,
                                           "uApprove.consent-revocation": "",
                                           "_eventId_proceed": ""
                                       }) as r:
                form_data = parse_saml_form(await r.text())
        except (ClientError, ParserError) as e:
            raise LoginError("Shibboleth SSO login failed") from e

        # Step 3: relay the SAML assertion back to Stud.IP.
        try:
            async with self.ahttp.post(
                    self._studip_url("/Shibboleth.sso/SAML2/POST"),
                    data=form_data) as r:
                await r.text()
                if not r.url.path.startswith("/studip"):
                    raise LoginError(
                        "Invalid redirect after Shibboleth SSO login to %s" %
                        r.url)
        except ClientError as e:
            raise LoginError("Could not complete Shibboleth SSO login") from e

    async def get_semesters(self) -> List[Semester]:
        """List all semesters, remembering the user's current selection."""
        async with self.ahttp.get(
                self._studip_url("/studip/dispatch.php/my_courses")) as r:
            selected_semester, selected_ansicht = parse_user_selection(
                await r.text())
            # Only record the selection the first time; later pages may show
            # our own temporary selections instead of the user's.
            self._user_selected_semester = self._user_selected_semester or selected_semester
            self._user_selected_ansicht = self._user_selected_ansicht or selected_ansicht
            log.debug("User selected semester %s in ansicht %s",
                      self._user_selected_semester,
                      self._user_selected_ansicht)
            return list(parse_semester_list(await r.text()))

    async def get_courses(self, semester: Semester) -> List[Course]:
        """List the courses of *semester*, temporarily switching the
        server-side selection if needed."""
        if not self._user_selected_semester or not self._user_selected_ansicht:
            await self.get_semesters()
            assert self._user_selected_semester and self._user_selected_ansicht

        async with self._semester_select_lock:
            # Course parsing requires the "sem_number" ansicht.
            change_ansicht = self._user_selected_ansicht != "sem_number"
            if change_ansicht:
                await self.__select_ansicht("sem_number")

            change_semester = self._user_selected_semester != semester.id
            if change_semester or change_ansicht:
                # Schedule a restore of the user's own selection; the
                # deadline (9s) is slightly before the timer (10s) so the
                # reset sees an expired deadline when it fires.
                self._needs_reset_at = self._loop.time() + 9
                self._background_tasks.add(
                    self._loop.call_later(
                        10, lambda: asyncio.ensure_future(
                            self.__reset_selections(quiet=True))))

            courses = list(
                parse_course_list(await self.__select_semester(semester.id),
                                  semester))
            return courses

    async def __select_semester(self, semester):
        # Switch the server-side semester selection; returns the page HTML.
        semester = semester or "current"
        async with self.ahttp.post(self._studip_url(
                "/studip/dispatch.php/my_courses/set_semester"),
                                   data={"sem_select": semester}) as r:
            selected_semester, selected_ansicht = parse_user_selection(
                await r.text())
            assert selected_semester == semester, "Tried to select semester %s, but Stud.IP delivered semester %s" % \
                                                  (semester, selected_semester)
            return await r.text()

    async def __select_ansicht(self, ansicht):
        # Switch the server-side view ("ansicht"); returns the page HTML.
        ansicht = ansicht or "sem_number"
        async with self.ahttp.post(self._studip_url(
                "/studip/dispatch.php/my_courses/store_groups"),
                                   data={"select_group_field": ansicht}) as r:
            selected_semester, selected_ansicht = parse_user_selection(
                await r.text())
            assert selected_ansicht == ansicht, "Tried to select ansicht %s, but Stud.IP delivered ansicht %s" % \
                                                (ansicht, selected_ansicht)
            return await r.text()

    async def __reset_selections(self, force=False, quiet=False):
        """Restore the user's own semester/ansicht selection.

        With ``force`` the reset happens unconditionally; otherwise only
        once the ``_needs_reset_at`` deadline has passed.  With ``quiet``
        failures are logged instead of raised (used from the timer).
        """
        try:
            async with self._semester_select_lock:
                if not self.ahttp or self.ahttp.closed:
                    return
                if not force and (not self._needs_reset_at
                                  or self._needs_reset_at > self._loop.time()):
                    return

                if self._user_selected_semester:
                    await self.__select_semester(self._user_selected_semester)
                if self._user_selected_ansicht:
                    await self.__select_ansicht(self._user_selected_ansicht)

                self._needs_reset_at = False
        except:
            if quiet:
                log.warning("Could not reset semester selection",
                            exc_info=True)
            else:
                raise

    async def get_course_files(self, course: Course) -> Folder:
        """Fetch the root folder listing of *course*."""
        async with self.ahttp.get(
                self._studip_url(
                    "/studip/dispatch.php/course/files/index?cid=" +
                    course.id)) as r:
            return parse_file_list_index(await r.text(), course, None)

    async def get_folder_files(self, folder: Folder) -> Folder:
        """Fetch the listing of *folder* within its course."""
        async with self.ahttp.get(
                self._studip_url(
                    "/studip/dispatch.php/course/files/index/%s?cid=%s" %
                    (folder.id, folder.course.id))) as r:
            return parse_file_list_index(await r.text(), folder.course, folder)

    async def get_file_info(self, file: File) -> File:
        """Fetch the detail page of *file* and return the enriched File."""
        async with self.ahttp.get(
                self._studip_url(
                    "/studip/dispatch.php/file/details/%s?cid=%s" %
                    (file.id, file.course.id))) as r:
            return parse_file_details(await r.text(), file)

    async def download_file_contents(self,
                                     studip_file: File,
                                     local_dest: str = None,
                                     chunk_size: int = 1024 * 256) -> Download:
        """Start downloading *studip_file* to *local_dest*.

        Returns a Download whose ``completed`` future resolves once the
        file is fully written and its mtime set to the Stud.IP timestamp.
        """
        log.info("Starting download %s -> %s", studip_file, local_dest)
        download = Download(self.ahttp, self._get_download_url(studip_file),
                            local_dest, chunk_size)
        await download.start()
        # Wrap the original completion future so we can verify coverage and
        # fix the file's timestamp afterwards.
        old_completed_future = download.completed

        async def await_completed():
            ranges = await old_completed_future
            log.info("Completed download %s -> %s", studip_file, local_dest)

            # Verify that the received ranges cover the file without gaps.
            val = 0
            for r in ranges:
                assert r.start <= val
                val = r.stop
            assert val == download.total_length

            if studip_file.changed:
                # Set the local mtime to the Stud.IP modification time;
                # os.utime is blocking, so run it in the default executor.
                timestamp = time.mktime(studip_file.changed.timetuple())
                await self._loop.run_in_executor(None, os.utime, local_dest,
                                                 (timestamp, timestamp))
            else:
                log.warning(
                    "Can't set timestamp of file %s :: %s, because the value wasn't loaded from Stud.IP",
                    studip_file, local_dest)

            return ranges

        download.completed = asyncio.ensure_future(await_completed())
        return download

    def _get_download_url(self, studip_file):
        # Direct download link; file_name is included so the server can set
        # a sensible attachment name.
        return self._studip_url(
            "/studip/sendfile.php?force_download=1&type=0&" +
            urlencode({
                "file_id": studip_file.id,
                "file_name": studip_file.name
            }))
Example #49
0
class Environment:
    """A chain of scopes that tracks its elements.

    A value "is an element of" an :class:`Environment` when it is indirectly
    referenced by it (i.e., when the value cannot be garbage collected until the
    :class:`Environment` is). In order for :class:`Environments <Environment>`
    to keep track of their elements, they must be manually notified of any new
    values that may enter them. Therefore, **any** code that allocates a
    :class:`~parthial.vals.LispVal` **must** immediately :meth:`add <new>` it to
    any :class:`Environments <Environment>` that it may become an element of.

    Attributes:
        scopes (list of dict-likes): My chain of scopes. Earlier scopes are
            deeper.

    Args:
        globals (dict-like, optional): My global scope.
        max_things (int, optional): The maximum number of elements that I may
            contain, after which no more may be added.
    """
    def __init__(self, globals=None, max_things=5000):
        # ``None`` sentinel instead of a ``{}`` default: a mutable default
        # dict would be created once and shared by every Environment built
        # without an explicit global scope, leaking assignments between
        # instances.
        self.globals = {} if globals is None else globals
        self.scopes = ChainMap()
        self.max_things = max_things
        # Weak references only: tracking an element must not itself keep
        # the element alive.
        self.things = WeakSet()

    @contextmanager
    def scopes_as(self, new_scopes):
        """Replace my :attr:`scopes` for the duration of the with block.

        My global scope is not replaced.

        Args:
            new_scopes (list of dict-likes): The new :attr:`scopes` to use.
        """
        old_scopes, self.scopes = self.scopes, new_scopes
        try:
            yield
        finally:
            # Restore even if the with-block raises.
            self.scopes = old_scopes

    @contextmanager
    def new_scope(self, new_scope=None):
        """Add a new innermost scope for the duration of the with block.

        Args:
            new_scope (dict-like, optional): The scope to add.
        """
        # Same mutable-default hazard as __init__: assignments made inside
        # the with-block mutate the innermost scope, so a shared default
        # dict would leak bindings into later calls.
        if new_scope is None:
            new_scope = {}
        old_scopes, self.scopes = self.scopes, self.scopes.new_child(new_scope)
        try:
            yield
        finally:
            self.scopes = old_scopes

    def new(self, val):
        """Add a new value to me.

        Args:
            val (LispVal): The value to be added.

        Returns:
            LispVal: The added value.

        Raises:
            ~parthial.errs.LimitationError: If I already contain the maximum
                number of elements.
        """
        if len(self.things) >= self.max_things:
            raise LimitationError('too many things')
        self.things.add(val)
        return val

    def rec_new(self, val):
        """Recursively add a new value and its children to me.

        Args:
            val (LispVal): The value to be added.

        Returns:
            LispVal: The added value.
        """
        # Children first, so the element count check in new() sees the
        # parent last; already-tracked values are skipped entirely.
        if val not in self.things:
            for child in val.children():
                self.rec_new(child)
            self.new(val)
        return val

    def add_rec_new(self, k, val):
        """Recursively add a new value and its children to me, and assign a
        variable to it.

        Args:
            k (str): The name of the variable to assign.
            val (LispVal): The value to be added and assigned.

        Returns:
            LispVal: The added value.
        """
        self.rec_new(val)
        self[k] = val
        return val

    def new_child(self):
        """Get a new child :class:`Environment`.

        The child's scopes will be mine, with an additional empty innermost
        one.

        Returns:
            Environment: The child.
        """
        child = Environment(self.globals, self.max_things)
        child.scopes = self.scopes.new_child()
        # The child starts out tracking the same elements (copied into its
        # own WeakSet, so later additions are independent).
        child.things = WeakSet(self.things)
        return child

    def __getitem__(self, k):
        """Look up a variable.

        Args:
            k (str): The name of the variable to look up.

        Returns:
            LispVal: The value assigned to the variable.

        Raises:
            KeyError: If the variable has not been assigned to.
        """
        # Scopes shadow globals.
        chain = ChainMap(self.scopes, self.globals)
        return chain.__getitem__(k)

    def __setitem__(self, k, val):
        """Assign to a variable.

        This will only mutate my innermost scope.

        This does **not** :meth:`add <new>` anything to me.

        Args:
            k (str): The name of the variable to assign to.
            val (LispVal): The value to assign to the variable.
        """
        self.scopes.__setitem__(k, val)

    def __delitem__(self, k):
        """Clear a variable.

        This will only mutate the innermost scope.

        Args:
            k (str): The name of the variable to clear.

        Raises:
            KeyError: If the variable has not been assigned to.
        """
        return self.scopes.__delitem__(k)

    def __contains__(self, k):
        """Check whether a variable has been assigned to.

        This is **not** the same kind of element-of as described in the
        class documentation.

        Args:
            k (str): The name of the variable to check.

        Returns:
            bool: Whether or not the variable has been assigned to.
        """
        chain = ChainMap(self.scopes, self.globals)
        return chain.__contains__(k)
Example #50
0
class Dispatcher (EventMixin):
  """Dispatches websocket-session RPC traffic and pending return/error
  handlers, raising NewSession/EndSession/Query events."""
  #TODO: Factor out the dispatcher part
  _eventMixin_events = set([NewSession, EndSession, Query])

  # Shared secret for sessions -- presumably assigned externally; TODO confirm.
  secret = None

  # Cap on outstanding return/error handlers; oldest entries are evicted
  # first (see _create_waiting).
  max_waiting = 75

  _websocket_enabled = False

  def __init__ (self, websocket_path, websocket_session_type):
    self.exposed_objects = {}
    self.exposed_functions = {}
    self.websocket_path = websocket_path
    self.websocket_session_type = websocket_session_type
    # Weak references: sessions are owned elsewhere and may vanish.
    self.sessions = WeakSet()
    if websocket_path:
      self.websocket_enabled = True

    #self.exposed_functions['_query'] = self._query
    #self.exposed_functions['_return_dispatch'] = self._return_dispatch
    #self.exposed_functions['_error_dispatch'] = self._error_dispatch

    # Ordered so that the oldest waiting handler can be evicted first.
    self._waiting_returns = OrderedDict()
    self._waiting_errors = OrderedDict()
    self._waiting_id = 1

  def _pdmethod__new_uuid (self, ctxt):
    # Hand a fresh UUID back to the caller via the context object.
    ctxt.ret(str(uuid.uuid1()))

  def _pdmethod__query (self, ctxt, *args, **kw):
    self.raiseEvent(Query, args, kw)

  def _pdmethod__return_dispatch (self, ctxt, identifier, data):
    # Resolve a pending return handler registered via _create_waiting_return.
    f = self._waiting_returns.pop(identifier, None)
    if f is None:
      self.error("Didn't have waiting return event %s", identifier)
      return
    f(data)

  def _pdmethod__error_dispatch (self, ctxt, identifier, data):
    # Resolve a pending error handler registered via _create_waiting_error.
    f = self._waiting_errors.pop(identifier, None)
    if f is None:
      self.error("Didn't have waiting error event %s", identifier)
      return
    f(data)

  def _create_waiting (self, d, f):
    """Register handler *f* in dict *d* and return its new identifier,
    evicting the oldest entries once max_waiting is exceeded."""
    while len(d) > self.max_waiting:
      d.popitem(last=False)
    identifier = self._waiting_id
    self._waiting_id += 1
    d[identifier] = f
    return identifier

  def _create_waiting_return (self, f):
    return self._create_waiting(self._waiting_returns, f)

  def _create_waiting_error (self, f):
    return self._create_waiting(self._waiting_errors, f)

  def _register_session (self, session):
    self.sessions.add(session)

  @property
  def websocket_enabled (self):
    return self._websocket_enabled

  @websocket_enabled.setter
  def websocket_enabled (self, value):
    # One-way switch: enabling registers the handler with the web server.
    if self._websocket_enabled is value: return
    if value is False: return # Currently don't support disabling it
    self._websocket_enabled = True
    core.WebServer.set_handler(self.websocket_path,
                               self.websocket_session_type,
                               args=self)
Example #51
0
class TestWeakSet(unittest.TestCase):
    """Exercise WeakSet's set-like API and its weak-reference semantics.

    Elements are ``ustr``/``Foo`` instances (defined elsewhere in this
    file) rather than plain ``str`` so they can actually be garbage
    collected; the lists created in :meth:`setUp` hold the strong
    references that keep them alive for the duration of each test.
    """

    def setUp(self):
        # need to keep references to them
        self.items = [ustr(c) for c in ('a', 'b', 'c')]
        self.items2 = [ustr(c) for c in ('x', 'y', 'z')]
        self.letters = [ustr(c) for c in string.ascii_letters]
        self.s = WeakSet(self.items)
        self.d = dict.fromkeys(self.items)
        self.obj = ustr('F')
        self.fs = WeakSet([self.obj])

    def test_methods(self):
        # WeakSet should mirror set's public API (minus test helpers).
        weaksetmethods = dir(WeakSet)
        for method in dir(set):
            if method == 'test_c_api' or method.startswith('_'):
                continue
            self.assertTrue(method in weaksetmethods,
                            "WeakSet missing method " + method)

    def test_new_or_init(self):
        self.assertRaises(TypeError, WeakSet, [], 2)

    def test_len(self):
        self.assertEqual(len(self.s), len(self.d))
        self.assertEqual(len(self.fs), 1)
        # Dropping the last strong reference should empty the weak set
        # (relies on prompt collection, e.g. CPython's refcounting).
        del self.obj
        self.assertEqual(len(self.fs), 0)

    def test_contains(self):
        for c in self.letters:
            self.assertEqual(c in self.s, c in self.d)
        # Unhashable candidates must raise, as with a regular set.
        self.assertRaises(TypeError, self.s.__contains__, [[]])
        self.assertTrue(self.obj in self.fs)
        del self.obj
        self.assertTrue(ustr('F') not in self.fs)

    def test_union(self):
        u = self.s.union(self.items2)
        for c in self.letters:
            self.assertEqual(c in u, c in self.d or c in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(u), WeakSet)
        self.assertRaises(TypeError, self.s.union, [[]])
        # union() should accept any iterable, not just sets.
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet(self.items + self.items2)
            c = C(self.items2)
            self.assertEqual(self.s.union(c), x)

    def test_or(self):
        i = self.s.union(self.items2)
        self.assertEqual(self.s | set(self.items2), i)
        self.assertEqual(self.s | frozenset(self.items2), i)

    def test_intersection(self):
        i = self.s.intersection(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet([])
            self.assertEqual(self.s.intersection(C(self.items2)), x)

    def test_isdisjoint(self):
        self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
        self.assertTrue(not self.s.isdisjoint(WeakSet(self.letters)))

    def test_and(self):
        i = self.s.intersection(self.items2)
        self.assertEqual(self.s & set(self.items2), i)
        self.assertEqual(self.s & frozenset(self.items2), i)

    def test_difference(self):
        i = self.s.difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c not in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.difference, [[]])

    def test_sub(self):
        i = self.s.difference(self.items2)
        self.assertEqual(self.s - set(self.items2), i)
        self.assertEqual(self.s - frozenset(self.items2), i)

    def test_symmetric_difference(self):
        i = self.s.symmetric_difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, (c in self.d) ^ (c in self.items2))
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.symmetric_difference, [[]])

    def test_xor(self):
        i = self.s.symmetric_difference(self.items2)
        self.assertEqual(self.s ^ set(self.items2), i)
        self.assertEqual(self.s ^ frozenset(self.items2), i)

    def test_sub_and_super(self):
        pl, ql, rl = map(lambda s: [ustr(c) for c in s],
                         ['ab', 'abcde', 'def'])
        p, q, r = map(WeakSet, (pl, ql, rl))
        self.assertTrue(p < q)
        self.assertTrue(p <= q)
        self.assertTrue(q <= q)
        self.assertTrue(q > p)
        self.assertTrue(q >= p)
        self.assertFalse(q < r)
        self.assertFalse(q <= r)
        self.assertFalse(q > r)
        self.assertFalse(q >= r)
        self.assertTrue(set('a').issubset('abc'))
        self.assertTrue(set('abc').issuperset('a'))
        self.assertFalse(set('a').issubset('cbs'))
        self.assertFalse(set('cbs').issuperset('a'))

    def test_gc(self):
        # Create a nest of cycles to exercise overall ref count check
        s = WeakSet(Foo() for i in range(1000))
        for elem in s:
            elem.cycle = s
            elem.sub = elem
            elem.set = WeakSet([elem])

    def test_subclass_with_custom_hash(self):
        # Bug #1257731
        class H(WeakSet):
            def __hash__(self):
                return int(id(self) & 0x7fffffff)

        s = H()
        f = set()
        f.add(s)
        self.assertTrue(s in f)
        f.remove(s)
        f.add(s)
        f.discard(s)

    def test_init(self):
        # __init__ on an existing set replaces, not merges, its contents.
        s = WeakSet()
        s.__init__(self.items)
        self.assertEqual(s, self.s)
        s.__init__(self.items2)
        self.assertEqual(s, WeakSet(self.items2))
        self.assertRaises(TypeError, s.__init__, s, 2)
        self.assertRaises(TypeError, s.__init__, 1)

    def test_constructor_identity(self):
        # Constructing from another WeakSet must copy, not alias.
        s = WeakSet(self.items)
        t = WeakSet(s)
        self.assertNotEqual(id(s), id(t))

    def test_hash(self):
        self.assertRaises(TypeError, hash, self.s)

    def test_clear(self):
        self.s.clear()
        self.assertEqual(self.s, WeakSet([]))
        self.assertEqual(len(self.s), 0)

    def test_copy(self):
        dup = self.s.copy()
        self.assertEqual(self.s, dup)
        self.assertNotEqual(id(self.s), id(dup))

    def test_add(self):
        x = ustr('Q')
        self.s.add(x)
        self.assertTrue(x in self.s)
        # Re-adding an existing element must be a no-op.
        dup = self.s.copy()
        self.s.add(x)
        self.assertEqual(self.s, dup)
        self.assertRaises(TypeError, self.s.add, [])
        # The Foo() here has no strong reference, so it vanishes at once.
        self.fs.add(Foo())
        self.assertTrue(len(self.fs) == 1)
        self.fs.add(self.obj)
        self.assertTrue(len(self.fs) == 1)

    def test_remove(self):
        x = ustr('a')
        self.s.remove(x)
        self.assertTrue(x not in self.s)
        self.assertRaises(KeyError, self.s.remove, x)
        self.assertRaises(TypeError, self.s.remove, [])

    def test_discard(self):
        a, q = ustr('a'), ustr('Q')
        self.s.discard(a)
        self.assertTrue(a not in self.s)
        self.s.discard(q)
        self.assertRaises(TypeError, self.s.discard, [])

    def test_pop(self):
        for i in range(len(self.s)):
            elem = self.s.pop()
            self.assertTrue(elem not in self.s)
        self.assertRaises(KeyError, self.s.pop)

    def test_update(self):
        # In-place mutators follow the stdlib convention of returning None.
        retval = self.s.update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            self.assertTrue(c in self.s)
        self.assertRaises(TypeError, self.s.update, [[]])

    def test_update_set(self):
        self.s.update(set(self.items2))
        for c in (self.items + self.items2):
            self.assertTrue(c in self.s)

    def test_ior(self):
        self.s |= set(self.items2)
        for c in (self.items + self.items2):
            self.assertTrue(c in self.s)

    def test_intersection_update(self):
        retval = self.s.intersection_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertTrue(c in self.s)
            else:
                self.assertTrue(c not in self.s)
        self.assertRaises(TypeError, self.s.intersection_update, [[]])

    def test_iand(self):
        self.s &= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertTrue(c in self.s)
            else:
                self.assertTrue(c not in self.s)

    def test_difference_update(self):
        retval = self.s.difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertTrue(c in self.s)
            else:
                self.assertTrue(c not in self.s)
        self.assertRaises(TypeError, self.s.difference_update, [[]])
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_isub(self):
        self.s -= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertTrue(c in self.s)
            else:
                self.assertTrue(c not in self.s)

    def test_symmetric_difference_update(self):
        retval = self.s.symmetric_difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertTrue(c in self.s)
            else:
                self.assertTrue(c not in self.s)
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_ixor(self):
        self.s ^= set(self.items2)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertTrue(c in self.s)
            else:
                self.assertTrue(c not in self.s)

    def test_inplace_on_self(self):
        # In-place operators with self as the right operand must not
        # corrupt the set while it is being mutated.
        t = self.s.copy()
        t |= t
        self.assertEqual(t, self.s)
        t &= t
        self.assertEqual(t, self.s)
        t -= t
        self.assertEqual(t, WeakSet())
        t = self.s.copy()
        t ^= t
        self.assertEqual(t, WeakSet())

    def test_eq(self):
        # issue 5964
        self.assertTrue(self.s == self.s)
        self.assertTrue(self.s == WeakSet(self.items))
        self.assertFalse(self.s == set(self.items))
        self.assertFalse(self.s == list(self.items))
        self.assertFalse(self.s == tuple(self.items))
        self.assertFalse(self.s == WeakSet([Foo]))
        self.assertFalse(self.s == 1)
Example #52
0
class binhoDevice(binhoAPI):
    """
    Class describing Binho host adapters.
    """

    # The mappings from GPIO names to port numbers. Paths in names can be delineated
    # with underscores to group gpios. For example, if Jumper 7, Pin 3 is Port 5, Pin 11,
    # you could add an entry that reads "J7_P3": (5, 11).
    SIMPLE_CLASS_MAPPINGS = {"gpio": ("gpio", GPIO)}
    # Class-level placeholder; _populate_leds replaces it on the instance
    # with a dict mapping 1-based index -> LED (via _add_interface).
    leds = []

    def __init__(self, *args, **kwargs):
        """ Initialize a new binhoDevice instance with our additional properties. """

        # Create a new list of interfaces and programmers.
        self._interfaces = []
        self._instantiated_programmers = WeakSet()

        super().__init__(*args, **kwargs)

    def available_interfaces(self):
        """ Returns a list of peripheral properties that exist on this board. """
        # Return a copy so callers cannot mutate our internal list.
        return self._interfaces[:]

    def _populate_leds(self, led_count):
        """Adds the standard set of LEDs to the board object.
        Args:
            led_count -- The number of LEDS present on the board.
        """

        # Installs a fresh dict as self.leds, shadowing the class attribute.
        self._add_interface("leds", {})

        # LED indices are 1-based.
        for i in range(1, led_count + 1):
            self.leds[i] = LED(self, i)

    @staticmethod
    def _populate_gpio(gpio, mapping):
        """ Adds GPIO pin definitions to the board's main GPIO object. """

        # Handle each GPIO mapping.
        for name, pin in mapping.items():
            gpio.registerGPIO(name, pin)

    @staticmethod
    def _populate_adc(adc, mapping):
        """Adds ADC definitions to the board."""

        # Handle each ADC mapping.
        for name, pin in mapping.items():
            adc.registerADC(name, pin)

    @staticmethod
    def _populate_dac(dac, mapping):
        """Adds DAC definitions to the board."""

        # Handle each DAC mapping.
        for name, pin in mapping.items():
            dac.registerDAC(name, pin)

    def _add_interface(self, name, instance):
        """
        Adds a peripheral to the Binho host adapter object. Prefer this over adding attributes directly,
        as it adds peripherals to a list that can be queried by the user.
        Arguments:
            name -- The name of the attribute to add to this board. "i2c" would create a
                .i2c property on this board.
            instance -- The object to add as that property.
        """

        self._interfaces.append(name)
        setattr(self, name, instance)

    def _add_simple_interface(self, name, cls, *args, **kwargs):
        """Adds a given interface to this board.
        Arguments:
            name -- The attribute name to be added to the board.
            cls -- The class to be instantiated to create the given object.
        """

        # Create an instance of the relevant peripheral class...
        instance = cls(self, *args, **kwargs)

        # ... and add it to this board.
        self._add_interface(name, instance)

    def _populate_simple_interfaces(self):
        """ Adds simple interfaces to the board object by parsing the SIMPLE_CLASS_MAPPINGS dictionary. """

        # pylint: disable=unused-variable
        for comms_class, interface in self.SIMPLE_CLASS_MAPPINGS.items():

            name, python_class = interface
            self._add_simple_interface(name, python_class)
        # pylint: enable=unused-variable

    @classmethod
    def available_accessories(cls):
        """ Returns the list of available accessory drivers. """
        return binhoAccessory.available_accessories()

    def attach_accessory(self, name, *args, **kwargs):
        """ Creates and returns the accessory with the given name, bound to this board. """

        # Create a new object for the given neighbor...
        accessory = binhoAccessory.from_name(name, self, *args, **kwargs)

        # TODO: register this and add it to a .accessory object?

        return accessory

    @classmethod
    def available_programmers(cls, as_dictionary=False):
        """ Returns the list of available programmers. """

        programmers = {}

        # A programmer module is any submodule exposing create_programmer().
        for module in ProgrammerModules.__dict__.values():
            if isinstance(module, ModuleType) and hasattr(
                    module, "create_programmer"):
                module_name = module.__name__.split(".")[-1]
                programmers[module_name] = module

        if as_dictionary:
            return programmers

        return list(programmers.values())

    def create_programmer(self, name, *args, **kwargs):
        """ Creates a new instance of the programmer with the given name. """

        try:
            programmer_module = self.available_programmers(True)[name]
            programmer = programmer_module.create_programmer(
                self, *args, **kwargs)

            # Keep a weak reference to the relevant programmer.
            # This is useful for re-attaching programmers after a disconnect.
            self._instantiated_programmers.add(programmer)

            # Finally, return the created programmer.
            return programmer

        except KeyError as e:
            raise DriverCapabilityError(
                "no available programmer named {}".format(name)) from e

    def __dir__(self):
        """ Generate a cleaned-up dir listing for the relevant board. """

        # Keep only names starting with a lowercase ASCII letter, hiding
        # private/dunder attributes and constants from interactive use.
        items = super().__dir__()
        return [item for item in items if item[0] in string.ascii_lowercase]
Example #53
0
class Events(object):
    """
    This class implements the :attr:`~picraft.world.World.events` attribute.

    There are two ways of responding to picraft's events: the first is to
    :meth:`poll` for them manually, and process each event in the resulting
    list::

        >>> for event in world.events.poll():
        ...     print(repr(event))
        ...
        <BlockHitEvent pos=1,1,1 face="y+" player=1>,
        <PlayerPosEvent old_pos=0.2,1.0,0.7 new_pos=0.3,1.0,0.7 player=1>

    The second is to "tag" functions as event handlers with the decorators
    provided and then call the :meth:`main_loop` function which will handle
    polling the server for you, and call all the relevant functions as needed::

        @world.events.on_block_hit(pos=Vector(1,1,1))
        def hit_block(event):
            print('You hit the block at %s' % event.pos)

        world.events.main_loop()

    By default, only block hit events will be tracked. This is because it is
    the only type of event that the Minecraft server provides information about
    itself, and thus the only type of event that can be processed relatively
    efficiently. If you wish to track player positions, assign a set of player
    ids to the :attr:`track_players` attribute. If you wish to include idle
    events (which fire when nothing else is produced in response to
    :meth:`poll`) then set :attr:`include_idle` to ``True``.

    .. note::

        If you are using a Raspberry Juice server, chat post events are also
        tracked by default. Chat post events are only supported with Raspberry
        Juice servers; Minecraft Pi edition doesn't support chat post events.

    Finally, the :attr:`poll_gap` attribute specifies how long to pause during
    each iteration of :meth:`main_loop` to permit event handlers some time to
    interact with the server. Setting this to 0 will provide the fastest
    response to events, but will result in event handlers having to fight with
    event polling for access to the server.
    """

    def __init__(self, connection, poll_gap=0.1, include_idle=False):
        """Create an events helper bound to *connection*.

        :param connection: the server connection used for polling.
        :param poll_gap: seconds to pause per :meth:`main_loop` iteration.
        :param include_idle: whether :meth:`poll` may yield idle events.
        """
        self._connection = connection
        # Handler objects registered via the on_* decorators.
        self._handlers = []
        # Instances of @has_handlers classes; weak so dead ones drop out.
        self._handler_instances = WeakSet()
        self._poll_gap = poll_gap
        self._include_idle = include_idle
        # Maps tracked player id -> last known (rounded) position.
        self._track_players = {}

    def _get_poll_gap(self):
        return self._poll_gap
    def _set_poll_gap(self, value):
        # Coerce to float so ints and numeric strings are accepted.
        self._poll_gap = float(value)
    poll_gap = property(_get_poll_gap, _set_poll_gap, doc="""\
        The length of time (in seconds) to pause during :meth:`main_loop`.

        This property specifies the length of time to wait at the end of each
        iteration of :meth:`main_loop`. By default this is 0.1 seconds.

        The purpose of the pause is to give event handlers executing in the
        background time to communicate with the Minecraft server. Setting this
        to 0.0 will result in faster response to events, but also starves
        threaded event handlers of time to communicate with the server,
        resulting in "choppy" performance.
        """)

    def _get_track_players(self):
        # Expose only the ids; positions are an internal detail.
        return self._track_players.keys()
    def _set_track_players(self, value):
        # Accept either an iterable of player ids or a single id. In both
        # cases each player's current (rounded) position is recorded as the
        # baseline that poll() compares against.
        try:
            self._track_players = {
                pid: Player(self._connection, pid).pos.round(1)
                for pid in value
                }
        except TypeError:
            # A bare int is not iterable; treat it as a single player id.
            if not isinstance(value, int):
                raise ValueError(
                        'track_players value must be a player id '
                        'or a sequence of player ids')
            self._track_players = {
                value: Player(self._connection, value).pos.round(1)
                }
        if self._connection.server_version != 'raspberry-juice':
            # Filter out calculated directions for untracked players
            self._connection._directions = {
                pid: delta
                for (pid, delta) in self._connection._directions.items()
                if pid in self._track_players
                }
    track_players = property(_get_track_players, _set_track_players, doc="""\
        The set of player ids for which movement should be tracked.

        By default the :meth:`poll` method will not produce player position
        events (:class:`PlayerPosEvent`). Producing these events requires extra
        interactions with the Minecraft server (one for each player tracked)
        which slow down response to block hit events.

        If you wish to track player positions, set this attribute to the set of
        player ids you wish to track and their positions will be stored.  The
        next time :meth:`poll` is called it will query the positions for all
        specified players and fire player position events if they have changed.

        Given that the :attr:`~picraft.world.World.players` attribute
        represents a dictionary mapping player ids to players, if you wish to
        track all players you can simply do::

            >>> world.events.track_players = world.players
        """)

    def _get_include_idle(self):
        return self._include_idle
    def _set_include_idle(self, value):
        # Normalize any truthy/falsy value to a strict bool.
        self._include_idle = bool(value)
    include_idle = property(_get_include_idle, _set_include_idle, doc="""\
        If ``True``, generate an idle event when no other events would be
        generated by :meth:`poll`. This attribute defaults to ``False``.
        """)

    def clear(self):
        """
        Forget all pending events that have not yet been retrieved with
        :meth:`poll`.

        This method is used to clear the list of events that have occurred
        since the last call to :meth:`poll` without retrieving them. This is
        useful for ensuring that events subsequently retrieved definitely
        occurred *after* the call to :meth:`clear`.
        """
        # Re-setting the tracked players refreshes each position baseline,
        # so movements since the last poll are not reported later.
        self._set_track_players(self._get_track_players())
        self._connection.send('events.clear()')

    def poll(self):
        """
        Return a list of all events that have occurred since the last call to
        :meth:`poll`.

        For example::

            >>> w = World()
            >>> w.events.track_players = w.players
            >>> w.events.include_idle = True
            >>> w.events.poll()
            [<PlayerPosEvent old_pos=0.2,1.0,0.7 new_pos=0.3,1.0,0.7 player=1>,
             <BlockHitEvent pos=1,1,1 face="x+" player=1>,
             <BlockHitEvent pos=1,1,1 face="x+" player=1>]
            >>> w.events.poll()
            [<IdleEvent>]
        """
        def player_pos_events(positions):
            # Yield a PlayerPosEvent for each tracked player whose rounded
            # position changed since the last poll; updates the stored
            # baseline in *positions* as a side effect.
            for pid, old_pos in positions.items():
                player = Player(self._connection, pid)
                new_pos = player.pos.round(1)
                if old_pos != new_pos:
                    if self._connection.server_version != 'raspberry-juice':
                        # Calculate directions for tracked players on platforms
                        # which don't provide it natively
                        self._connection._directions[pid] = new_pos - old_pos
                    yield PlayerPosEvent(old_pos, new_pos, player)
                positions[pid] = new_pos

        def block_hit_events():
            # The server returns hits as a single '|'-separated string.
            s = self._connection.transact('events.block.hits()')
            if s:
                for e in s.split('|'):
                    yield BlockHitEvent.from_string(self._connection, e)

        def chat_post_events():
            # Chat post events are only available on Raspberry Juice servers.
            if self._connection.server_version == 'raspberry-juice':
                s = self._connection.transact('events.chat.posts()')
                if s:
                    for e in s.split('|'):
                        yield ChatPostEvent.from_string(self._connection, e)

        events = list(player_pos_events(self._track_players)) + list(block_hit_events()) + list(chat_post_events())

        if events:
            return events
        elif self._include_idle:
            # Nothing happened; optionally report that fact explicitly.
            return [IdleEvent()]
        else:
            return []

    def main_loop(self):
        """
        Starts the event polling loop when using the decorator style of event
        handling (see :meth:`on_block_hit`).

        This method will not return, so be sure that you have specified all
        your event handlers before calling it. The event loop can only be
        broken by an unhandled exception, or by closing the world's connection
        (in the latter case the resulting :exc:`~picraft.exc.ConnectionClosed`
        exception will be suppressed as it is assumed that you want to end the
        script cleanly).
        """
        logger.info('Entering event loop')
        try:
            while True:
                # Poll + dispatch, then yield time to background handlers.
                self.process()
                time.sleep(self.poll_gap)
        except ConnectionClosed:
            # Deliberate suppression: a closed connection ends the loop cleanly.
            logger.info('Connection closed; exiting event loop')

    def process(self):
        """
        Poll the server for events and call any relevant event handlers
        registered with :meth:`on_block_hit`.

        This method is called repeatedly by the event handler loop implemented
        by :meth:`main_loop`; developers should only call this method when
        implementing their own event loop manually, or when their (presumably
        non-threaded) event handler is engaged in a long operation and they
        wish to permit events to be processed in the meantime.
        """
        # Every matching handler sees every event (no first-match-wins).
        for event in self.poll():
            for handler in self._handlers:
                if handler.matches(event):
                    handler.execute(event)

    def has_handlers(self, cls):
        """
        Decorator for registering a class as containing picraft event handlers.

        If you are writing a class which contains methods that you wish to
        use as event handlers for picraft events, you must decorate the class
        with ``@has_handlers``. This will ensure that picraft tracks instances
        of the class and dispatches events to each instance that exists when
        the event occurs.

        For example::

            from picraft import World, Block, Vector, X, Y, Z

            world = World()

            @world.events.has_handlers
            class HitMe(object):
                def __init__(self, pos):
                    self.pos = pos
                    self.been_hit = False
                    world.blocks[self.pos] = Block('diamond_block')

                @world.events.on_block_hit()
                def was_i_hit(self, event):
                    if event.pos == self.pos:
                        self.been_hit = True
                        print('Block at %s was hit' % str(self.pos))

            p = world.player.tile_pos
            block1 = HitMe(p + 2*X)
            block2 = HitMe(p + 2*Z)
            world.events.main_loop()

        Class-based handlers are an advanced feature and have some notable
        limitations. For instance, in the example above the ``on_block_hit``
        handler couldn't be declared with the block's position because this was
        only known at instance creation time, not at class creation time (which
        was when the handler was registered).

        Furthermore, class-based handlers must be regular instance methods
        (those which accept the instance, self, as the first argument); they
        cannot be class methods or static methods.

        .. note::

            The ``@has_handlers`` decorator takes no arguments and shouldn't
            be called, unlike event handler decorators.
        """
        # Search the class for handler methods, appending the class to the
        # handler's list of associated classes (if you're thinking why is this
        # a collection, consider that a method can be associated with multiple
        # classes either by inheritance or direct assignment)
        handlers_found = 0
        for item in dir(cls):
            item = getattr(cls, item, None)
            if item: # PY2
                # Unwrap Python 2 unbound methods to the underlying function.
                item = getattr(item, 'im_func', item)
            if item and isinstance(item, FunctionType):
                try:
                    # Only functions decorated by the on_* decorators have a
                    # _picraft_classes attribute; anything else raises here.
                    item._picraft_classes.add(cls)
                    handlers_found += 1
                except AttributeError:
                    pass
        if not handlers_found:
            warnings.warn(NoHandlersWarning('no handlers found in %s' % cls))
            return cls
        # Replace __init__ on the class with a closure that adds every instance
        # constructed to self._handler_instances. As this is a WeakSet,
        # instances that die will be implicitly removed
        old_init = getattr(cls, '__init__', None)
        def __init__(this, *args, **kwargs):
            if old_init:
                old_init(this, *args, **kwargs)
            self._handler_instances.add(this)
        if old_init:
            # Preserve the original __init__'s metadata (name, docstring).
            update_wrapper(__init__, old_init)
        cls.__init__ = __init__
        return cls

    def _handler_closure(self, f):
        """Wrap handler *f* for uniform dispatch.

        Plain functions are called directly with the event. Functions that
        belong to ``@has_handlers`` classes (``f._picraft_classes`` is
        non-empty) are instead bound to every live instance of each such
        class and called once per matching instance.
        """
        def handler(event):
            if not f._picraft_classes:
                # The handler is a straight-forward function; just call it
                f(event)
            else:
                # The handler is an unbound method (yes, I know these don't
                # really exist in Python 3; it's a function which is expecting
                # to be called from an object instance if you like). Here we
                # search the set of instances of classes which were registered
                # as having handlers (by @has_handlers)
                for cls in f._picraft_classes:
                    for inst in self._handler_instances:
                        # Check whether the instance has the right class; note
                        # that we *don't* use isinstance() here as we want an
                        # exact match
                        if inst.__class__ == cls:
                            # Bind the function to the instance via its
                            # descriptor
                            f.__get__(inst, cls)(event)
        update_wrapper(handler, f)
        return handler

    def on_idle(self, thread=False, multi=True):
        """
        Decorator for registering a function/method as an idle handler.

        This decorator is used to mark a function as an event handler which
        will be called when no other event handlers have been called in an
        iteration of :meth:`main_loop`. The function will be called with the
        corresponding :class:`IdleEvent` as the only argument.

        Note that idle events will only be generated if :attr:`include_idle`
        is set to ``True``.
        """
        def decorator(f):
            self._handlers.append(
                    IdleHandler(self._handler_closure(f), thread, multi))
            # Starts empty; @has_handlers later fills in owning classes.
            f._picraft_classes = set()
            return f
        return decorator

    def on_player_pos(self, thread=False, multi=True, old_pos=None, new_pos=None):
        """
        Decorator for registering a function/method as a position change
        handler.

        This decorator is used to mark a function as an event handler which
        will be called for any events indicating that a player's position has
        changed while :meth:`main_loop` is executing. The function will be
        called with the corresponding :class:`PlayerPosEvent` as the only
        argument.

        The *old_pos* and *new_pos* parameters can be used to specify vectors
        or sequences of vectors (including a
        :class:`~picraft.vector.vector_range`) that the player position events
        must match in order to activate the associated handler. For example, to
        fire a handler every time any player enters or walks over blocks within
        (-10, 0, -10) to (10, 0, 10)::

            from picraft import World, Vector, vector_range

            world = World()
            world.events.track_players = world.players

            from_pos = Vector(-10, 0, -10)
            to_pos = Vector(10, 0, 10)
            @world.events.on_player_pos(new_pos=vector_range(from_pos, to_pos + 1))
            def in_box(event):
                world.say('Player %d stepped in the box' % event.player.player_id)

            world.events.main_loop()

        Various effects can be achieved by combining *old_pos* and *new_pos*
        filters. For example, one could detect when a player crosses a boundary
        in a particular direction, or decide when a player enters or leaves a
        particular area.

        Note that only players specified in :attr:`track_players` will generate
        player position events.
        """
        def decorator(f):
            self._handlers.append(
                    PlayerPosHandler(self._handler_closure(f),
                        thread, multi, old_pos, new_pos))
            # Starts empty; @has_handlers later fills in owning classes.
            f._picraft_classes = set()
            return f
        return decorator

    def on_block_hit(self, thread=False, multi=True, pos=None, face=None):
        """
        Decorator for registering a function/method as a block hit handler.

        A function marked with this decorator is called, with the
        corresponding :class:`BlockHitEvent` as its sole argument, for every
        event indicating that a block was hit while :meth:`main_loop` is
        running.

        Use *pos* to restrict the handler to hits on a particular vector or
        sequence of vectors (including a
        :class:`~picraft.vector.vector_range`). Likewise, *face* restricts
        the handler to a face or sequence of faces.

        For example, to specify that one handler should be called for hits
        on the top of any blocks, and another should be called only for hits
        on any face of block at the origin one could use the following code::

            from picraft import World, Vector

            world = World()

            @world.events.on_block_hit(pos=Vector(0, 0, 0))
            def origin_hit(event):
                world.say('You hit the block at the origin')

            @world.events.on_block_hit(face="y+")
            def top_hit(event):
                world.say('You hit the top of a block at %d,%d,%d' % event.pos)

            world.events.main_loop()

        When *thread* (default ``False``) is set, the handler runs in its
        own background thread, in parallel with other handlers.

        The *multi* parameter only matters when *thread* is ``True``: if
        ``True`` (the default), each activation of the handler runs in
        parallel with any still-running ones; if ``False``, at most one
        instance of the handler runs at a time and simultaneous activations
        are ignored (but not queued, as with unthreaded handlers).
        """
        def decorator(f):
            # Wrap the function in a handler carrying the filter criteria.
            handler = BlockHitHandler(
                self._handler_closure(f), thread, multi, pos, face)
            self._handlers.append(handler)
            f._picraft_classes = set()
            return f
        return decorator

    def on_chat_post(self, thread=False, multi=True, message=None):
        """
        Decorator for registering a function/method as a chat event handler.

        A function marked with this decorator is called, with the
        corresponding :class:`ChatPostEvent` as its sole argument, for every
        event indicating that a chat message was posted to the world while
        :meth:`main_loop` is running.

        .. note::

            Only the Raspberry Juice server generates chat events; Minecraft
            Pi Edition does not support this event type.

        Use *message* to restrict the handler to chat messages matching a
        given string or regular expression. For example::

            import re
            from picraft import World, Vector

            world = World()

            @world.events.on_chat_post(message="hello world")
            def echo(event):
                world.say("Hello player %d!" % event.player.player_id)

            @world.events.on_chat_post(message=re.compile(r"teleport_me \d+,\d+,\d+"))
            def teleport(event):
                x, y, z = event.message[len("teleport_me "):].split(",")
                event.player.pos = Vector(int(x), int(y), int(z))

            world.events.main_loop()

        When *thread* (default ``False``) is set, the handler runs in its
        own background thread, in parallel with other handlers.

        The *multi* parameter only matters when *thread* is ``True``: if
        ``True`` (the default), each activation of the handler runs in
        parallel with any still-running ones; if ``False``, at most one
        instance of the handler runs at a time and simultaneous activations
        are ignored (but not queued, as with unthreaded handlers).
        """
        def decorator(f):
            # Wrap the function in a handler carrying the message filter.
            handler = ChatPostHandler(
                self._handler_closure(f), thread, multi, message)
            self._handlers.append(handler)
            f._picraft_classes = set()
            return f
        return decorator
Example #54
0
class TransactionManager(BaseManager):
    """Transaction MQ manager.

    Consumes serialized transactions from the ``TX_EXCHANGE`` fanout
    exchange, validates recent ones and keeps them in pending storage
    until :meth:`clear` removes those included in a block.
    """
    def __init__(self, name):
        # Queue-name suffix identifying this manager instance.
        self.name = name
        # Strong references to in-flight processing tasks.  The asyncio
        # event loop only keeps weak references to tasks, so holding them
        # in a WeakSet would allow a task to be garbage-collected before
        # it finishes; we discard each reference on completion instead.
        self._task = set()
        self._event_storage = Storage()

    @property
    def storage(self):
        """Pending-transaction storage (transaction hash -> transaction)."""
        return self._event_storage

    def set_event(self, evt):
        # Event that exists() sets once at least one transaction is pending.
        self._evt = evt

    async def event_run(self):
        """Connect to the MQ broker and start consuming transaction events."""
        try:
            transport, protocol = await aioamqp.connect(host=MQ_HOST,
                                                        port=MQ_PORT,
                                                        login=MQ_USER,
                                                        password=MQ_SEED,
                                                        login_method='PLAIN')
        except aioamqp.AmqpClosedConnection as e:
            print('TransactionManager ClosedConnection : ', str(e))
            # TODO: proper exception handling / reconnect strategy.
            return

        try:
            self._channel = await protocol.channel()

            # Fanout exchange: every transaction manager sees every tx.
            await self._channel.exchange_declare(exchange_name=TX_EXCHANGE,
                                                 type_name='fanout')

            await self._channel.queue_declare(queue_name=EVENT_NAME(self.name),
                                              exclusive=True)

            await self._channel.queue_bind(exchange_name=TX_EXCHANGE,
                                           queue_name=EVENT_NAME(self.name),
                                           routing_key='')

            await self._channel.basic_consume(self.process_event,
                                              queue_name=EVENT_NAME(self.name),
                                              no_ack=True)
        except (aioamqp.ChannelClosed, aioamqp.AmqpClosedConnection):
            if not transport.is_closing():
                transport.close()

    async def process_event(self, channel, body, envelope, properties):
        """aioamqp consume callback: schedule processing of one message."""
        task = asyncio.ensure_future(self._process_event(body))
        self._task.add(task)
        # Drop our strong reference once the task completes.
        task.add_done_callback(self._task.discard)

    async def _process_event(self, body):
        """Deserialize, validate and store one transaction message.

        :param body: raw JSON-encoded tx-data
        """
        try:
            # json.JSONDecodeError is a ValueError subclass, so a malformed
            # payload is ignored here as well instead of killing the task.
            transaction = deserialize_transaction(json.loads(body))
        except ValueError:
            return
        # Only accept transactions less than ten minutes old.
        if time_distance(transaction.timestamp) < 600:
            await validate_transaction(transaction)
            self.storage[transaction.hash] = transaction  # pending.
            await asyncio.sleep(0.0001)  # yield control to the event loop

    async def send(self, obj):
        """Publish *obj* (anything providing ``to_json()``) to the exchange."""
        await self._channel.basic_publish(obj.to_json(),
                                          exchange_name=TX_EXCHANGE,
                                          routing_key='')

    def get_list(self, length: int = 60):
        """Return copies of up to *length* pending transactions.

        :param length: data-len (maximum number of transactions)
        :return: tx-list
        """
        return [tx.copy() for tx in self.storage.range(length)]

    async def exists(self):
        """Poll until at least one transaction is pending, then set the event."""
        while True:
            await asyncio.sleep(0.01)
            if len(self.storage) >= 1:
                self._evt.set()
                return

    def clear(self, block):
        """Remove from storage every transaction included in *block*."""
        complete_tx = [tx.hash for tx in block.list_transactions]
        self.storage.delete_keys(complete_tx)
Example #55
0
class BaseService(ABC, CancellableMixin):
    """Base class for cancellable, long-running asyncio services.

    Subclasses implement :meth:`_run` (and optionally :meth:`_cleanup`);
    shutdown is coordinated through a :class:`CancelToken` plus the
    started/cancelled/cleaned_up/finished events on ``self.events``.
    """
    # NOTE: the ``logger`` attribute is provided by the property below;
    # a separate ``logger = None`` class attribute would be dead code.
    # Use a WeakSet so that we don't have to bother updating it when tasks finish.
    _child_services: 'WeakSet[BaseService]'
    _tasks: 'WeakSet[asyncio.Future[Any]]'
    _finished_callbacks: List[Callable[['BaseService'], None]]
    # Number of seconds cancel() will wait for run() to finish.
    _wait_until_finished_timeout = 5

    # the custom event loop to run in, or None if the default loop should be used
    _loop: Optional[asyncio.AbstractEventLoop] = None

    # Backing attribute for the lazily-created ``logger`` property.
    _logger: Optional[ExtendedDebugLogger] = None

    # Monotonic timestamp recorded when run() starts; used by ``uptime``.
    _start_time: Optional[float] = None

    def __init__(self,
                 token: Optional[CancelToken] = None,
                 loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
        self.events = ServiceEvents()
        self._run_lock = asyncio.Lock()
        self._child_services = WeakSet()
        self._tasks = WeakSet()
        self._finished_callbacks = []

        self._loop = loop

        base_token = CancelToken(type(self).__name__, loop=loop)

        if token is None:
            self.cancel_token = base_token
        else:
            # Chain the tokens so that triggering either one cancels us.
            self.cancel_token = base_token.chain(token)

    @property
    def logger(self) -> ExtendedDebugLogger:
        # Created lazily so the logger name reflects the concrete subclass.
        if self._logger is None:
            self._logger = cast(
                ExtendedDebugLogger,
                logging.getLogger(self.__module__ + '.' +
                                  self.__class__.__name__))
        return self._logger

    @property
    def uptime(self) -> float:
        """Seconds since :meth:`run` started, or 0.0 if not yet started."""
        if self._start_time is None:
            return 0.0
        else:
            return time.monotonic() - self._start_time

    def get_event_loop(self) -> asyncio.AbstractEventLoop:
        """Return the custom loop given at construction, else the default one."""
        if self._loop is None:
            return asyncio.get_event_loop()
        else:
            return self._loop

    async def run(
        self,
        finished_callback: Optional[Callable[['BaseService'], None]] = None
    ) -> None:
        """Await for the service's _run() coroutine.

        Once _run() returns, triggers the cancel token, call cleanup() and
        finished_callback (if one was passed).

        :raises ValidationError: if the service is already running or was
            already cancelled (services cannot be restarted).
        """
        if self.is_running:
            raise ValidationError(
                "Cannot start the service while it's already running")
        elif self.is_cancelled:
            raise ValidationError(
                "Cannot restart a service that has already been cancelled")

        if finished_callback:
            self._finished_callbacks.append(finished_callback)

        try:
            async with self._run_lock:
                self.events.started.set()
                self._start_time = time.monotonic()
                await self._run()
        except OperationCancelled as e:
            self.logger.debug("%s finished: %s", self, e)
        except Exception:
            self.logger.exception("Unexpected error in %r, exiting", self)
        else:
            if self.is_cancelled:
                self.logger.debug("%s cancelled, cleaning up...", self)
            else:
                self.logger.debug(
                    "%s had nothing left to do, ceasing operation...", self)
        finally:
            # Trigger our cancel token to ensure all pending asyncio tasks and background
            # coroutines started by this service exit cleanly.
            self.events.cancelled.set()
            self.cancel_token.trigger()

            await self.cleanup()

            for callback in self._finished_callbacks:
                callback(self)

            self.events.finished.set()
            self.logger.debug("%s halted cleanly", self)

    def add_finished_callback(
            self, finished_callback: Callable[['BaseService'], None]) -> None:
        """Register a callback invoked (with this service) after cleanup."""
        self._finished_callbacks.append(finished_callback)

    def run_task(self, awaitable: Awaitable[Any]) -> None:
        """Run the given awaitable in the background.

        The awaitable should return whenever this service's cancel token is triggered.

        If it raises OperationCancelled, that is caught and ignored.
        """
        @functools.wraps(awaitable)  # type: ignore
        async def _run_task_wrapper() -> None:
            self.logger.debug2("Running task %s", awaitable)
            try:
                await awaitable
            except OperationCancelled:
                pass
            except Exception as e:
                self.logger.warning("Task %s finished unexpectedly: %s",
                                    awaitable, e)
                self.logger.debug("Task failure traceback", exc_info=True)
            else:
                self.logger.debug2("Task %s finished with no errors",
                                   awaitable)

        self._tasks.add(asyncio.ensure_future(_run_task_wrapper()))

    def run_daemon_task(self, awaitable: Awaitable[Any]) -> None:
        """Run the given awaitable in the background.

        Like :meth:`run_task` but if the task ends without cancelling, then
        this service will terminate as well.
        """
        @functools.wraps(awaitable)  # type: ignore
        async def _run_daemon_task_wrapper() -> None:
            try:
                await awaitable
            finally:
                if not self.is_cancelled:
                    self.logger.debug(
                        "%s finished while %s is still running, terminating as well",
                        awaitable,
                        self,
                    )
                    self.cancel_nowait()

        self.run_task(_run_daemon_task_wrapper())

    def run_child_service(self, child_service: 'BaseService') -> None:
        """
        Run a child service and keep a reference to it to be considered during the cleanup.

        :raises ValidationError: if the child is already running or completed.
        """
        if child_service.is_running:
            raise ValidationError(
                f"Can't start service {child_service!r}, child of {self!r}: it's already running"
            )
        elif child_service.is_cancelled:
            raise ValidationError(
                f"Can't restart {child_service!r}, child of {self!r}: it's already completed"
            )

        self._child_services.add(child_service)
        self.run_task(child_service.run())

    def run_daemon(self, service: 'BaseService') -> None:
        """
        Run a service and keep a reference to it to be considered during the cleanup.

        If the service finishes while we're still running, we'll terminate as well.

        :raises ValidationError: if the daemon is already running or completed.
        """
        if service.is_running:
            raise ValidationError(
                f"Can't start daemon {service!r}, child of {self!r}: it's already running"
            )
        elif service.is_cancelled:
            raise ValidationError(
                f"Can't restart daemon {service!r}, child of {self!r}: it's already completed"
            )

        self._child_services.add(service)

        @functools.wraps(service.run)
        async def _run_daemon_wrapper() -> None:
            try:
                await service.run()
            except OperationCancelled:
                pass
            except Exception as e:
                self.logger.warning(
                    "Daemon Service %s finished unexpectedly: %s", service, e)
                self.logger.debug("Daemon Service failure traceback",
                                  exc_info=True)
            finally:
                if not self.is_cancelled:
                    self.logger.debug(
                        "%s finished while %s is still running, terminating as well",
                        service,
                        self,
                    )
                    self.cancel_nowait()

        self.run_task(_run_daemon_wrapper())

    def call_later(self, delay: float, callback: 'Callable[..., None]',
                   *args: Any) -> None:
        """Schedule *callback(*args)* to run after *delay* seconds (cancellable)."""
        @functools.wraps(callback)
        async def _call_later_wrapped() -> None:
            await self.sleep(delay)
            callback(*args)

        self.run_task(_call_later_wrapped())

    async def _run_in_executor(self, executor: concurrent.futures.Executor,
                               callback: Callable[...,
                                                  Any], *args: Any) -> Any:
        """Run *callback(*args)* in *executor*, respecting our cancel token."""
        loop = self.get_event_loop()
        try:
            return await self.wait(
                loop.run_in_executor(executor, callback, *args))
        except concurrent.futures.process.BrokenProcessPool:
            self.logger.exception(
                "Fatal error. Process pool died. Cancelling operations.")
            await self.cancel()

    async def cleanup(self) -> None:
        """
        Run the ``_cleanup()`` coroutine and set the ``cleaned_up`` event after the service as
        well as all child services finished their cleanup.

        The ``_cleanup()`` coroutine is invoked before the child services may have finished
        their cleanup.
        """
        if self._child_services:
            self.logger.debug("Waiting for child services: %s",
                              list(self._child_services))
            wait_for_clean_up_tasks = (
                child_service.events.cleaned_up.wait()
                for child_service in self._child_services)
            await asyncio.gather(*wait_for_clean_up_tasks)
            self.logger.debug("All child services finished")
        if self._tasks:
            self._log_tasks("Waiting for tasks")
            await asyncio.gather(*self._tasks)
            self.logger.debug("All tasks finished")

        await self._cleanup()
        self.events.cleaned_up.set()

    def _log_tasks(self, message: str) -> None:
        """Debug-log the pending tasks, truncating very long task lists."""
        MAX_DISPLAY_TASKS = 50
        task_list = list(self._tasks)
        if len(self._tasks) > MAX_DISPLAY_TASKS:
            # Show the head and tail of the list with an ellipsis between.
            task_display = ''.join(
                map(str, [
                    task_list[:MAX_DISPLAY_TASKS // 2],
                    '...',
                    task_list[-1 * MAX_DISPLAY_TASKS // 2:],
                ]))
        else:
            task_display = str(task_list)
        self.logger.debug("%s (%d): %s", message, len(self._tasks),
                          task_display)

    def cancel_nowait(self) -> None:
        """Trigger the cancel token without waiting for cleanup to finish.

        :raises ValidationError: if the service has not been started.
        """
        if self.is_cancelled:
            self.logger.warning(
                "Tried to cancel %s, but it was already cancelled", self)
            return
        elif not self.is_running:
            raise ValidationError(
                "Cannot cancel a service that has not been started")

        self.logger.debug("Cancelling %s", self)
        self.events.cancelled.set()
        self.cancel_token.trigger()

    async def cancel(self) -> None:
        """Trigger the CancelToken and wait for the cleaned_up event to be set."""
        self.cancel_nowait()

        try:
            # asyncio.TimeoutError is the public name for the class that
            # used to live at asyncio.futures.TimeoutError.
            await asyncio.wait_for(self.events.cleaned_up.wait(),
                                   timeout=self._wait_until_finished_timeout)
        except asyncio.TimeoutError:
            self.logger.info(
                "Timed out waiting for %s to finish its cleanup, forcibly cancelling pending "
                "tasks and exiting anyway", self)
            if self._tasks:
                self._log_tasks("Pending tasks")
            if self._child_services:
                self.logger.debug("Pending child services: %s",
                                  list(self._child_services))
            self._forcibly_cancel_all_tasks()
            # Sleep a bit because the Future.cancel() method just schedules the callbacks, so we
            # need to give the event loop a chance to actually call them.
            await asyncio.sleep(0.5)
        else:
            self.logger.debug("%s finished cleanly", self)

    def _forcibly_cancel_all_tasks(self) -> None:
        # Last resort: cancel every pending background task outright.
        for task in self._tasks:
            task.cancel()

    @property
    def is_cancelled(self) -> bool:
        return self.cancel_token.triggered

    @property
    def is_operational(self) -> bool:
        return self.events.started.is_set() and not self.cancel_token.triggered

    @property
    def is_running(self) -> bool:
        # The run lock is held for the entire duration of run().
        return self._run_lock.locked()

    async def cancellation(self) -> None:
        """
        Pause until this service is cancelled
        """
        await self.wait(self.events.cancelled.wait())

    async def threadsafe_cancel(self) -> None:
        """
        Cancel service in another thread. Block until service is cleaned up.
        """
        asyncio.run_coroutine_threadsafe(self.cancel(),
                                         loop=self.get_event_loop())
        await asyncio.wait_for(
            self.events.cleaned_up.wait(),
            timeout=self._wait_until_finished_timeout,
        )

    async def sleep(self, delay: float) -> None:
        """Coroutine that completes after a given time (in seconds)."""
        await self.wait(asyncio.sleep(delay))

    @abstractmethod
    async def _run(self) -> None:
        """Run the service's loop.

        Should return or raise OperationCancelled when the CancelToken is triggered.
        """
        pass

    async def _cleanup(self) -> None:
        """Clean up any resources held by this service.

        Called after the service's _run() method returns.
        """
        pass
Example #56
0
class TestWeakSet(unittest.TestCase):

    def setUp(self):
        # need to keep references to them
        self.items = [SomeClass(c) for c in ('a', 'b', 'c')]
        self.items2 = [SomeClass(c) for c in ('x', 'y', 'z')]
        self.letters = [SomeClass(c) for c in string.ascii_letters]
        self.ab_items = [SomeClass(c) for c in 'ab']
        self.abcde_items = [SomeClass(c) for c in 'abcde']
        self.def_items = [SomeClass(c) for c in 'def']
        self.ab_weakset = WeakSet(self.ab_items)
        self.abcde_weakset = WeakSet(self.abcde_items)
        self.def_weakset = WeakSet(self.def_items)
        self.s = WeakSet(self.items)
        self.d = dict.fromkeys(self.items)
        self.obj = SomeClass('F')
        self.fs = WeakSet([self.obj])

    def test_methods(self):
        weaksetmethods = dir(WeakSet)
        for method in dir(set):
            if method == 'test_c_api' or method.startswith('_'):
                continue
            self.assertIn(method, weaksetmethods,
                         "WeakSet missing method " + method)

    def test_new_or_init(self):
        self.assertRaises(TypeError, WeakSet, [], 2)

    def test_len(self):
        self.assertEqual(len(self.s), len(self.d))
        self.assertEqual(len(self.fs), 1)
        del self.obj
        test_support.gc_collect()
        # len of weak collections is eventually consistent on
        # Jython. In practice this does not matter because of the
        # nature of weaksets - we cannot rely on what happens in the
        # reaper thread and how it interacts with gc
        self.assertIn(len(self.fs), (0, 1))

    def test_contains(self):
        for c in self.letters:
            self.assertEqual(c in self.s, c in self.d)
        # 1 is not weakref'able, but that TypeError is caught by __contains__
        self.assertNotIn(1, self.s)
        self.assertIn(self.obj, self.fs)
        del self.obj
        test_support.gc_collect()
        self.assertNotIn(SomeClass('F'), self.fs)

    def test_union(self):
        u = self.s.union(self.items2)
        for c in self.letters:
            self.assertEqual(c in u, c in self.d or c in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(u), WeakSet)
        self.assertRaises(TypeError, self.s.union, [[]])
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet(self.items + self.items2)
            c = C(self.items2)
            self.assertEqual(self.s.union(c), x)
            del c
            test_support.gc_collect()
        self.assertEqual(len(list(u)), len(list(self.items)) + len(list(self.items2)))
        self.items2.pop()
        test_support.gc_collect()
        self.assertEqual(len(list(u)), len(list(self.items)) + len(list(self.items2)))

    def test_or(self):
        i = self.s.union(self.items2)
        self.assertEqual(self.s | set(self.items2), i)
        self.assertEqual(self.s | frozenset(self.items2), i)

    def test_intersection(self):
        s = WeakSet(self.letters)
        i = s.intersection(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.items2 and c in self.letters)
        self.assertEqual(s, WeakSet(self.letters))
        self.assertEqual(type(i), WeakSet)
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet([])
            self.assertEqual(i.intersection(C(self.items)), x)
        self.assertEqual(len(i), len(self.items2))
        self.items2.pop()
        test_support.gc_collect()
        self.assertEqual(len(list(i)), len(list(self.items2)))

    def test_isdisjoint(self):
        self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
        self.assertTrue(not self.s.isdisjoint(WeakSet(self.letters)))

    def test_and(self):
        i = self.s.intersection(self.items2)
        self.assertEqual(self.s & set(self.items2), i)
        self.assertEqual(self.s & frozenset(self.items2), i)

    def test_difference(self):
        i = self.s.difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c not in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.difference, [[]])

    def test_sub(self):
        i = self.s.difference(self.items2)
        self.assertEqual(self.s - set(self.items2), i)
        self.assertEqual(self.s - frozenset(self.items2), i)

    def test_symmetric_difference(self):
        i = self.s.symmetric_difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, (c in self.d) ^ (c in self.items2))
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
        self.assertEqual(len(i), len(self.items) + len(self.items2))
        self.items2.pop()
        test_support.gc_collect()
        self.assertEqual(len(list(i)), len(list(self.items)) + len(list(self.items2)))

    def test_xor(self):
        i = self.s.symmetric_difference(self.items2)
        self.assertEqual(self.s ^ set(self.items2), i)
        self.assertEqual(self.s ^ frozenset(self.items2), i)

    def test_sub_and_super(self):
        self.assertTrue(self.ab_weakset <= self.abcde_weakset)
        self.assertTrue(self.abcde_weakset <= self.abcde_weakset)
        self.assertTrue(self.abcde_weakset >= self.ab_weakset)
        self.assertFalse(self.abcde_weakset <= self.def_weakset)
        self.assertFalse(self.abcde_weakset >= self.def_weakset)
        self.assertTrue(set('a').issubset('abc'))
        self.assertTrue(set('abc').issuperset('a'))
        self.assertFalse(set('a').issubset('cbs'))
        self.assertFalse(set('cbs').issuperset('a'))

    def test_lt(self):
        self.assertTrue(self.ab_weakset < self.abcde_weakset)
        self.assertFalse(self.abcde_weakset < self.def_weakset)
        self.assertFalse(self.ab_weakset < self.ab_weakset)
        self.assertFalse(WeakSet() < WeakSet())

    def test_gt(self):
        self.assertTrue(self.abcde_weakset > self.ab_weakset)
        self.assertFalse(self.abcde_weakset > self.def_weakset)
        self.assertFalse(self.ab_weakset > self.ab_weakset)
        self.assertFalse(WeakSet() > WeakSet())

    def test_gc(self):
        # Create a nest of cycles to exercise overall ref count check
        s = WeakSet(Foo() for i in range(1000))
        for elem in s:
            elem.cycle = s
            elem.sub = elem
            elem.set = WeakSet([elem])

    def test_subclass_with_custom_hash(self):
        # Bug #1257731
        class H(WeakSet):
            def __hash__(self):
                return int(id(self) & 0x7fffffff)
        s=H()
        f=set()
        f.add(s)
        self.assertIn(s, f)
        f.remove(s)
        f.add(s)
        f.discard(s)

    def test_init(self):
        s = WeakSet()
        s.__init__(self.items)
        self.assertEqual(s, self.s)
        s.__init__(self.items2)
        self.assertEqual(s, WeakSet(self.items2))
        self.assertRaises(TypeError, s.__init__, s, 2);
        self.assertRaises(TypeError, s.__init__, 1);

    def test_constructor_identity(self):
        s = WeakSet(self.items)
        t = WeakSet(s)
        self.assertNotEqual(id(s), id(t))

    def test_hash(self):
        self.assertRaises(TypeError, hash, self.s)

    def test_clear(self):
        self.s.clear()
        self.assertEqual(self.s, WeakSet([]))
        self.assertEqual(len(self.s), 0)

    def test_copy(self):
        dup = self.s.copy()
        self.assertEqual(self.s, dup)
        self.assertNotEqual(id(self.s), id(dup))

    def test_add(self):
        x = SomeClass('Q')
        self.s.add(x)
        self.assertIn(x, self.s)
        dup = self.s.copy()
        self.s.add(x)
        self.assertEqual(self.s, dup)
        if not test_support.is_jython:  # Jython/JVM can weakly reference list and other objects
            self.assertRaises(TypeError, self.s.add, [])
        self.fs.add(Foo())
        test_support.gc_collect()  # CPython assumes Foo() went out of scope and was collected, so ensure the same
        self.assertEqual(len(list(self.fs)), 1)
        self.fs.add(self.obj)
        self.assertEqual(len(list(self.fs)), 1)

    def test_remove(self):
        x = SomeClass('a')
        self.s.remove(x)
        self.assertNotIn(x, self.s)
        self.assertRaises(KeyError, self.s.remove, x)
        if not test_support.is_jython:  # Jython/JVM can weakly reference list and other objects
            self.assertRaises(TypeError, self.s.remove, [])

    def test_discard(self):
        a, q = SomeClass('a'), SomeClass('Q')
        self.s.discard(a)
        self.assertNotIn(a, self.s)
        self.s.discard(q)
        if not test_support.is_jython:  # Jython/JVM can weakly reference list and other objects
            self.assertRaises(TypeError, self.s.discard, [])

    def test_pop(self):
        for i in range(len(self.s)):
            elem = self.s.pop()
            self.assertNotIn(elem, self.s)
        self.assertRaises(KeyError, self.s.pop)

    def test_update(self):
        retval = self.s.update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)
        self.assertRaises(TypeError, self.s.update, [[]])

    def test_update_set(self):
        self.s.update(set(self.items2))
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_ior(self):
        self.s |= set(self.items2)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_intersection_update(self):
        """intersection_update() keeps only common elements and returns None."""
        # In-place mutators return None; assertIsNone is the idiomatic check.
        self.assertIsNone(self.s.intersection_update(self.items2))
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        # Non-weak-referenceable members are rejected.
        self.assertRaises(TypeError, self.s.intersection_update, [[]])

    def test_iand(self):
        """&= keeps exactly the elements present in both operands."""
        self.s &= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_difference_update(self):
        """difference_update() removes items2 members in place and returns None."""
        retval = self.s.difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        if not test_support.is_jython:  # Jython/JVM can weakly reference list and other objects
            self.assertRaises(TypeError, self.s.difference_update, [[]])
            self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_isub(self):
        """-= removes every element of the right-hand set in place."""
        self.s -= set(self.items2)
        for member in self.items + self.items2:
            kept = member in self.items and member not in self.items2
            if kept:
                self.assertIn(member, self.s)
            else:
                self.assertNotIn(member, self.s)

    def test_symmetric_difference_update(self):
        """symmetric_difference_update() keeps items in exactly one operand; returns None."""
        retval = self.s.symmetric_difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_ixor(self):
        """^= keeps exactly the elements present in one operand but not both."""
        self.s ^= set(self.items2)
        for member in self.items + self.items2:
            # Membership in exactly one of the two original collections.
            if (member in self.items) != (member in self.items2):
                self.assertIn(member, self.s)
            else:
                self.assertNotIn(member, self.s)

    def test_inplace_on_self(self):
        """In-place operators with the set itself as operand must be safe."""
        t = self.s.copy()
        t |= t
        self.assertEqual(t, self.s)
        t &= t
        self.assertEqual(t, self.s)
        t -= t
        self.assertEqual(t, WeakSet())
        t = self.s.copy()
        t ^= t
        self.assertEqual(t, WeakSet())

    def test_eq(self):
        """Equality compares contents; non-set types never compare equal."""
        # issue 5964 (http://bugs.python.org/issue5964)
        self.assertEqual(self.s, self.s)
        self.assertEqual(self.s, WeakSet(self.items))
        # Jython diverges here in the next test because it constructs
        # WeakSet as a subclass of set; this seems to be the proper
        # thing to do given what is the typical comparison
        self.assertEqual(self.s, set(self.items))
        self.assertNotEqual(self.s, list(self.items))
        self.assertNotEqual(self.s, tuple(self.items))
        self.assertNotEqual(self.s, 1)

    def test_weak_destroy_while_iterating(self):
        """Iteration must survive an element dying mid-iteration."""
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        # Create new items to be sure no-one else holds a reference
        items = [SomeClass(c) for c in ('a', 'b', 'c')]
        s = WeakSet(items)
        it = iter(s)
        next(it)             # Trigger internal iteration
        # Destroy an item
        del items[-1]
        test_support.gc_collect()    # just in case
        # We have removed either the first consumed items, or another one
        self.assertIn(len(list(it)), [len(items), len(items) - 1])
        del it
        extra_collect()
        # The removal has been committed
        self.assertEqual(len(s), len(items))

    def test_weak_destroy_and_mutate_while_iterating(self):
        """Mutating the set while a live iterator exists must not crash."""
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        items = [SomeClass(c) for c in string.ascii_letters]
        s = WeakSet(items)
        @contextlib.contextmanager
        def testcontext():
            try:
                it = iter(s)
                next(it)
                # Schedule an item for removal and recreate it
                u = SomeClass(str(items.pop()))
                test_support.gc_collect()      # just in case
                yield u
            finally:
                it = None           # should commit all removals

        test_support.gc_collect()

        with testcontext() as u:
            self.assertNotIn(u, s)
        with testcontext() as u:
            self.assertRaises(KeyError, s.remove, u)
        self.assertNotIn(u, s)
        with testcontext() as u:
            s.add(u)
        self.assertIn(u, s)
        t = s.copy()
        with testcontext() as u:
            s.update(t)
        self.assertEqual(len(s), len(t))
        with testcontext() as u:
            s.clear()
        self.assertEqual(len(s), 0)

    def test_len_cycles(self):
        """len() stays consistent while cyclic elements are collected."""
        N = 20
        items = [RefCycle() for i in range(N)]
        s = WeakSet(items)
        del items
        # do some gc
        test_support.gc_collect()
        it = iter(s)
        try:
            next(it)
        except StopIteration:
            pass

        # do some gc
        test_support.gc_collect()

        n1 = len(s)
        del it
        # do some gc
        test_support.gc_collect()

        n2 = len(s)
        # one item may be kept alive inside the iterator
        self.assertIn(n1, (0, 1))
        self.assertEqual(n2, 0)

    @unittest.skipIf(test_support.is_jython, "GarbageCollection not deterministic in Jython")
    def test_len_race(self):
        """len() must stay within sane bounds while gc runs at varying thresholds."""
        # Extended sanity checks for len() in the face of cyclic collection
        self.addCleanup(gc.set_threshold, *gc.get_threshold())
        for th in range(1, 100):
            N = 20
            gc.collect(0)
            gc.set_threshold(th, th, th)
            items = [RefCycle() for i in range(N)]
            s = WeakSet(items)
            del items
            # All items will be collected at next garbage collection pass
            it = iter(s)
            try:
                next(it)
            except StopIteration:
                pass
            n1 = len(s)
            del it
            n2 = len(s)
            self.assertGreaterEqual(n1, 0)
            self.assertLessEqual(n1, N)
            self.assertGreaterEqual(n2, 0)
            self.assertLessEqual(n2, n1)
Example #57
0
class TestWeakSet(unittest.TestCase):
    """Exercise weakref.WeakSet: construction, set algebra, comparison, and
    interaction with the garbage collector (element lifetimes are significant
    throughout — many assertions depend on exactly which references exist)."""

    def setUp(self):
        # need to keep references to them
        self.items = [SomeClass(c) for c in ('a', 'b', 'c')]
        self.items2 = [SomeClass(c) for c in ('x', 'y', 'z')]
        self.letters = [SomeClass(c) for c in string.ascii_letters]
        self.ab_items = [SomeClass(c) for c in 'ab']
        self.abcde_items = [SomeClass(c) for c in 'abcde']
        self.def_items = [SomeClass(c) for c in 'def']
        self.ab_weakset = WeakSet(self.ab_items)
        self.abcde_weakset = WeakSet(self.abcde_items)
        self.def_weakset = WeakSet(self.def_items)
        self.s = WeakSet(self.items)
        self.d = dict.fromkeys(self.items)
        self.obj = SomeClass('F')
        self.fs = WeakSet([self.obj])

    def test_methods(self):
        """WeakSet exposes every public method that set does."""
        weaksetmethods = dir(WeakSet)
        for method in dir(set):
            if method == 'test_c_api' or method.startswith('_'):
                continue
            self.assertIn(method, weaksetmethods,
                         "WeakSet missing method " + method)

    def test_new_or_init(self):
        self.assertRaises(TypeError, WeakSet, [], 2)

    def test_len(self):
        """len() drops elements once their last strong reference is gone."""
        self.assertEqual(len(self.s), len(self.d))
        self.assertEqual(len(self.fs), 1)
        del self.obj
        gc.collect()
        # len of weak collections is eventually consistent on
        # Jython. In practice this does not matter because of the
        # nature of weaksets - we cannot rely on what happens in the
        # reaper thread and how it interacts with gc
        self.assertIn(len(self.fs), (0, 1))

    def test_contains(self):
        for c in self.letters:
            self.assertEqual(c in self.s, c in self.d)
        # 1 is not weakref'able, but that TypeError is caught by __contains__
        self.assertNotIn(1, self.s)
        self.assertIn(self.obj, self.fs)
        del self.obj
        gc.collect()
        self.assertNotIn(SomeClass('F'), self.fs)

    def test_union(self):
        """union() returns a new WeakSet and leaves the receiver unchanged."""
        u = self.s.union(self.items2)
        for c in self.letters:
            self.assertEqual(c in u, c in self.d or c in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(u), WeakSet)
        self.assertRaises(TypeError, self.s.union, [[]])
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet(self.items + self.items2)
            c = C(self.items2)
            self.assertEqual(self.s.union(c), x)
            del c
            gc.collect()
        self.assertEqual(len(list(u)), len(list(self.items)) + len(list(self.items2)))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(list(u)), len(list(self.items)) + len(list(self.items2)))

    def test_or(self):
        i = self.s.union(self.items2)
        self.assertEqual(self.s | set(self.items2), i)
        self.assertEqual(self.s | frozenset(self.items2), i)

    def test_intersection(self):
        """intersection() result shrinks as elements of items2 die."""
        s = WeakSet(self.letters)
        i = s.intersection(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.items2 and c in self.letters)
        self.assertEqual(s, WeakSet(self.letters))
        self.assertEqual(type(i), WeakSet)
        for C in set, frozenset, dict.fromkeys, list, tuple:
            x = WeakSet([])
            self.assertEqual(i.intersection(C(self.items)), x)
        self.assertEqual(len(i), len(self.items2))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(list(i)), len(list(self.items2)))

    def test_isdisjoint(self):
        self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
        self.assertTrue(not self.s.isdisjoint(WeakSet(self.letters)))

    def test_and(self):
        i = self.s.intersection(self.items2)
        self.assertEqual(self.s & set(self.items2), i)
        self.assertEqual(self.s & frozenset(self.items2), i)

    def test_difference(self):
        i = self.s.difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, c in self.d and c not in self.items2)
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.difference, [[]])

    def test_sub(self):
        i = self.s.difference(self.items2)
        self.assertEqual(self.s - set(self.items2), i)
        self.assertEqual(self.s - frozenset(self.items2), i)

    def test_symmetric_difference(self):
        """symmetric_difference() result tracks deaths in either operand."""
        i = self.s.symmetric_difference(self.items2)
        for c in self.letters:
            self.assertEqual(c in i, (c in self.d) ^ (c in self.items2))
        self.assertEqual(self.s, WeakSet(self.items))
        self.assertEqual(type(i), WeakSet)
        self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
        self.assertEqual(len(i), len(self.items) + len(self.items2))
        self.items2.pop()
        gc.collect()
        self.assertEqual(len(list(i)), len(list(self.items)) + len(list(self.items2)))

    def test_xor(self):
        i = self.s.symmetric_difference(self.items2)
        self.assertEqual(self.s ^ set(self.items2), i)
        self.assertEqual(self.s ^ frozenset(self.items2), i)

    def test_sub_and_super(self):
        self.assertTrue(self.ab_weakset <= self.abcde_weakset)
        self.assertTrue(self.abcde_weakset <= self.abcde_weakset)
        self.assertTrue(self.abcde_weakset >= self.ab_weakset)
        self.assertFalse(self.abcde_weakset <= self.def_weakset)
        self.assertFalse(self.abcde_weakset >= self.def_weakset)
        self.assertTrue(set('a').issubset('abc'))
        self.assertTrue(set('abc').issuperset('a'))
        self.assertFalse(set('a').issubset('cbs'))
        self.assertFalse(set('cbs').issuperset('a'))

    def test_lt(self):
        self.assertTrue(self.ab_weakset < self.abcde_weakset)
        self.assertFalse(self.abcde_weakset < self.def_weakset)
        self.assertFalse(self.ab_weakset < self.ab_weakset)
        self.assertFalse(WeakSet() < WeakSet())

    def test_gt(self):
        self.assertTrue(self.abcde_weakset > self.ab_weakset)
        self.assertFalse(self.abcde_weakset > self.def_weakset)
        self.assertFalse(self.ab_weakset > self.ab_weakset)
        self.assertFalse(WeakSet() > WeakSet())

    def test_gc(self):
        """Reference cycles through WeakSet elements must not break gc."""
        # Create a nest of cycles to exercise overall ref count check
        s = WeakSet(Foo() for i in range(1000))
        for elem in s:
            elem.cycle = s
            elem.sub = elem
            elem.set = WeakSet([elem])

    def test_subclass_with_custom_hash(self):
        # Bug #1257731
        class H(WeakSet):
            def __hash__(self):
                return int(id(self) & 0x7fffffff)
        s=H()
        f=set()
        f.add(s)
        self.assertIn(s, f)
        f.remove(s)
        f.add(s)
        f.discard(s)

    def test_init(self):
        s = WeakSet()
        s.__init__(self.items)
        self.assertEqual(s, self.s)
        s.__init__(self.items2)
        self.assertEqual(s, WeakSet(self.items2))
        self.assertRaises(TypeError, s.__init__, s, 2);
        self.assertRaises(TypeError, s.__init__, 1);

    def test_constructor_identity(self):
        s = WeakSet(self.items)
        t = WeakSet(s)
        self.assertNotEqual(id(s), id(t))

    def test_hash(self):
        self.assertRaises(TypeError, hash, self.s)

    def test_clear(self):
        self.s.clear()
        self.assertEqual(self.s, WeakSet([]))
        self.assertEqual(len(self.s), 0)

    def test_copy(self):
        dup = self.s.copy()
        self.assertEqual(self.s, dup)
        self.assertNotEqual(id(self.s), id(dup))

    def test_add(self):
        x = SomeClass('Q')
        self.s.add(x)
        self.assertIn(x, self.s)
        dup = self.s.copy()
        self.s.add(x)
        self.assertEqual(self.s, dup)
        if not test_support.is_jython:  # Jython/JVM can weakly reference list and other objects
            self.assertRaises(TypeError, self.s.add, [])
        self.fs.add(Foo())
        gc.collect()  # CPython assumes Foo() went out of scope and was collected, so ensure the same
        self.assertEqual(len(list(self.fs)), 1)
        self.fs.add(self.obj)
        self.assertEqual(len(list(self.fs)), 1)

    def test_remove(self):
        x = SomeClass('a')
        self.s.remove(x)
        self.assertNotIn(x, self.s)
        self.assertRaises(KeyError, self.s.remove, x)
        if not test_support.is_jython:  # Jython/JVM can weakly reference list and other objects
            self.assertRaises(TypeError, self.s.remove, [])

    def test_discard(self):
        a, q = SomeClass('a'), SomeClass('Q')
        self.s.discard(a)
        self.assertNotIn(a, self.s)
        self.s.discard(q)
        if not test_support.is_jython:  # Jython/JVM can weakly reference list and other objects
            self.assertRaises(TypeError, self.s.discard, [])

    def test_pop(self):
        for i in range(len(self.s)):
            elem = self.s.pop()
            self.assertNotIn(elem, self.s)
        self.assertRaises(KeyError, self.s.pop)

    def test_update(self):
        retval = self.s.update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)
        self.assertRaises(TypeError, self.s.update, [[]])

    def test_update_set(self):
        self.s.update(set(self.items2))
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_ior(self):
        self.s |= set(self.items2)
        for c in (self.items + self.items2):
            self.assertIn(c, self.s)

    def test_intersection_update(self):
        retval = self.s.intersection_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.intersection_update, [[]])

    def test_iand(self):
        self.s &= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items2 and c in self.items:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_difference_update(self):
        retval = self.s.difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        if not test_support.is_jython:  # Jython/JVM can weakly reference list and other objects
            self.assertRaises(TypeError, self.s.difference_update, [[]])
            self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_isub(self):
        self.s -= set(self.items2)
        for c in (self.items + self.items2):
            if c in self.items and c not in self.items2:
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_symmetric_difference_update(self):
        retval = self.s.symmetric_difference_update(self.items2)
        self.assertEqual(retval, None)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)
        self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])

    def test_ixor(self):
        self.s ^= set(self.items2)
        for c in (self.items + self.items2):
            if (c in self.items) ^ (c in self.items2):
                self.assertIn(c, self.s)
            else:
                self.assertNotIn(c, self.s)

    def test_inplace_on_self(self):
        t = self.s.copy()
        t |= t
        self.assertEqual(t, self.s)
        t &= t
        self.assertEqual(t, self.s)
        t -= t
        self.assertEqual(t, WeakSet())
        t = self.s.copy()
        t ^= t
        self.assertEqual(t, WeakSet())

    def test_eq(self):
        # issue 5964 (http://bugs.python.org/issue5964)
        self.assertEqual(self.s, self.s)
        self.assertEqual(self.s, WeakSet(self.items))
        # Jython diverges here in the next test because it constructs
        # WeakSet as a subclass of set; this seems to be the proper
        # thing to do given what is the typical comparison
        self.assertEqual(self.s, set(self.items))
        self.assertNotEqual(self.s, list(self.items))
        self.assertNotEqual(self.s, tuple(self.items))
        self.assertNotEqual(self.s, 1)

    def test_weak_destroy_while_iterating(self):
        """An element dying mid-iteration must not crash the iterator."""
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        # Create new items to be sure no-one else holds a reference
        items = [SomeClass(c) for c in ('a', 'b', 'c')]
        s = WeakSet(items)
        it = iter(s)
        next(it)             # Trigger internal iteration
        # Destroy an item
        del items[-1]
        gc.collect()    # just in case
        # We have removed either the first consumed items, or another one
        self.assertIn(len(list(it)), [len(items), len(items) - 1])
        del it
        extra_collect()
        # The removal has been committed
        self.assertEqual(len(s), len(items))

    def test_weak_destroy_and_mutate_while_iterating(self):
        """Mutating the set while an iterator is alive must not crash."""
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        items = [SomeClass(c) for c in string.ascii_letters]
        s = WeakSet(items)
        @contextlib.contextmanager
        def testcontext():
            try:
                it = iter(s)
                next(it)
                # Schedule an item for removal and recreate it
                u = SomeClass(str(items.pop()))
                gc.collect()      # just in case
                yield u
            finally:
                it = None           # should commit all removals

        gc.collect(); gc.collect()  # final before asserts
        with testcontext() as u:
            self.assertNotIn(u, s)
        with testcontext() as u:
            self.assertRaises(KeyError, s.remove, u)
        self.assertNotIn(u, s)
        with testcontext() as u:
            s.add(u)
        self.assertIn(u, s)
        t = s.copy()
        with testcontext() as u:
            s.update(t)
        self.assertEqual(len(s), len(t))
        with testcontext() as u:
            s.clear()
        self.assertEqual(len(s), 0)

    def test_len_cycles(self):
        """len() stays consistent while cyclic elements are collected."""
        N = 20
        items = [RefCycle() for i in range(N)]
        s = WeakSet(items)
        del items
        gc.collect()
        it = iter(s)
        try:
            next(it)
        except StopIteration:
            pass
        gc.collect()
        n1 = len(s)
        del it
        gc.collect()
        gc.collect()
        n2 = len(s)
        # one item may be kept alive inside the iterator
        self.assertIn(n1, (0, 1))
        self.assertEqual(n2, 0)

    @unittest.skipIf(test_support.is_jython, "GarbageCollection not deterministic in Jython")
    def test_len_race(self):
        """len() must stay within sane bounds while gc runs at varying thresholds."""
        # Extended sanity checks for len() in the face of cyclic collection
        self.addCleanup(gc.set_threshold, *gc.get_threshold())
        for th in range(1, 100):
            N = 20
            gc.collect(0)
            gc.set_threshold(th, th, th)
            items = [RefCycle() for i in range(N)]
            s = WeakSet(items)
            del items
            # All items will be collected at next garbage collection pass
            it = iter(s)
            try:
                next(it)
            except StopIteration:
                pass
            n1 = len(s)
            del it
            n2 = len(s)
            self.assertGreaterEqual(n1, 0)
            self.assertLessEqual(n1, N)
            self.assertGreaterEqual(n2, 0)
            self.assertLessEqual(n2, n1)
Example #58
0
class TileableData(SerializableWithKey, Tileable):
    """Base data object for a tileable: holds the producing operand, the
    chunk split layout (``nsplits``) and the chunks themselves, plus a weak
    set of entity wrappers attached to this data."""

    __slots__ = '__weakref__', '_siblings', '_cix', '_entities'
    # _cix is a derived index helper and must not be copied with the object.
    _no_copy_attrs_ = SerializableWithKey._no_copy_attrs_ | {'_cix'}

    # required fields
    _op = KeyField('op')
    # optional fields
    # `nsplits` means the sizes of chunks for each dimension
    _nsplits = TupleField('nsplits', ValueType.tuple(ValueType.uint64),
                          on_serialize=on_serialize_nsplits)
    _extra_params = DictField('extra_params', key_type=ValueType.string, on_deserialize=AttributeDict)

    def __init__(self, *args, **kwargs):
        # Anything not matching a declared slot is collected into _extra_params.
        extras = AttributeDict((k, kwargs.pop(k)) for k in set(kwargs) - set(self.__slots__))
        kwargs['_extra_params'] = kwargs.pop('_extra_params', extras)
        if kwargs.get('_nsplits', None) is not None:
            # Normalize nsplits into a tuple of tuples for hashability/serialization.
            kwargs['_nsplits'] = tuple(tuple(s) for s in kwargs['_nsplits'])

        super(TileableData, self).__init__(*args, **kwargs)

        if hasattr(self, '_chunks') and self._chunks:
            # Keep chunks ordered by their index so positional access is stable.
            self._chunks = sorted(self._chunks, key=attrgetter('index'))

        # Entities are tracked weakly so they can be gc'ed independently.
        self._entities = WeakSet()

    @property
    def chunk_shape(self):
        """Number of chunks along each dimension, or None if untiled."""
        if hasattr(self, '_nsplits') and self._nsplits is not None:
            return tuple(map(len, self._nsplits))

    @property
    def chunks(self):
        """The chunk list, or None before tiling."""
        return getattr(self, '_chunks', None)

    @property
    def op(self):
        """The operand that produces this data."""
        return getattr(self, '_op', None)

    @property
    def nsplits(self):
        """Chunk sizes per dimension, or None before tiling."""
        return getattr(self, '_nsplits', None)

    @nsplits.setter
    def nsplits(self, new_nsplits):
        self._nsplits = new_nsplits

    @property
    def inputs(self):
        return self.op.inputs or []

    @inputs.setter
    def inputs(self, new_inputs):
        self.op.inputs = new_inputs

    @property
    def params(self):
        # params return the properties which useful to rebuild a new tileable object
        return dict()

    @property
    def extra_params(self):
        return self._extra_params

    @property
    def cix(self):
        """Lazily built chunk indexer; rebuilt fresh for 0-d or on failure."""
        if self.ndim == 0:
            return ChunksIndexer(self)

        try:
            if getattr(self, '_cix', None) is None:
                self._cix = ChunksIndexer(self)
            return self._cix
        except (TypeError, ValueError):
            # Fall back to a fresh indexer if the cached one cannot be used.
            return ChunksIndexer(self)

    @property
    def entities(self):
        return self._entities

    def is_coarse(self):
        """True when this data has not been tiled into chunks yet."""
        return not hasattr(self, '_chunks') or self._chunks is None or len(self._chunks) == 0

    def is_sparse(self):
        return self.op.is_sparse()

    issparse = is_sparse

    @enter_build_mode
    def attach(self, entity):
        self._entities.add(entity)

    @enter_build_mode
    def detach(self, entity):
        self._entities.discard(entity)

    def execute(self, session=None, **kw):
        """Run this tileable in *session* (default session if omitted)."""
        from .session import Session

        if session is None:
            session = Session.default_or_local()
        return session.run(self, **kw)

    def fetch(self, session=None, **kw):
        """Fetch the computed result from *session* (default if omitted)."""
        from .session import Session

        if session is None:
            session = Session.default_or_local()
        return session.fetch(self, **kw)

    def _set_execute_session(self, session):
        _cleaner.register(self, session)

    # write-only property: records the session used to execute this data
    _execute_session = property(fset=_set_execute_session)
Example #59
0
class VersioningManager(object):
    """Coordinates Postgres audit versioning for SQLAlchemy models.

    Installs the audit schema/trigger DDL around the activity table,
    collects versioned model classes as they are instrumented, and stamps
    per-transaction values onto activity rows after each flush.
    """

    #: (event, DDL) pairs attached to the activity table's lifecycle.
    table_listeners = [
        (
            'before_create',
            sa.schema.DDL(read_file('schema.sql')),
        ),
        (
            'after_create',
            sa.schema.DDL(
                # '%' must be doubled so DDL() does not treat it as a
                # bind-parameter marker.
                read_file('create_activity.sql').replace('%', '%%') +
                read_file('audit_table.sql').replace('%', '%%')
            )
        ),
        (
            'after_drop',
            sa.schema.DDL('DROP SCHEMA audit CASCADE')
        )
    ]

    def __init__(self, actor_cls=None):
        # Values to stamp onto activity rows of the current transaction.
        self.values = {}
        self._actor_cls = actor_cls
        # (target, event, handler) triples managed by attach/remove_listeners.
        self.listeners = (
            (
                orm.mapper,
                'instrument_class',
                self.instrument_versioned_classes
            ),
            (
                orm.mapper,
                'after_configured',
                self.configure_versioned_classes
            ),
            (
                orm.session.Session,
                'after_flush',
                self.receive_after_flush,
            ),
        )
        # Weak so model classes dropped elsewhere do not linger here.
        self.pending_classes = WeakSet()
        self.cached_ddls = {}

    @property
    def transaction_values(self):
        """Values applied to activity rows of the current transaction."""
        return self.values

    @contextmanager
    def disable(self, session):
        """Temporarily disable audit triggers inside a ``with`` block.

        Saves the current ``session_replication_role``, switches it to
        ``local`` (which suppresses the audit triggers), and restores the
        saved value when the block exits.
        """
        current_setting = session.execute(
            "SELECT current_setting('session_replication_role')"
        ).fetchone().current_setting
        session.execute('SET LOCAL session_replication_role = "local"')
        try:
            yield
        finally:
            # BUGFIX: restore even when the body raises; previously an
            # exception left audit triggers disabled for the remainder of
            # the transaction.
            session.execute('SET LOCAL session_replication_role = "{}"'.format(
                current_setting,
            ))

    def set_activity_values(self, session):
        """Apply transaction_values to this transaction's recent activity rows."""
        table = self.activity_cls.__table__
        if self.values:
            values = convert_callables(self.transaction_values)
            stmt = (
                table
                .update()
                .values(**values)
                .where(
                    sa.and_(
                        # Only rows created by the current transaction...
                        table.c.transaction_id == sa.func.txid_current(),
                        # ...and recent, to bound the scan.
                        table.c.issued_at > (
                            sa.func.now() - timedelta(days=1)
                        )
                    )
                )
            )
            session.execute(stmt)

    def receive_after_flush(self, session, flush_context):
        """SQLAlchemy after_flush hook: stamp values onto new activity rows."""
        self.set_activity_values(session)

    def instrument_versioned_classes(self, mapper, cls):
        """
        Collect versioned class and add it to pending_classes list.

        :mapper mapper: SQLAlchemy mapper object
        :cls cls: SQLAlchemy declarative class
        """
        if hasattr(cls, '__versioned__') and cls not in self.pending_classes:
            self.pending_classes.add(cls)

    def configure_versioned_classes(self):
        """
        Configures all versioned classes that were collected during
        instrumentation process.
        """
        for cls in self.pending_classes:
            audit_table(cls.__table__, cls.__versioned__.get('exclude'))
        assign_actor(self.base, self.activity_cls, self.actor_cls)

    def attach_table_listeners(self):
        """Attach the DDL listeners to the activity table."""
        for values in self.table_listeners:
            sa.event.listen(self.activity_cls.__table__, *values)

    def remove_table_listeners(self):
        """Detach the DDL listeners from the activity table."""
        for values in self.table_listeners:
            sa.event.remove(self.activity_cls.__table__, *values)

    @property
    def actor_cls(self):
        """Resolve the actor class, looking up string names in the registry.

        :raises ImproperlyConfigured: if init() has not been called or the
            named class is not in the declarative registry.
        """
        if isinstance(self._actor_cls, str):
            if not self.base:
                raise ImproperlyConfigured(
                    'This manager does not have declarative base set up yet. '
                    'Call init method to set up this manager.'
                )
            registry = self.base._decl_class_registry
            try:
                return registry[self._actor_cls]
            except KeyError:
                raise ImproperlyConfigured(
                    'Could not build relationship between Activity'
                    ' and %s. %s was not found in declarative class '
                    'registry. Either configure VersioningManager to '
                    'use different actor class or disable this '
                    'relationship by setting it to None.' % (
                        self._actor_cls,
                        self._actor_cls
                    )
                )
        return self._actor_cls

    def attach_listeners(self):
        """Attach table DDL listeners and mapper/session event listeners."""
        self.attach_table_listeners()
        for listener in self.listeners:
            sa.event.listen(*listener)

    def remove_listeners(self):
        """Detach everything attach_listeners() installed."""
        self.remove_table_listeners()
        for listener in self.listeners:
            sa.event.remove(*listener)

    def activity_model_factory(self, base):
        """Build the Activity model bound to *base*, stored in the audit schema."""
        class Activity(activity_base(base)):
            __tablename__ = 'activity'
            __table_args__ = {'schema': 'audit'}

        return Activity

    def init(self, base):
        """Bind this manager to a declarative *base* and start listening."""
        self.base = base
        self.activity_cls = self.activity_model_factory(base)
        self.attach_listeners()
Example #60
0
class SchemaStore:
    """Collects JSON schemas from bundled resources and installed packages
    and notifies registered listeners whenever the schema set changes."""

    def __init__(self) -> None:
        # Listeners notified on schema changes; weak references so the
        # store does not keep them alive.
        self._listeners = WeakSet()  # type: WeakSet[StoreListener]
        # Registered schemas in listener format (fileMatch/uri dicts).
        self._schema_list = []  # type: List[Dict]
        # Maps schema URIs to their JSON-encoded content.
        self._schema_uri_to_content = {}  # type: Dict[str, str]
        # Guards against double initialization in load_schemas().
        self._schemas_loaded = False
        # Settings objects we registered on_change callbacks on
        # (cleared again in cleanup()).
        self._watched_settings = []  # type: List[sublime.Settings]

    def add_listener(self, listener: StoreListener) -> None:
        """Register *listener*; when schemas are already loaded, notify it
        asynchronously with the current schema list."""
        self._listeners.add(listener)
        if not self._schemas_loaded:
            return
        sublime.set_timeout_async(
            lambda: listener.on_store_changed(self._schema_list))

    def get_schema_for_uri(self, uri: str) -> Optional[str]:
        """Return the JSON-encoded schema content for *uri*.

        Cached content is returned first; internal ``sublime://schemas/...``
        URIs are resolved against bundled resource files.  Returns ``None``
        (after logging) for unknown URIs.
        """
        if uri in self._schema_uri_to_content:
            return self._schema_uri_to_content[uri]
        if uri.startswith('sublime://'):
            # Strip only the leading scheme.  The previous
            # uri.replace('sublime://', '') also removed any later
            # occurrence of the substring inside the path.
            schema_path = uri[len('sublime://'):]
            schema_components = schema_path.split('/')
            domain = schema_components[0]
            if domain == 'schemas':
                # Internal schema - 1:1 schema path to file path mapping.
                schema_path = 'Packages/{}/{}.json'.format(
                    LspJSONPlugin.package_name, schema_path)
                # Round-trip through decode/encode to strip comments and
                # produce compact JSON.
                return sublime.encode_value(sublime.decode_value(
                    ResourcePath(schema_path).read_text()),
                                            pretty=False)
        print('LSP-json: Unknown schema URI "{}"'.format(uri))
        return None

    def cleanup(self) -> None:
        """Unregister all settings-change callbacks added by load_schemas().

        Also drops the references to the watched Settings objects so a
        repeated cleanup() does not clear them twice and the objects can
        be released.  (Previously the list was never emptied.)
        """
        for settings in self._watched_settings:
            settings.clear_on_change(LspJSONPlugin.package_name)
        self._watched_settings = []

    def load_schemas(self) -> None:
        """Start watching package settings and trigger the initial async
        schema collection.  Only the first call has any effect."""
        if self._schemas_loaded:
            return

        def schedule_collect() -> None:
            # Collection touches the resource index, so always defer it to
            # the async thread.
            sublime.set_timeout_async(self._collect_schemas_async)

        settings = sublime.load_settings('sublime-package.json')
        settings.add_on_change(LspJSONPlugin.package_name, schedule_collect)
        self._watched_settings.append(settings)
        schedule_collect()
        self._schemas_loaded = True

    def _collect_schemas_async(self) -> None:
        """Rebuild the full schema set from scratch and notify listeners.

        The package schemas collected for the global Preferences file feed
        the project-settings and syntax-settings generators.
        """
        self._schema_list = []
        self._schema_uri_to_content = {}
        self._load_bundled_schemas()
        prefs_schemas = self._load_package_schemas()
        self._generate_project_settings_schemas(prefs_schemas)
        self._load_syntax_schemas(prefs_schemas)
        sublime.set_timeout_async(self._on_schemas_changed)

    def _load_bundled_schemas(self) -> None:
        """Parse and register the schema files shipped with this plugin."""
        bundled = ('lsp-json-schemas_extra.json', 'lsp-json-schemas.json')
        for name in bundled:
            resource = ResourcePath(
                'Packages/{}/{}'.format(LspJSONPlugin.package_name, name))
            parsed = self._parse_schema(resource)
            if parsed:
                self._register_schemas(parsed)

    def _load_package_schemas(self) -> List[Any]:
        """Collect schemas contributed by installed packages.

        Scans every ``sublime-package.json`` resource, registers each
        contributed settings schema, and returns the schema contents that
        target the global ``Preferences.sublime-settings`` file (used later
        to generate project- and syntax-settings schemas).
        """
        global_preferences_schemas = []
        for resource in ResourcePath.glob_resources('sublime-package.json'):
            schema = self._parse_schema(resource)
            if not schema:
                continue
            # Package manifests are third-party input; tolerate missing
            # 'contributions'/'settings' keys instead of raising
            # AttributeError on None (previous behavior).
            settings = schema.get('contributions', {}).get('settings') or []
            for contribution in settings:
                file_patterns = contribution.get('file_patterns')
                schema_content = contribution.get('schema')
                if not schema_content:
                    # Nothing to register without an actual schema body.
                    continue
                # Fall back to a synthetic URI when the schema has no $id.
                i = len(self._schema_uri_to_content)
                uri = schema_content.get(
                    '$id') or 'sublime://settings/{}'.format(i)
                self._schema_uri_to_content[uri] = sublime.encode_value(
                    schema_content, pretty=False)
                self._register_schemas([{
                    'fileMatch': file_patterns,
                    'uri': uri
                }])
                if file_patterns:
                    for pattern in file_patterns:
                        if pattern == '/Preferences.sublime-settings':
                            global_preferences_schemas.append(schema_content)
        return global_preferences_schemas

    def _generate_project_settings_schemas(
            self, global_preferences_schemas: List[Any]) -> None:
        """Expose global Preferences schemas inside project files.

        Each schema is wrapped so that it applies to the "settings" object
        of *.sublime-project files.
        """
        for i, schema in enumerate(global_preferences_schemas):
            uri = 'sublime://auto-generated/sublime-project/{}'.format(i)
            content = {
                '$schema': 'http://json-schema.org/draft-07/schema#',
                '$id': uri,
                'allowComments': True,
                'allowTrailingCommas': True,
                'type': 'object',
                'properties': {'settings': schema},
            }
            self._schema_uri_to_content[uri] = sublime.encode_value(
                content, pretty=False)
            self._register_schemas([
                {'fileMatch': ['/*.sublime-project'], 'uri': uri},
            ])

    def _load_syntax_schemas(self,
                             global_preferences_schemas: List[Any]) -> None:
        """Map every installed syntax's settings file name to schemas.

        Registers the bundled syntax-settings schema for each discovered
        ``<syntax>.sublime-settings`` file and additionally applies every
        global Preferences schema to those files.
        """
        try:
            syntaxes = sublime.list_syntaxes()
        except AttributeError:
            # Older Sublime API without list_syntaxes().
            syntaxes = []
        file_patterns = [
            '/{}.sublime-settings'.format(
                path.splitext(path.basename(syntax.path))[0])
            for syntax in syntaxes
        ]
        if file_patterns:
            self._register_schemas([{
                'fileMatch': file_patterns,
                'uri': 'sublime://schemas/syntax.sublime-settings'
            }])
        for i, schema in enumerate(global_preferences_schemas):
            uri = 'sublime://auto-generated/syntax.sublime-settings/{}'.format(
                i)
            content = {
                '$schema': 'http://json-schema.org/draft-07/schema#',
                '$id': uri,
                'allowComments': True,
                'allowTrailingCommas': True,
                'type': 'object',
            }
            content.update(schema)
            self._schema_uri_to_content[uri] = sublime.encode_value(
                content, pretty=False)
            self._register_schemas([{
                'fileMatch': file_patterns,
                'uri': uri
            }])

    def _parse_schema(self, resource: ResourcePath) -> Any:
        """Decode *resource* as JSON; return None (after logging) when it
        cannot be parsed."""
        try:
            parsed = sublime.decode_value(resource.read_text())
        except Exception:
            print('Failed parsing schema "{}"'.format(resource.file_path()))
            return None
        return parsed

    def _register_schemas(self, schemas: List[Any]) -> None:
        """Append *schemas* to the schema list, URL-quoting every fileMatch
        pattern (slashes and wildcards are kept literal)."""
        for schema in schemas:
            patterns = schema.get('fileMatch')
            if patterns:
                quoted = [quote(pattern, safe="/*") for pattern in patterns]
                schema['fileMatch'] = quoted
            self._schema_list.append(schema)

    def _on_schemas_changed(self) -> None:
        """Push the current schema list to every registered listener."""
        schemas = self._schema_list
        for listener in self._listeners:
            listener.on_store_changed(schemas)