Example #1
class ConnectedClientMock(unittest.mock.Mock):
    on_stream_established = callbacks.Signal()
    on_stream_destroyed = callbacks.Signal()
    on_failure = callbacks.Signal()
    on_stopped = callbacks.Signal()

    before_stream_established = callbacks.SyncSignal()

    negotiation_timeout = timedelta(milliseconds=100)

    def __init__(self):
        super().__init__([
            "stream",
            "start",
            "stop",
            "set_presence",
        ])

        self.established = True

        self.stream_features = nonza.StreamFeatures()
        self.stream.send_iq_and_wait_for_reply = CoroutineMock()
        self.mock_services = {}

    def _get_child_mock(self, **kw):
        return unittest.mock.Mock(**kw)

    def summon(self, cls):
        try:
            return self.mock_services[cls]
        except KeyError:
            raise AssertionError("service class not provisioned in mock")
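
# A minimal usage sketch (hypothetical test code; ``FooService`` and
# ``SomeDependency`` are illustrative names, not part of aioxmpp):
#
#     class TestFooService(unittest.TestCase):
#         def setUp(self):
#             self.cc = ConnectedClientMock()
#             self.cc.mock_services[SomeDependency] = unittest.mock.Mock()
#             self.service = FooService(self.cc)
#
# summon() then hands out the provisioned mock instead of instantiating
# the real dependency, and fails the test with an AssertionError for
# any service class that was not explicitly provisioned.
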
Example #2
class ConnectedClientMock(unittest.mock.Mock):
    on_stream_established = callbacks.Signal()
    on_stream_destroyed = callbacks.Signal()
    on_failure = callbacks.Signal()
    on_stopped = callbacks.Signal()

    before_stream_established = callbacks.SyncSignal()

    negotiation_timeout = timedelta(milliseconds=100)

    def __init__(self):
        super().__init__([
            "stream",
            "start",
            "stop",
            "set_presence",
            "local_jid",
            "enqueue",
        ])

        self.established = True

        self.stream_features = nonza.StreamFeatures()
        self.stream.on_message_received = callbacks.AdHocSignal()
        self.stream.on_presence_received = callbacks.AdHocSignal()
        self.stream.on_stream_destroyed = callbacks.AdHocSignal()
        self.stream.app_inbound_message_filter = FilterMock()
        self.stream.app_inbound_presence_filter = FilterMock()
        self.stream.app_outbound_message_filter = FilterMock()
        self.stream.app_outbound_presence_filter = FilterMock()
        self.stream.service_inbound_message_filter = FilterMock()
        self.stream.service_inbound_presence_filter = FilterMock()
        self.stream.service_outbound_message_filter = FilterMock()
        self.stream.service_outbound_presence_filter = FilterMock()
        self.stream.send_iq_and_wait_for_reply.side_effect = \
            AssertionError("use of deprecated function")
        self.stream.send.side_effect = \
            AssertionError("use of deprecated function")
        self.stream.enqueue.side_effect = \
            AssertionError("use of deprecated function")
        self.send = CoroutineMock()
        self.stream.enqueue_stanza = self.stream.enqueue
        self.mock_services = {}

    def _get_child_mock(self, **kw):
        return unittest.mock.Mock(**kw)

    def summon(self, cls):
        try:
            return self.mock_services[cls]
        except KeyError:
            raise AssertionError("service class not provisioned in mock")
Example #3
class AvatarService(service.Service):
    """
    Access and publish User Avatars (:xep:`84`). Falls back to
    vCard-based avatars (:xep:`153`) if no PEP avatar is available.

    This service provides an interface for accessing the avatar of other
    entities in the network, getting notifications on avatar changes and
    publishing an avatar for this entity.

    .. versionchanged:: 0.10

       Support for :xep:`vCard-Based Avatars <153>` was added.

    Observing avatars:

    .. note:: :class:`AvatarService` only caches the metadata, not the
              actual image data. This is the job of the caller.

    .. signal:: on_metadata_changed(jid, metadata)

        Fires when avatar metadata changes.

        :param jid: The JID which the avatar belongs to.
        :param metadata: The new metadata descriptors.
        :type metadata: a sequence of
            :class:`~aioxmpp.avatar.service.AbstractAvatarDescriptor`
            instances

    .. automethod:: get_avatar_metadata

    .. automethod:: subscribe

    Publishing avatars:

    .. automethod:: publish_avatar_set

    .. automethod:: disable_avatar

    .. automethod:: wipe_avatar

    Configuration:

    .. autoattribute:: synchronize_vcard

    .. autoattribute:: advertise_vcard

    .. attribute:: avatar_pep

       The PEP descriptor for claiming the avatar metadata namespace.
       The value is a :class:`~aioxmpp.pep.service.RegisteredPEPNode`,
       whose :attr:`~aioxmpp.pep.service.RegisteredPEPNode.notify`
       property can be used to disable or enable the notification
       feature.

    .. autoattribute:: metadata_cache_size
       :annotation: = 200
    """

    ORDER_AFTER = [
        disco.DiscoClient,
        disco.DiscoServer,
        pubsub.PubSubClient,
        pep.PEPClient,
        vcard.VCardService,
        presence.PresenceClient,
        presence.PresenceServer,
    ]

    avatar_pep = pep.register_pep_node(
        namespaces.xep0084_metadata,
        notify=True,
    )

    on_metadata_changed = callbacks.Signal()

    def __init__(self, client, **kwargs):
        super().__init__(client, **kwargs)
        self._has_pep_avatar = set()
        self._metadata_cache = LRUDict()
        self._metadata_cache.maxsize = 200
        self._pubsub = self.dependencies[pubsub.PubSubClient]
        self._pep = self.dependencies[pep.PEPClient]
        self._presence_server = self.dependencies[presence.PresenceServer]
        self._disco = self.dependencies[disco.DiscoClient]
        self._vcard = self.dependencies[vcard.VCardService]
        # we use this lock to prevent race conditions between different
        # calls of the methods by one client.
        # XXX: Other, independent clients may still cause inconsistent
        # data by race conditions, this should be fixed by at least
        # checking for consistent data after an update.
        self._publish_lock = asyncio.Lock()
        self._synchronize_vcard = False
        self._advertise_vcard = True
        self._vcard_resource_interference = set()
        self._vcard_id = None
        self._vcard_rehashing_for = None
        self._vcard_rehash_task = None

    @property
    def metadata_cache_size(self):
        """
        Maximum number of cache entries in the avatar metadata cache.

        This is mostly a measure to prevent malicious peers from
        exhausting memory by spamming vCard based avatar metadata for
        different resources.

        .. versionadded:: 0.10

        """
        return self._metadata_cache.maxsize

    @metadata_cache_size.setter
    def metadata_cache_size(self, value):
        self._metadata_cache.maxsize = value

    @property
    def synchronize_vcard(self):
        """
        Set this property to true to enable publishing the vCard avatar.

        This property defaults to false. For the setting to take
        effect, you have to publish your avatar with
        :meth:`publish_avatar_set` or :meth:`disable_avatar` *after*
        this switch has been set to true.
        """
        return self._synchronize_vcard

    @synchronize_vcard.setter
    def synchronize_vcard(self, value):
        self._synchronize_vcard = bool(value)
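
    # Hedged usage sketch: the flag only takes effect at the next
    # publish, so a caller would typically do (assuming ``avatars`` is
    # a summoned AvatarService and ``avatar_set`` an AvatarSet):
    #
    #     avatars.synchronize_vcard = True
    #     await avatars.publish_avatar_set(avatar_set)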

    @property
    def advertise_vcard(self):
        """
        Set this property to false to disable advertisement of the
        vCard avatar via presence broadcast.

        Note that this reduces traffic, since it makes the presence
        stanzas smaller and the hash no longer needs to be
        recalculated. By the business rules of :xep:`153`, it also
        disables vCard advertisement for all other resources of the
        bare local JID.

        Note that when this feature is enabled again, the vCard has to
        be fetched from the server to recalculate the hash.
        """
        return self._advertise_vcard

    @advertise_vcard.setter
    def advertise_vcard(self, value):
        self._advertise_vcard = bool(value)
        if self._advertise_vcard:
            self._vcard_id = None
            self._start_rehash_task()

    @service.depfilter(aioxmpp.stream.StanzaStream,
                       "service_outbound_presence_filter")
    def _attach_vcard_notify_to_presence(self, stanza):
        if self._advertise_vcard:
            if self._vcard_resource_interference:
                # do not advertise the hash if there is resource interference
                stanza.xep0153_x = avatar_xso.VCardTempUpdate()
            else:
                stanza.xep0153_x = avatar_xso.VCardTempUpdate(self._vcard_id)

        return stanza

    def _update_metadata(self, cache_jid, metadata):
        try:
            cached_metadata = self._metadata_cache[cache_jid]
        except KeyError:
            pass
        else:
            if cached_metadata == metadata:
                return

        self._metadata_cache[cache_jid] = metadata
        self.on_metadata_changed(cache_jid, metadata)

    def _handle_notify(self, full_jid, stanza):
        # handle resource interference as per the XEP-0153 business
        # rules; we keep up this tracking even if vCard advertisement
        # is off
        if (full_jid.bare() == self.client.local_jid.bare()
                and full_jid != self.client.local_jid):
            if stanza.xep0153_x is None:
                self._vcard_resource_interference.add(full_jid)
            else:
                if self._vcard_resource_interference:
                    self._vcard_resource_interference.discard(full_jid)
                    if not self._vcard_resource_interference:
                        self._vcard_id = None

        # otherwise ignore stanzas without xep0153_x payload, or
        # no photo tag.
        if stanza.xep0153_x is None:
            return

        if stanza.xep0153_x.photo is None:
            return

        # special case MUC presence – otherwise the vcard is retrieved
        # for the bare jid
        if stanza.xep0045_muc_user is not None:
            cache_jid = full_jid
        else:
            cache_jid = full_jid.bare()

        if cache_jid not in self._has_pep_avatar:
            metadata = self._cook_vcard_notify(cache_jid, stanza)
            self._update_metadata(cache_jid, metadata)

        # trigger the download of the vCard and calculation of the
        # vCard avatar hash, if some other resource of our bare jid
        # reported a hash distinct from ours!
        # don't do this if there is a non-compliant resource, we don't
        # send the hash in that case anyway
        if (full_jid.bare() == self.client.local_jid.bare()
                and full_jid != self.client.local_jid and self._advertise_vcard
                and not self._vcard_resource_interference):
            if (self._vcard_id is None or
                    stanza.xep0153_x.photo.lower() != self._vcard_id.lower()):

                # do not rehash if we already have a rehash task that
                # was triggered by an update with the same hash
                if (self._vcard_rehashing_for is None
                        or self._vcard_rehashing_for !=
                        stanza.xep0153_x.photo.lower()):
                    self._vcard_rehashing_for = stanza.xep0153_x.photo.lower()
                    self._start_rehash_task()

    def _start_rehash_task(self):
        if self._vcard_rehash_task is not None:
            self._vcard_rehash_task.cancel()

        self._vcard_id = None
        # as per the XEP, immediately resend the presence with an empty
        # update element; as this is not synchronous, it might already
        # contain the new hash, but that is okay as well (it also keeps
        # the cached presence stanzas coherent).
        self._presence_server.resend_presence()

        self._vcard_rehash_task = asyncio.ensure_future(
            self._calculate_vcard_id())

        def set_new_vcard_id(fut):
            self._vcard_rehashing_for = None
            if not fut.cancelled():
                self._vcard_id = fut.result()

        self._vcard_rehash_task.add_done_callback(set_new_vcard_id)

    async def _calculate_vcard_id(self):
        self.logger.debug("updating vcard hash")
        vcard = await self._vcard.get_vcard()
        self.logger.debug("got vcard for hash update: %s", vcard)
        photo = vcard.get_photo_data()

        # if no photo is set in the vcard, set an empty <photo> element
        # in the update; according to the spec this means the avatar
        # is disabled
        if photo is None:
            self.logger.debug("no photo in vcard, advertising as such")
            return ""

        sha1 = hashlib.sha1()
        sha1.update(photo)
        new_hash = sha1.hexdigest().lower()
        self.logger.debug("updated hash to %s", new_hash)
        return new_hash

    @service.depsignal(presence.PresenceClient, "on_available")
    def _handle_on_available(self, full_jid, stanza):
        self._handle_notify(full_jid, stanza)

    @service.depsignal(presence.PresenceClient, "on_changed")
    def _handle_on_changed(self, full_jid, stanza):
        self._handle_notify(full_jid, stanza)

    @service.depsignal(presence.PresenceClient, "on_unavailable")
    def _handle_on_unavailable(self, full_jid, stanza):
        if full_jid.bare() == self.client.local_jid.bare():
            if self._vcard_resource_interference:
                self._vcard_resource_interference.discard(full_jid)
                if not self._vcard_resource_interference:
                    self._start_rehash_task()

        # correctly handle MUC avatars
        if stanza.xep0045_muc_user is not None:
            self._metadata_cache.pop(full_jid, None)

    def _cook_vcard_notify(self, jid, stanza):
        result = []
        # note: an empty photo element correctly
        # results in an empty avatar metadata list
        if stanza.xep0153_x.photo:
            result.append(
                VCardAvatarDescriptor(
                    remote_jid=jid,
                    id_=stanza.xep0153_x.photo,
                    mime_type=None,
                    vcard=self._vcard,
                    nbytes=None,
                ))
        return result

    def _cook_metadata(self, jid, items):
        def iter_metadata_info_nodes(items):
            for item in items:
                yield from item.registered_payload.iter_info_nodes()

        result = []
        for info_node in iter_metadata_info_nodes(items):
            if info_node.url is not None:
                descriptor = HttpAvatarDescriptor(
                    remote_jid=jid,
                    id_=info_node.id_,
                    mime_type=info_node.mime_type,
                    nbytes=info_node.nbytes,
                    width=info_node.width,
                    height=info_node.height,
                    url=info_node.url,
                )
            else:
                descriptor = PubsubAvatarDescriptor(
                    remote_jid=jid,
                    id_=info_node.id_,
                    mime_type=info_node.mime_type,
                    nbytes=info_node.nbytes,
                    width=info_node.width,
                    height=info_node.height,
                    pubsub=self._pubsub,
                )
            result.append(descriptor)

        return result

    @service.attrsignal(avatar_pep, "on_item_publish")
    def _handle_pubsub_publish(self, jid, node, item, *, message=None):
        # update the metadata cache
        metadata = self._cook_metadata(jid, [item])
        self._has_pep_avatar.add(jid)
        self._update_metadata(jid, metadata)

    async def _get_avatar_metadata_vcard(self, jid):
        logger.debug("trying vCard avatar as fallback for %s", jid)
        vcard = await self._vcard.get_vcard(jid)
        photo = vcard.get_photo_data()
        mime_type = vcard.get_photo_mime_type()
        if photo is None:
            return []

        logger.debug("success vCard avatar as fallback for %s", jid)
        sha1 = hashlib.sha1()
        sha1.update(photo)
        return [
            VCardAvatarDescriptor(
                remote_jid=jid,
                id_=sha1.hexdigest(),
                mime_type=mime_type,
                nbytes=len(photo),
                vcard=self._vcard,
                image_bytes=photo,
            )
        ]

    async def _get_avatar_metadata_pep(self, jid):
        try:
            metadata_raw = await self._pubsub.get_items(
                jid, namespaces.xep0084_metadata, max_items=1)
        except aioxmpp.XMPPCancelError as e:
            # transparently map feature-not-implemented and
            # item-not-found to be equivalent unset avatar
            if e.condition in (aioxmpp.ErrorCondition.FEATURE_NOT_IMPLEMENTED,
                               aioxmpp.ErrorCondition.ITEM_NOT_FOUND):
                return []
            raise

        self._has_pep_avatar.add(jid)
        return self._cook_metadata(jid, metadata_raw.payload.items)

    async def get_avatar_metadata(self,
                                  jid,
                                  *,
                                  require_fresh=False,
                                  disable_pep=False):
        """
        Retrieve a list of avatar descriptors.

        :param jid: the JID for which to retrieve the avatar metadata.
        :type jid: :class:`aioxmpp.JID`
        :param require_fresh: if true, do not return results from the
            avatar metadata cache, but retrieve them again from the server.
        :type require_fresh: :class:`bool`
        :param disable_pep: if true, do not try to retrieve the avatar
            via PEP; only try the vCard fallback. This is usually only
            useful when querying avatars via MUC, where the PEP request
            would be invalid (since it would be for a full JID).
        :type disable_pep: :class:`bool`

        :returns: an iterable of avatar descriptors.
        :rtype: a :class:`list` of
            :class:`~aioxmpp.avatar.service.AbstractAvatarDescriptor`
            instances

        An empty list means that no avatar is set.

        An :class:`XMPPCancelError` with condition
        ``feature-not-implemented`` or ``item-not-found`` is masked,
        and an empty list of avatar descriptors is returned, since this
        is semantically equivalent to not having an avatar.

        .. note::

           It is usually an error to get the avatar for a full JID;
           normally, the avatar is set for the bare JID of a user. The
           exception is vCard avatars over MUC, where the IQ requests
           for the vCard may be translated by the MUC server. It is
           recommended to use the `disable_pep` option in that case.
        """

        if require_fresh:
            self._metadata_cache.pop(jid, None)
        else:
            try:
                return self._metadata_cache[jid]
            except KeyError:
                pass

        if disable_pep:
            metadata = []
        else:
            metadata = await self._get_avatar_metadata_pep(jid)

        # try the vCard fallback; note: we skip this if the peer is
        # known to publish PEP avatars, since an empty PEP result then
        # means the avatar was explicitly disabled
        if not metadata and jid not in self._has_pep_avatar:
            metadata = await self._get_avatar_metadata_vcard(jid)

        # if a notify fired while we waited for the results, use the
        # version in the cache; this mitigates the race condition,
        # because if our version is actually newer we will soon get
        # another notify for this version change!
        if jid not in self._metadata_cache:
            self._update_metadata(jid, metadata)
        return self._metadata_cache[jid]
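
    # Illustrative sketch only (assumes ``avatars`` is a summoned
    # AvatarService and ``peer`` a bare aioxmpp.JID): since this
    # service caches only the metadata, the caller fetches the actual
    # image data itself via the descriptor:
    #
    #     descriptors = await avatars.get_avatar_metadata(peer)
    #     if descriptors:
    #         image_bytes = await descriptors[0].get_image_bytes()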

    async def subscribe(self, jid):
        """
        Explicitly subscribe to metadata change notifications for `jid`.
        """
        await self._pubsub.subscribe(jid, namespaces.xep0084_metadata)

    @aioxmpp.service.depsignal(aioxmpp.stream.StanzaStream,
                               "on_stream_destroyed")
    def handle_stream_destroyed(self, reason):
        self._metadata_cache.clear()
        self._vcard_resource_interference.clear()
        self._has_pep_avatar.clear()

    async def publish_avatar_set(self, avatar_set):
        """
        Make `avatar_set` the current avatar of the jid associated with this
        connection.

        If :attr:`synchronize_vcard` is true and PEP is available the
        vCard is only synchronized if the PEP update is successful.

        This means publishing the ``image/png`` avatar data and the
        avatar metadata set in pubsub. The `avatar_set` must be an
        instance of :class:`AvatarSet`. If :attr:`synchronize_vcard` is
        true the avatar is additionally published in the user vCard.
        """
        id_ = avatar_set.png_id

        done = False
        async with self._publish_lock:
            if await self._pep.available():
                await self._pep.publish(namespaces.xep0084_data,
                                        avatar_xso.Data(
                                            avatar_set.image_bytes),
                                        id_=id_)

                await self._pep.publish(namespaces.xep0084_metadata,
                                        avatar_set.metadata,
                                        id_=id_)
                done = True

            if self._synchronize_vcard:
                my_vcard = await self._vcard.get_vcard()
                my_vcard.set_photo_data("image/png", avatar_set.image_bytes)
                self._vcard_id = avatar_set.png_id
                await self._vcard.set_vcard(my_vcard)
                self._presence_server.resend_presence()
                done = True

        if not done:
            raise RuntimeError(
                "failed to publish avatar: no protocol available")

    async def _disable_vcard_avatar(self):
        my_vcard = await self._vcard.get_vcard()
        my_vcard.clear_photo_data()
        self._vcard_id = ""
        await self._vcard.set_vcard(my_vcard)
        self._presence_server.resend_presence()

    async def disable_avatar(self):
        """
        Temporarily disable the avatar.

        If :attr:`synchronize_vcard` is true, the vCard avatar is
        disabled (even if disabling the PEP avatar fails).

        This is done by setting the avatar metadata node empty and if
        :attr:`synchronize_vcard` is true, downloading the vCard,
        removing the avatar data and re-uploading the vCard.

        This method does not error if neither protocol is active.

        :raises aioxmpp.errors.GatherError: if an exception is raised
            by the spawned tasks.
        """

        async with self._publish_lock:
            todo = []
            if self._synchronize_vcard:
                todo.append(self._disable_vcard_avatar())

            if await self._pep.available():
                todo.append(
                    self._pep.publish(namespaces.xep0084_metadata,
                                      avatar_xso.Metadata()))

            await gather_reraise_multi(*todo, message="disable_avatar")

    async def wipe_avatar(self):
        """
        Remove all avatar data stored on the server.

        If :attr:`synchronize_vcard` is true, the vCard avatar is
        disabled even if disabling the PEP avatar fails.

        This is equivalent to :meth:`disable_avatar` for vCard-based
        avatars, but will also remove the data PubSub node for
        PEP avatars.

        This method does not error if neither protocol is active.

        :raises aioxmpp.errors.GatherError: if an exception is raised
            by the spawned tasks.
        """
        async def _wipe_pep_avatar():
            await self._pep.publish(namespaces.xep0084_metadata,
                                    avatar_xso.Metadata())
            await self._pep.publish(namespaces.xep0084_data,
                                    avatar_xso.Data(b''))

        async with self._publish_lock:
            todo = []
            if self._synchronize_vcard:
                todo.append(self._disable_vcard_avatar())

            if await self._pep.available():
                todo.append(_wipe_pep_avatar())

            await gather_reraise_multi(*todo, message="wipe_avatar")
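
# A hedged end-to-end sketch of publishing an avatar with this service
# (assumes ``client`` is a connected aioxmpp.Client and ``png_data``
# holds the PNG image bytes):
#
#     avatars = client.summon(aioxmpp.avatar.AvatarService)
#     avatar_set = aioxmpp.avatar.AvatarSet()
#     avatar_set.add_avatar_image("image/png", image_bytes=png_data)
#     await avatars.publish_avatar_set(avatar_set)
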
Example #4
class XMLStreamMock(InteractivityMock):
    class Receive(collections.namedtuple("Receive", ["obj"])):
        def do(self, xmlstream):
            clsmap = xmlstream.stanza_parser.get_class_map()
            cls = type(self.obj)
            xmlstream._tester.assertIn(
                cls, clsmap, "no handler registered for {}".format(cls))
            clsmap[cls](self.obj)

    class Fail(collections.namedtuple("Fail", ["exc"])):
        def do(self, xmlstream):
            xmlstream._exception = self.exc
            for fut in xmlstream._error_futures:
                if not fut.done():
                    fut.set_exception(self.exc)
            xmlstream.on_closing(self.exc)

    class Send(collections.namedtuple("Send", ["obj", "response"])):
        def __new__(cls, obj, *, response=None):
            return super().__new__(cls, obj, response)

    class Reset(collections.namedtuple("Reset", ["response"])):
        def __new__(cls, *, response=None):
            return super().__new__(cls, response)

    class Close(collections.namedtuple("Close", ["response"])):
        def __new__(cls, *, response=None):
            return super().__new__(cls, response)

    class Abort(collections.namedtuple("Abort", ["response"])):
        def __new__(cls, *, response=None):
            return super().__new__(cls, response)

    class STARTTLS(
            collections.namedtuple(
                "STARTTLS",
                ["ssl_context", "post_handshake_callback", "response"])):
        def __new__(cls,
                    ssl_context,
                    post_handshake_callback,
                    *,
                    response=None):
            return super().__new__(cls, ssl_context, post_handshake_callback,
                                   response)

    on_closing = callbacks.Signal()

    def __init__(self, tester, *, loop=None):
        super().__init__(tester, loop=loop)
        self._queue = asyncio.Queue()
        self._exception = None
        self._closed = False
        self.stanza_parser = xso.XSOParser()
        self.can_starttls_value = False
        self._error_futures = []

    def _execute_single(self, do):
        do(self)

    @asyncio.coroutine
    def run_test(self, actions, stimulus=None):
        self._done = asyncio.Future()
        self._actions = actions

        self._execute_response(stimulus)

        while not self._queue.empty() or self._actions:
            done, pending = yield from asyncio.wait(
                [self._queue.get(), self._done],
                return_when=asyncio.FIRST_COMPLETED)

            if self._done not in pending:
                # raise if error
                self._done.result()
                done.remove(self._done)

            if done:
                value_future = next(iter(done))
                action, *args = value_future.result()
                if action == "send":
                    yield from self._send_xso(*args)
                elif action == "reset":
                    yield from self._reset(*args)
                elif action == "close":
                    yield from self._close(*args)
                elif action == "starttls":
                    yield from self._starttls(*args)
                elif action == "abort":
                    yield from self._abort(*args)
                else:
                    assert False

            if self._done not in pending:
                break

    @asyncio.coroutine
    def _send_xso(self, obj):
        self._tester.assertTrue(
            self._actions,
            self._format_unexpected_action("send_xso(" + repr(obj) + ")",
                                           "no actions left"))
        head = self._actions[0]
        self._tester.assertIsInstance(
            head, self.Send,
            self._format_unexpected_action("send_xso",
                                           "expected something different"))

        t1 = etree.Element("root")
        obj.unparse_to_node(t1)
        t2 = etree.Element("root")
        head.obj.unparse_to_node(t2)

        self._tester.assertSubtreeEqual(t1, t2)
        self._actions.pop(0)
        self._execute_response(head.response)

    @asyncio.coroutine
    def _reset(self):
        self._basic("reset", self.Reset)

    @asyncio.coroutine
    def _abort(self):
        self._basic("abort", self.Abort)
        self._exception = ConnectionError("not connected")
        for fut in self._error_futures:
            if not fut.done():
                fut.set_exception(self._exception)

    @asyncio.coroutine
    def _close(self):
        self._basic("close", self.Close)
        self._exception = ConnectionError("not connected")
        self.on_closing(None)
        for fut in self._error_futures:
            if not fut.done():
                fut.set_exception(self._exception)

    @asyncio.coroutine
    def _starttls(self, ssl_context, post_handshake_callback, fut):
        self._tester.assertTrue(
            self._actions,
            self._format_unexpected_action("starttls", "no actions left"),
        )
        head = self._actions[0]
        self._tester.assertIsInstance(
            head,
            self.STARTTLS,
            self._format_unexpected_action("starttls",
                                           "expected something else"),
        )
        self._actions.pop(0)

        self._tester.assertEqual(ssl_context, head.ssl_context,
                                 "mismatched starttls argument")
        self._tester.assertEqual(post_handshake_callback,
                                 head.post_handshake_callback,
                                 "mismatched starttls argument")

        if post_handshake_callback:
            try:
                yield from post_handshake_callback(self.transport)
            except Exception as exc:
                fut.set_exception(exc)
            else:
                fut.set_result(None)
        else:
            fut.set_result(None)

        self._execute_response(head.response)

    def send_xso(self, obj):
        if self._exception:
            raise self._exception
        self._queue.put_nowait(("send", obj))

    def reset(self):
        if self._exception:
            raise self._exception
        self._queue.put_nowait(("reset", ))

    def abort(self):
        if self._exception:
            raise self._exception
        self._queue.put_nowait(("abort", ))

    def close(self):
        if self._exception:
            raise self._exception
        self._queue.put_nowait(("close", ))

    @asyncio.coroutine
    def starttls(self, ssl_context, post_handshake_callback=None):
        if self._exception:
            raise self._exception

        fut = asyncio.Future()
        self._queue.put_nowait(
            ("starttls", ssl_context, post_handshake_callback, fut))
        yield from fut

    @asyncio.coroutine
    def close_and_wait(self):
        fut = asyncio.Future()
        self.on_closing.connect(fut, self.on_closing.AUTO_FUTURE)
        self.close()
        try:
            yield from fut
        except Exception:
            pass

    def can_starttls(self):
        return self.can_starttls_value

    def error_future(self):
        fut = asyncio.Future()
        self._error_futures.append(fut)
        return fut
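
# A minimal sketch of driving this mock from a test case (hypothetical
# test body; assumes ``self.xmlstream = XMLStreamMock(self, loop=loop)``,
# a ``run_coroutine`` helper that drives the event loop, and XSOs
# ``iq`` and ``reply`` appropriate for the code under test):
#
#     run_coroutine(self.xmlstream.run_test([
#         XMLStreamMock.Send(iq, response=XMLStreamMock.Receive(reply)),
#         XMLStreamMock.Close(),
#     ]))
#
# Each action asserts on the next operation performed on the mock and,
# once matched, feeds its response (e.g. a Receive) back into the
# registered stanza handlers.
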
Example #5
class RegisteredPEPNode:
    """
    Handle for registered PEP nodes.

    *Never* instantiate this class yourself. Use
    :class:`~aioxmpp.pep.register_pep_node` or
    :attr:`~aioxmpp.pep.PEPClient.claim_pep_node` to obtain instances.

    You have to keep a reference to the instance to
    uphold the claim; when an instance is garbage
    collected, it is closed automatically. It is not enough to have a
    callback registered! It is strongly recommended to explicitly
    close the registered node if it is no longer needed or to use the
    :class:`~aioxmpp.pep.register_pep_node` descriptor for automatic
    life-cycle handling.

    .. signal:: on_item_publish(jid, node, item, message=None)

       Fires when an event is received for this PEP node. The arguments
       are as for :attr:`aioxmpp.PubSubClient.on_item_publish`.

       .. warning:: Empty notifications and notifications whose
                    payload namespace does not match the node
                    namespace are filtered and will not cause
                    this signal to fire (since they do not match the
                    PEP specification).

    .. autoattribute:: notify

    .. autoattribute:: feature_registered

    .. automethod:: close

    """
    def __init__(self, pep_service, node, register_feature, notify):
        self._pep_service = pep_service
        self._node = node
        self._feature_registered = register_feature
        self._notify = notify
        self._closed = False

        if self._feature_registered:
            self._register_feature()

        if self._notify:
            self._register_notify()

    on_item_publish = callbacks.Signal()

    def _register_feature(self):
        self._pep_service._disco_server.register_feature(self._node)
        self._feature_registered = True

    def _unregister_feature(self):
        self._pep_service._disco_server.unregister_feature(self._node)
        self._feature_registered = False

    def _register_notify(self):
        self._pep_service._disco_server.register_feature(self._notify_feature)
        self._notify = True

    def _unregister_notify(self):
        self._pep_service._disco_server.unregister_feature(
            self._notify_feature)
        self._notify = False

    def _unregister(self):
        if self._notify:
            self._unregister_notify()

        if self._feature_registered:
            self._unregister_feature()

    def close(self):
        """
        Unclaim the PEP node and unregister the registered features.

        It is not necessary to call close if this claim is managed by
        :class:`~aioxmpp.pep.register_pep_node`.
        """
        if self._closed:
            return

        self._closed = True
        self._pep_service._unclaim(self.node_namespace)
        self._unregister()

    @property
    def node_namespace(self):
        """The claimed node namespace"""
        return self._node

    @property
    def _notify_feature(self):
        return self._node + "+notify"

    @property
    def notify(self):
        """
        Whether we have enabled the ``+notify`` feature to automatically
        receive notifications.

        When setting this property the feature is registered and
        unregistered appropriately.

        .. note::

            For `notify` to work, it is required that
            :class:`aioxmpp.EntityCapsService` is loaded and that presence is
            re-sent soon after
            :meth:`~aioxmpp.EntityCapsService.on_ver_changed` fires. See the
            documentation of the class and the signal for details.
        """
        return self._notify

    @notify.setter
    def notify(self, value):
        if self._closed:
            raise RuntimeError(
                "modifying a closed RegisteredPEPNode is forbidden")
        # XXX: do we want to do strict type checking here?
        if bool(value) == bool(self._notify):
            return

        if self._notify:
            self._unregister_notify()
        else:
            self._register_notify()

    @property
    def feature_registered(self):
        """
        Whether we have registered the node namespace as feature.

        When setting this property the feature is registered and
        unregistered appropriately.
        """
        return self._feature_registered

    @feature_registered.setter
    def feature_registered(self, value):
        if self._closed:
            raise RuntimeError(
                "modifying a closed RegisteredPEPNode is forbidden")
        # XXX: do we want to do strict type checking here?
        if bool(value) == bool(self._feature_registered):
            return

        if self._feature_registered:
            self._unregister_feature()
        else:
            self._register_feature()
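
# Hedged sketch of the recommended descriptor-based life-cycle handling
# (mirrors the ``avatar_pep`` declaration in the AvatarService example
# above; the node namespace is illustrative):
#
#     class MyService(aioxmpp.service.Service):
#         ORDER_AFTER = [aioxmpp.pep.PEPClient]
#
#         my_node = aioxmpp.pep.register_pep_node(
#             "urn:example:mynode",
#             notify=True,
#         )
#
# The descriptor claims the node when the service is instantiated and
# closes the RegisteredPEPNode automatically when the service shuts
# down, so no manual close() is needed.
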
Example #6
class BookmarkClient(service.Service):
    """
    Supports retrieval and storage of bookmarks on the server.
    It currently only supports :xep:`Private XML Storage <49>` as
    backend.

    As a general rule, *never* modify the bookmark instances retrieved
    from this class (either via :meth:`get_bookmarks` or as an argument
    to one of the signals). If you need to modify a bookmark for use
    with :meth:`update_bookmark`, use :func:`copy.copy` to create a
    copy.

    .. automethod:: sync

    .. automethod:: get_bookmarks

    .. automethod:: set_bookmarks

    The following methods change the bookmark list in a get-modify-set
    pattern to mitigate the danger of race conditions; they should be
    used in most circumstances:

    .. automethod:: add_bookmark

    .. automethod:: discard_bookmark

    .. automethod:: update_bookmark


    The following signals are provided that allow tracking the changes to
    the bookmark list:

    .. signal:: on_bookmark_added(added_bookmark)

        Fires when a new bookmark is added.

    .. signal:: on_bookmark_removed(removed_bookmark)

        Fires when a bookmark is removed.

    .. signal:: on_bookmark_changed(old_bookmark, new_bookmark)

        Fires when a bookmark is changed.

    .. note:: A heuristic is used to determine the changes to the
              bookmarks, so the reported changes may not directly
              reflect the methods used, but it will always be possible
              to construct the list of bookmarks from the events. For
              example, when using :meth:`update_bookmark` to change the
              JID of a :class:`Conference` bookmark, a removed and an
              added signal will fire.

    .. note:: The bookmark protocol is prone to race conditions if
              several clients access it concurrently. Be careful to
              use a get-modify-set pattern or the provided high-level
              interface.

    .. note:: Some other clients extend the bookmark format. For now,
              those extensions are silently dropped by our XSOs and are
              therefore lost when changing the bookmarks with aioxmpp.
              This is considered a bug to be fixed in the future.
    """

    ORDER_AFTER = [
        private_xml.PrivateXMLService,
    ]

    on_bookmark_added = callbacks.Signal()
    on_bookmark_removed = callbacks.Signal()
    on_bookmark_changed = callbacks.Signal()

    def __init__(self, client, **kwargs):
        super().__init__(client, **kwargs)
        self._private_xml = self.dependencies[private_xml.PrivateXMLService]
        self._bookmark_cache = []
        self._lock = asyncio.Lock()

    @service.depsignal(aioxmpp.Client, "on_stream_established", defer=True)
    async def _stream_established(self):
        await self.sync()

    async def _get_bookmarks(self):
        """
        Get the stored bookmarks from the server.

        :returns: a list of bookmarks
        """
        res = await self._private_xml.get_private_xml(bookmark_xso.Storage())

        return res.registered_payload.bookmarks

    async def _set_bookmarks(self, bookmarks):
        """
        Set the bookmarks stored on the server.
        """
        storage = bookmark_xso.Storage()
        storage.bookmarks[:] = bookmarks
        await self._private_xml.set_private_xml(storage)

    def _diff_emit_update(self, new_bookmarks):
        """
        Diff the bookmark cache and the new bookmark state, emit signals as
        needed and set the bookmark cache to the new data.
        """

        self.logger.debug("diffing %s, %s", self._bookmark_cache,
                          new_bookmarks)

        def subdivide(level, old, new):
            """
            Subdivide the bookmarks according to the data item
            ``bookmark.secondary[level]`` and emit the appropriate
            events.
            """
            if len(old) == len(new) == 1:
                old_entry = old.pop()
                new_entry = new.pop()
                if old_entry == new_entry:
                    pass
                else:
                    self.on_bookmark_changed(old_entry, new_entry)
                return ([], [])

            elif len(old) == 0:
                return ([], new)

            elif len(new) == 0:
                return (old, [])

            else:
                try:
                    groups = {}
                    for entry in old:
                        group = groups.setdefault(entry.secondary[level],
                                                  ([], []))
                        group[0].append(entry)

                    for entry in new:
                        group = groups.setdefault(entry.secondary[level],
                                                  ([], []))
                        group[1].append(entry)
                except IndexError:
                    # the classification is exhausted; this means that
                    # all entries in this bin are equal by the
                    # definition of bookmark equivalence!
                    common = min(len(old), len(new))
                    assert old[:common] == new[:common]
                    return (old[common:], new[common:])

                old_unhandled, new_unhandled = [], []
                for old, new in groups.values():
                    unhandled = subdivide(level + 1, old, new)
                    old_unhandled += unhandled[0]
                    new_unhandled += unhandled[1]

                # match up unhandleds as changes as early as possible
                i = -1
                for i, (old_entry, new_entry) in enumerate(
                        zip(old_unhandled, new_unhandled)):
                    self.logger.debug("changed %s -> %s", old_entry, new_entry)
                    self.on_bookmark_changed(old_entry, new_entry)
                i += 1
                return old_unhandled[i:], new_unhandled[i:]

        # group the bookmarks into groups whose elements may transform
        # among one another by on_bookmark_changed events. This information
        # is given by the type of the bookmark and the .primary property
        changable_groups = {}

        for item in self._bookmark_cache:
            group = changable_groups.setdefault((type(item), item.primary),
                                                ([], []))
            group[0].append(item)

        for item in new_bookmarks:
            group = changable_groups.setdefault((type(item), item.primary),
                                                ([], []))
            group[1].append(item)

        for old, new in changable_groups.values():

            # the first branches are fast paths which should catch
            # most cases – especially all cases where each bare jid of
            # a conference bookmark or each URL of a URL bookmark is
            # only used in one bookmark
            if len(old) == len(new) == 1:
                old_entry = old.pop()
                new_entry = new.pop()
                if old_entry == new_entry:
                    # the bookmark is unchanged, do not emit an event
                    pass
                else:
                    self.logger.debug("changed %s -> %s", old_entry, new_entry)
                    self.on_bookmark_changed(old_entry, new_entry)
            elif len(new) == 0:
                for removed in old:
                    self.logger.debug("removed %s", removed)
                    self.on_bookmark_removed(removed)
            elif len(old) == 0:
                for added in new:
                    self.logger.debug("added %s", added)
                    self.on_bookmark_added(added)
            else:
                old, new = subdivide(0, old, new)

                assert len(old) == 0 or len(new) == 0

                for removed in old:
                    self.logger.debug("removed %s", removed)
                    self.on_bookmark_removed(removed)

                for added in new:
                    self.logger.debug("added %s", added)
                    self.on_bookmark_added(added)

        self._bookmark_cache = new_bookmarks

    async def get_bookmarks(self):
        """
        Get the stored bookmarks from the server. Causes signals to be
        fired to reflect the changes.

        :returns: a list of bookmarks
        """
        async with self._lock:
            bookmarks = await self._get_bookmarks()
            self._diff_emit_update(bookmarks)
            return bookmarks

    async def set_bookmarks(self, bookmarks):
        """
        Store the sequence of bookmarks `bookmarks`.

        Causes signals to be fired to reflect the changes.

        .. note:: This should normally not be used. It does not
                  mitigate the race condition between clients
                  concurrently modifying the bookmarks and may lead to
                  data loss. Use :meth:`add_bookmark`,
                  :meth:`discard_bookmark` and :meth:`update_bookmark`
                  instead. This method still has use-cases (modifying
                  the bookmark list at large, e.g. by syncing the
                  remote store with local data).
        """
        async with self._lock:
            await self._set_bookmarks(bookmarks)
            self._diff_emit_update(bookmarks)

    async def sync(self):
        """
        Sync the bookmarks between the local representation and the
        server.

        This must be called periodically to ensure that the signals
        are fired.
        """
        await self.get_bookmarks()

    async def add_bookmark(self, new_bookmark, *, max_retries=3):
        """
        Add a bookmark and check whether it was successfully added to
        the bookmark list. Already existing bookmarks are not added
        twice.

        :param new_bookmark: the bookmark to add
        :type new_bookmark: an instance of :class:`~bookmark_xso.Bookmark`
        :param max_retries: the number of retries if setting the bookmark
                            fails
        :type max_retries: :class:`int`

        :raises RuntimeError: if the bookmark is not in the bookmark list
                              after `max_retries` retries.

        After setting the bookmark, it is checked whether the bookmark
        is present in the online storage; if it is not, adding it is
        retried at most `max_retries` times. A :class:`RuntimeError` is
        raised if the bookmark could not be added successfully after
        `max_retries` retries.
        """
        async with self._lock:
            bookmarks = await self._get_bookmarks()

            try:
                modified_bookmarks = list(bookmarks)
                if new_bookmark not in bookmarks:
                    modified_bookmarks.append(new_bookmark)
                await self._set_bookmarks(modified_bookmarks)

                retries = 0
                bookmarks = await self._get_bookmarks()
                while retries < max_retries:
                    if new_bookmark in bookmarks:
                        break
                    modified_bookmarks = list(bookmarks)
                    modified_bookmarks.append(new_bookmark)
                    await self._set_bookmarks(modified_bookmarks)
                    bookmarks = await self._get_bookmarks()
                    retries += 1

                if new_bookmark not in bookmarks:
                    raise RuntimeError("Could not add bookmark")

            finally:
                self._diff_emit_update(bookmarks)

    async def discard_bookmark(self, bookmark_to_remove, *, max_retries=3):
        """
        Remove a bookmark and check it has been removed.

        :param bookmark_to_remove: the bookmark to remove
        :type bookmark_to_remove: a :class:`~bookmark_xso.Bookmark` subclass.
        :param max_retries: the number of retries if removing the
                            bookmark fails.
        :type max_retries: :class:`int`

        :raises RuntimeError: if the bookmark is not removed from
                              bookmark list after `max_retries`
                              retries.

        If there are multiple occurrences of the same bookmark, exactly
        one is removed.

        This does nothing if the bookmark does not match an existing
        bookmark according to bookmark equality.

        After removing the bookmark, it is checked whether it is gone
        from the online storage; if it is not, removing it is retried
        at most `max_retries` times. A :class:`RuntimeError` is raised
        if the bookmark could not be removed successfully after
        `max_retries` retries.
        """
        async with self._lock:
            bookmarks = await self._get_bookmarks()
            occurences = bookmarks.count(bookmark_to_remove)

            try:
                if not occurences:
                    return

                modified_bookmarks = list(bookmarks)
                modified_bookmarks.remove(bookmark_to_remove)
                await self._set_bookmarks(modified_bookmarks)

                retries = 0
                bookmarks = await self._get_bookmarks()
                new_occurences = bookmarks.count(bookmark_to_remove)
                while retries < max_retries:
                    if new_occurences < occurences:
                        break
                    modified_bookmarks = list(bookmarks)
                    modified_bookmarks.remove(bookmark_to_remove)
                    await self._set_bookmarks(modified_bookmarks)
                    bookmarks = await self._get_bookmarks()
                    new_occurences = bookmarks.count(bookmark_to_remove)
                    retries += 1

                if new_occurences >= occurences:
                    raise RuntimeError("Could not remove bookmark")
            finally:
                self._diff_emit_update(bookmarks)

    async def update_bookmark(self, old, new, *, max_retries=3):
        """
        Update a bookmark and check that the update was successful.

        The bookmark `old` is matched against the existing bookmarks
        according to bookmark equality and replaced by `new`. The
        bookmark `new` is added if no bookmark matching `old` exists.

        :param old: the bookmark to replace
        :type old: a :class:`~bookmark_xso.Bookmark` subclass.
        :param new: the replacement bookmark
        :type new: a :class:`~bookmark_xso.Bookmark` subclass.
        :param max_retries: the number of retries if replacing the
                            bookmark fails.
        :type max_retries: :class:`int`

        :raises RuntimeError: if the bookmark is not in the bookmark list
                              after `max_retries` retries.

        After replacing the bookmark, it is checked whether the
        bookmark `new` is present in the online storage; if it is not,
        replacing it is retried at most `max_retries` times. A
        :class:`RuntimeError` is raised if the bookmark could not be
        replaced successfully after `max_retries` retries.

        .. note:: Do not modify a bookmark retrieved from the signals
                  or from :meth:`get_bookmarks` to obtain the bookmark
                  `new`, this will lead to data corruption as they are
                  passed by reference.  Instead use :func:`copy.copy`
                  and modify the copy.

        """
        def replace_bookmark(bookmarks, old, new):
            modified_bookmarks = list(bookmarks)
            try:
                i = bookmarks.index(old)
                modified_bookmarks[i] = new
            except ValueError:
                modified_bookmarks.append(new)
            return modified_bookmarks

        async with self._lock:
            bookmarks = await self._get_bookmarks()

            try:
                await self._set_bookmarks(replace_bookmark(
                    bookmarks, old, new))

                retries = 0
                bookmarks = await self._get_bookmarks()
                while retries < max_retries:
                    if new in bookmarks:
                        break
                    await self._set_bookmarks(
                        replace_bookmark(bookmarks, old, new))
                    bookmarks = await self._get_bookmarks()
                    retries += 1

                if new not in bookmarks:
                    raise RuntimeError("Cold not update bookmark")
            finally:
                self._diff_emit_update(bookmarks)
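
# Hedged usage sketch of the high-level get-modify-set helpers
# (assumes ``client`` is a connected aioxmpp.Client and that
# aioxmpp.bookmarks.Conference is the bookmark XSO in use):
#
#     bookmarks = client.summon(BookmarkClient)
#     room = aioxmpp.bookmarks.Conference(
#         "Weekly Meeting",
#         aioxmpp.JID.fromstr("room@muc.example.org"),
#         autojoin=True,
#     )
#     await bookmarks.add_bookmark(room)
#
#     changed = copy.copy(room)   # never mutate the stored instance
#     changed.autojoin = False
#     await bookmarks.update_bookmark(room, changed)
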
Example #7
class Service(aioxmpp.service.Service):
    """
    A roster client :class:`aioxmpp.service.Service`.

    `client` must be a :class:`~aioxmpp.node.AbstractClient` or
    subclass. Ideally, you create :class:`Service` instances using
    :meth:`.AbstractClient.summon`.

    The interaction with a roster service happens mainly by accessing the
    attributes holding the state and using the events to be notified of state
    changes:

    Attributes for accessing the roster:

    .. attribute:: items

       A dictionary mapping :class:`~.structs.JID` instances to corresponding
       :class:`Item` instances.

    .. attribute:: groups

       A dictionary which allows group-based access to :class:`Item`
       instances. The dictionaries keys are the names of the groups, the values
       are :class:`set` instances, which hold the :class:`Item` instances in
       that group.

       At no point can one observe empty :class:`set` instances in this
       dictionary.

    The :class:`Item` instances stay the same, as long as they represent the
    identical roster entry on the remote side. That is, if the name or
    subscription state are changed in the server side roster, the :class:`Item`
    instance stays the same, but the attributes are mutated. However, if the
    entry is removed from the server roster and re-added later for the same
    JID, it will be a different :class:`Item` instance.

    Signals:

    .. signal:: on_initial_roster_received()

       Fires when the initial roster has been received. Note that if roster
       versioning is used, the initial roster may not be up-to-date. The server
       is allowed to tell the client to re-use its local state and deliver
       changes using roster pushes. In that case, the
       :meth:`on_initial_roster_received` event fires immediately, so that the
       user sees whatever roster has been set up for versioning before the
       stream was established; updates pushed by the server are delivered using
       the normal events.

       The roster data has already been imported at the time the callback is
       fired.

       Note that the initial roster is diffed against whatever is in the local
       store and events are fired just like for normal push updates. Thus, in
       general, you won’t need this signal; it might be better to listen for
       the events below.

    .. signal:: on_entry_added(item)

       Fires when an `item` has been added to the roster. The attributes of the
       `item` are up-to-date when this callback fires.

    .. signal:: on_entry_name_changed(item)

       Fires when a roster update changed the name of the `item`. The new name
       is already applied to the `item`.

    .. signal:: on_entry_subscription_state_changed(item)

       Fires when a roster update changes any of the :attr:`Item.subscription`,
       :attr:`Item.ask` or :attr:`Item.approved` attributes. The new values are
       already applied to `item`.

       The event always fires once per update, even if the update changes
       more than one of the above attributes.

    .. signal:: on_entry_added_to_group(item, group_name)

       Fires when an update adds an `item` to a group. The :attr:`Item.groups`
       attribute is already updated (not only with this, but also other group
       updates, including removals) when this event is fired.

       The event fires for each added group in an update, thus it may fire more
       than once per update.

       The name of the new group is in `group_name`.

    .. signal:: on_entry_removed_from_group(item, group_name)

       Fires when an update removes an `item` from a group. The
       :attr:`Item.groups` attribute is already updated (not only with this,
       but also other group updates, including additions) when this event is
       fired.

       The event fires for each removed group in an update, thus it may fire
       more than once per update.

       The name of the removed group is in `group_name`.

    .. signal:: on_entry_removed(item)

       Fires after an entry has been removed from the roster. The entry is
       already removed from all bookkeeping structures, but the values on the
       `item` object are the same as right before the removal.
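
    A sketch of connecting a listener to one of these signals (``roster``
    is again the summoned service):

    .. code-block:: python

       def entry_added(item):
           print("roster entry added:", item.jid)

       roster.on_entry_added.connect(entry_added)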

    Modifying roster contents:

    .. automethod:: set_entry

    .. automethod:: remove_entry

    Managing presence subscriptions:

    .. automethod:: approve

    .. automethod:: subscribe

    .. signal:: on_subscribe(stanza)

       Fires when a peer requested a subscription. The whole stanza received
       is included as `stanza`.

       .. seealso::

          To approve a subscription request, use :meth:`approve`.

    .. signal:: on_subscribed(stanza)

       Fires when a peer has confirmed a previous subscription request. The
       ``"subscribed"`` stanza is included as `stanza`.

    .. signal:: on_unsubscribe(stanza)

       Fires when a peer cancelled their subscription to our presence. As per
       :rfc:`6121`, the server forwards the ``"unsubscribe"`` presence stanza
       (which is included as `stanza` argument) *before* sending the roster
       push.

       Unless your application is interested in the specific cause of a
       subscription state change, it is not necessary to use this signal; the
       subscription state change will be covered by
       :meth:`on_entry_subscription_state_changed`.

    .. signal:: on_unsubscribed(stanza)

       Fires when a peer cancelled our subscription to their presence. As
       per :rfc:`6121`, the server forwards the ``"unsubscribed"`` presence
       stanza (which is included as `stanza` argument) *before* sending the
       roster push.

       Unless your application is interested in the specific cause of a
       subscription state change, it is not necessary to use this signal; the
       subscription state change will be covered by
       :meth:`on_entry_subscription_state_changed`.
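
    For example, a client which automatically accepts incoming subscription
    requests could combine :meth:`on_subscribe` with :meth:`approve` (a
    sketch):

    .. code-block:: python

       def auto_approve(stanza):
           roster.approve(stanza.from_.bare())

       roster.on_subscribe.connect(auto_approve)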

    Import/Export of roster data:

    .. automethod:: export_as_json

    .. automethod:: import_from_json

    To make use of roster versioning, use the above two methods. The general
    workflow is to :meth:`export_as_json` the roster after disconnecting and
    store the result for the next connection attempt. **Before** connecting,
    the stored data needs to be loaded using :meth:`import_from_json`. This
    is only required when a new :class:`Service` has been created, as roster
    services won’t delete roster contents between two connections on the
    same :class:`.node.AbstractClient` instance.
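
    A minimal sketch of that workflow (persisting to a JSON file is an
    assumption made for illustration):

    .. code-block:: python

       import json

       # after disconnecting:
       with open("roster.json", "w") as f:
           json.dump(roster.export_as_json(), f)

       # before the next connection attempt:
       with open("roster.json") as f:
           roster.import_from_json(json.load(f))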
    """

    on_initial_roster_received = callbacks.Signal()
    on_entry_name_changed = callbacks.Signal()
    on_entry_subscription_state_changed = callbacks.Signal()
    on_entry_removed = callbacks.Signal()
    on_entry_added = callbacks.Signal()
    on_entry_added_to_group = callbacks.Signal()
    on_entry_removed_from_group = callbacks.Signal()

    on_subscribed = callbacks.Signal()
    on_subscribe = callbacks.Signal()
    on_unsubscribed = callbacks.Signal()
    on_unsubscribe = callbacks.Signal()

    def __init__(self, client):
        super().__init__(client)

        self._bse_token = client.before_stream_established.connect(
            self._request_initial_roster)

        client.stream.register_iq_request_coro("set", roster_xso.Query,
                                               self.handle_roster_push)
        client.stream.register_presence_callback("subscribe", None,
                                                 self.handle_subscribe)
        client.stream.register_presence_callback("subscribed", None,
                                                 self.handle_subscribed)
        client.stream.register_presence_callback("unsubscribed", None,
                                                 self.handle_unsubscribed)
        client.stream.register_presence_callback("unsubscribe", None,
                                                 self.handle_unsubscribe)

        self.items = {}
        self.groups = {}
        self.version = None

    @asyncio.coroutine
    def _shutdown(self):
        self.client.stream.unregister_presence_callback("unsubscribe", None)
        self.client.stream.unregister_presence_callback("unsubscribed", None)
        self.client.stream.unregister_presence_callback("subscribed", None)
        self.client.stream.unregister_presence_callback("subscribe", None)
        self.client.stream.unregister_iq_request_coro("set", roster_xso.Query)

    def _update_entry(self, xso_item):
        try:
            stored_item = self.items[xso_item.jid]
        except KeyError:
            stored_item = Item.from_xso_item(xso_item)
            self.items[xso_item.jid] = stored_item
            self.on_entry_added(stored_item)
            for group in stored_item.groups:
                self.groups.setdefault(group, set()).add(stored_item)
            return

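        # collect the signals to fire once the stored item has been updated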
        to_call = []

        if stored_item.name != xso_item.name:
            to_call.append(self.on_entry_name_changed)

        if (stored_item.subscription != xso_item.subscription
                or stored_item.approved != xso_item.approved
                or stored_item.ask != xso_item.ask):
            to_call.append(self.on_entry_subscription_state_changed)

        old_groups = set(stored_item.groups)

        stored_item.update_from_xso_item(xso_item)

        new_groups = set(stored_item.groups)

        removed_from_groups = old_groups - new_groups
        added_to_groups = new_groups - old_groups

        for cb in to_call:
            cb(stored_item)

        for group in added_to_groups:
            self.groups.setdefault(group, set()).add(stored_item)
            self.on_entry_added_to_group(stored_item, group)

        for group in removed_from_groups:
            groupset = self.groups[group]
            groupset.remove(stored_item)
            if not groupset:
                del self.groups[group]
            self.on_entry_removed_from_group(stored_item, group)

    @asyncio.coroutine
    def handle_roster_push(self, iq):
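        # genuine roster pushes arrive without a from attribute (they
        # originate from the user's own server); reject anything else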
        if iq.from_:
            raise errors.XMPPAuthError((namespaces.stanzas, "forbidden"))

        request = iq.payload

        for item in request.items:
            if item.subscription == "remove":
                try:
                    old_item = self.items.pop(item.jid)
                except KeyError:
                    pass
                else:
                    for group in old_item.groups:
                        groupset = self.groups[group]
                        groupset.remove(old_item)
                        if not groupset:
                            del self.groups[group]
                    self.on_entry_removed(old_item)
            else:
                self._update_entry(item)

        self.version = request.ver

    def handle_subscribe(self, stanza):
        self.on_subscribe(stanza)

    def handle_subscribed(self, stanza):
        self.on_subscribed(stanza)

    def handle_unsubscribed(self, stanza):
        self.on_unsubscribed(stanza)

    def handle_unsubscribe(self, stanza):
        self.on_unsubscribe(stanza)

    @asyncio.coroutine
    def _request_initial_roster(self):
        iq = stanza.IQ(type_="get")
        iq.payload = roster_xso.Query()

        logger.debug("requesting initial roster")
        if self.client.stream_features.has_feature(
                roster_xso.RosterVersioningFeature):
            logger.debug("requesting incremental updates (old ver = %s)",
                         self.version)
            iq.payload.ver = self.version

        response = yield from self.client.stream.send_iq_and_wait_for_reply(
            iq, timeout=self.client.negotiation_timeout.total_seconds())

        if response is None:
            logger.debug("roster will be updated incrementally")
            self.on_initial_roster_received()
            return True

        self.version = response.ver
        logger.debug("roster update received (new ver = %s)", self.version)

        actual_jids = {item.jid for item in response.items}
        known_jids = set(self.items.keys())

        removed_jids = known_jids - actual_jids
        logger.debug("jids dropped: %r", removed_jids)

        for removed_jid in removed_jids:
            old_item = self.items.pop(removed_jid)
            self.on_entry_removed(old_item)

        logger.debug("jids updated: %r", actual_jids - removed_jids)
        for item in response.items:
            self._update_entry(item)

        self.on_initial_roster_received()
        return True

    def export_as_json(self):
        """
        Export the whole roster as currently stored on the client side into a
        JSON-compatible dictionary and return that dictionary.
        """
        return {
            "items": {
                str(jid): item.export_as_json()
                for jid, item in self.items.items()
            },
            "ver": self.version
        }

    def import_from_json(self, data):
        """
        Replace the current roster with the :meth:`export_as_json`-compatible
        dictionary in `data`.

        No events are fired during this activity. After this method completes,
        the whole roster contents are replaced by the contents of `data`.

        Also, no data is transferred to the server; this method is intended to
        be used for roster versioning. See below (in the docs of
        :class:`Service`).
        """
        self.version = data.get("ver", None)

        self.items.clear()
        self.groups.clear()
        for jid, item_data in data.get("items", {}).items():
            jid = structs.JID.fromstr(jid)
            item = Item(jid)
            item.update_from_json(item_data)
            self.items[jid] = item
            for group in item.groups:
                self.groups.setdefault(group, set()).add(item)

    @asyncio.coroutine
    def set_entry(self,
                  jid,
                  *,
                  name=_Sentinel,
                  add_to_groups=frozenset(),
                  remove_from_groups=frozenset(),
                  timeout=None):
        """
        Set properties of a roster entry or add a new roster entry. The roster
        entry is identified by its bare `jid`.

        If an entry already exists, all values default to those stored in the
        existing entry. For example, if no `name` is given, the current name of
        the entry is re-used, if any.

        If the entry does not exist, it will be created on the server side.

        The `remove_from_groups` and `add_to_groups` arguments have to be based
        on the locally cached state, as XMPP does not support sending
        diffs. `remove_from_groups` takes precedence over `add_to_groups`.

        `timeout` is the time in seconds to wait for a confirmation by the
        server.

        Note that the changes may not be visible in the :attr:`items` and
        :attr:`groups` attributes immediately after this coroutine
        returns. The :class:`Service` waits for the "official" roster push
        from the server before updating the data structures and firing
        events, to ensure a state consistent with other clients.

        This may raise arbitrary :class:`.errors.XMPPError` exceptions if the
        server replies with an error and also any kind of connection error if
        the connection gets fatally terminated while waiting for a response.
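
        A sketch of a typical call, setting a name and adding the entry to
        a group (``peer_jid`` is a bare :class:`~.structs.JID` and
        ``roster`` the summoned service):

        .. code-block:: python

           yield from roster.set_entry(
               peer_jid,
               name="Alice",
               add_to_groups={"Friends"},
           )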
        """

        existing = self.items.get(jid, Item(jid))

        post_groups = (existing.groups | add_to_groups) - remove_from_groups
        post_name = existing.name
        if name is not _Sentinel:
            post_name = name

        item = roster_xso.Item(jid=jid,
                               name=post_name,
                               groups=[
                                   roster_xso.Group(name=group_name)
                                   for group_name in post_groups
                               ])

        yield from self.client.stream.send_iq_and_wait_for_reply(
            stanza.IQ("set", payload=roster_xso.Query(items=[item])),
            timeout=timeout)

    @asyncio.coroutine
    def remove_entry(self, jid, *, timeout=None):
        """
        Request removal of the roster entry identified by the given bare
        `jid`. If the entry currently has any subscription state, the server
        will send the corresponding unsubscribing presence stanzas.

        `timeout` is the maximum time in seconds to wait for a reply from the
        server.

        This may raise arbitrary :class:`.errors.XMPPError` exceptions if the
        server replies with an error and also any kind of connection error if
        the connection gets fatally terminated while waiting for a response.
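
        A sketch (``peer_jid`` and ``roster`` as above):

        .. code-block:: python

           yield from roster.remove_entry(peer_jid)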
        """
        yield from self.client.stream.send_iq_and_wait_for_reply(
            stanza.IQ(
                "set",
                payload=roster_xso.Query(
                    items=[roster_xso.Item(jid=jid, subscription="remove")])),
            timeout=timeout)

    def approve(self, peer_jid):
        """
        (Pre-)approve a subscription request from `peer_jid`.

        This sends a ``"subscribed"`` presence to the peer; if the peer has
        previously asked for a subscription, this will seal the deal and create
        the subscription.

        If the peer has not requested a subscription (yet), it is marked as
        pre-approved by the server. A future subscription request by the peer
        will then be confirmed by the server automatically.
        """
        self.client.stream.enqueue_stanza(
            stanza.Presence(type_="subscribed", to=peer_jid))

    def subscribe(self, peer_jid):
        """
        Request presence subscription with the given `peer_jid`.

        This is deliberately not a coroutine; we don’t know whether the peer is
        online (usually) and they may defer the confirmation very long, if they
        confirm at all. Use :meth:`on_subscribed` to get notified when a peer
        accepted a subscription request.
        """
        self.client.stream.enqueue_stanza(
            stanza.Presence(type_="subscribe", to=peer_jid))

    def unsubscribe(self, peer_jid):
        """
        Unsubscribe from the presence of the given `peer_jid`.
        """
        self.client.stream.enqueue_stanza(
            stanza.Presence(type_="unsubscribe", to=peer_jid))
Example #8
class Foo:
    on_a = callbacks.Signal()
Example #9
class Bar(Foo):
    on_a = None
    on_b = callbacks.Signal()
    on_c = callbacks.Signal()
Example #10
class Bar(Foo):
    on_b = callbacks.Signal()
Example #11
class BlockingClient(service.Service):
    """
    A :class:`~aioxmpp.service.Service` implementing :xep:`Blocking
    Command <191>`.

    This service maintains the list of blocked JIDs and allows
    manipulating the blocklist.

    Attribute:

    .. autoattribute:: blocklist

    Signals:

    .. signal:: on_initial_blocklist_received(blocklist)

       Fires when the initial blocklist has been received from the server.

       :param blocklist: the initial blocklist
       :type blocklist: :class:`~collections.abc.Set` of :class:`~aioxmpp.JID`

    .. signal:: on_jids_blocked(blocked_jids)

       Fires when additional JIDs are blocked.

       :param blocked_jids: the newly blocked JIDs
       :type blocked_jids: :class:`~collections.abc.Set`
           of :class:`~aioxmpp.JID`

    .. signal:: on_jids_unblocked(unblocked_jids)

       Fires when JIDs are unblocked.

       :param unblocked_jids: the now unblocked JIDs
       :type unblocked_jids: :class:`~collections.abc.Set`
           of :class:`~aioxmpp.JID`

    Coroutine methods:

    .. automethod:: block_jids

    .. automethod:: unblock_jids

    .. automethod:: unblock_all
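
    A usage sketch (``blocking`` is assumed to be the summoned service; the
    JID is illustrative):

    .. code-block:: python

       spammer = aioxmpp.JID.fromstr("spammer@example.com")
       yield from blocking.block_jids([spammer])
       # ... and, once forgiven:
       yield from blocking.unblock_jids([spammer])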
    """
    ORDER_AFTER = [aioxmpp.DiscoClient]

    def __init__(self, client, **kwargs):
        super().__init__(client, **kwargs)
        self._blocklist = None
        self._lock = asyncio.Lock()
        self._disco = self.dependencies[aioxmpp.DiscoClient]

    on_jids_blocked = callbacks.Signal()
    on_jids_unblocked = callbacks.Signal()
    on_initial_blocklist_received = callbacks.Signal()

    @asyncio.coroutine
    def _check_for_blocking(self):
        server_info = yield from self._disco.query_info(
            self.client.local_jid.replace(
                resource=None,
                localpart=None,
            ))

        if namespaces.xep0191 not in server_info.features:
            self._blocklist = None
            raise RuntimeError("server does not support blocklists!")

    @service.depsignal(aioxmpp.Client, "before_stream_established")
    @asyncio.coroutine
    def _get_initial_blocklist(self):
        try:
            yield from self._check_for_blocking()
        except RuntimeError:
            self.logger.info(
                "server does not support block lists, skipping initial fetch")
            return True

        if self._blocklist is None:
            with (yield from self._lock):
                iq = aioxmpp.IQ(
                    type_=aioxmpp.IQType.GET,
                    payload=blocking_xso.BlockList(),
                )
                result = yield from self.client.send(iq)
                self._blocklist = frozenset(result.items)
            self.on_initial_blocklist_received(self._blocklist)

        return True

    @property
    def blocklist(self):
        """
        :class:`~collections.abc.Set` of JIDs blocked by the account.
        """
        return self._blocklist

    @asyncio.coroutine
    def block_jids(self, jids_to_block):
        """
        Add the JIDs in the sequence `jids_to_block` to the client's
        blocklist.
        """
        yield from self._check_for_blocking()

        if not jids_to_block:
            return

        cmd = blocking_xso.BlockCommand(jids_to_block)
        iq = aioxmpp.IQ(
            type_=aioxmpp.IQType.SET,
            payload=cmd,
        )
        yield from self.client.send(iq)

    @asyncio.coroutine
    def unblock_jids(self, jids_to_unblock):
        """
        Remove the JIDs in the sequence `jids_to_unblock` from the
        client's blocklist.
        """
        yield from self._check_for_blocking()

        if not jids_to_unblock:
            return

        cmd = blocking_xso.UnblockCommand(jids_to_unblock)
        iq = aioxmpp.IQ(
            type_=aioxmpp.IQType.SET,
            payload=cmd,
        )
        yield from self.client.send(iq)

    @asyncio.coroutine
    def unblock_all(self):
        """
        Unblock all JIDs currently blocked.
        """
        yield from self._check_for_blocking()

        cmd = blocking_xso.UnblockCommand()
        iq = aioxmpp.IQ(
            type_=aioxmpp.IQType.SET,
            payload=cmd,
        )
        yield from self.client.send(iq)

    @service.iq_handler(aioxmpp.IQType.SET, blocking_xso.BlockCommand)
    @asyncio.coroutine
    def handle_block_push(self, block_command):
        diff = ()
        with (yield from self._lock):
            if self._blocklist is None:
                # this means the stream was destroyed while we were waiting for
                # the lock/while the handler was enqueued for scheduling, or
                # the server is buggy and sends pushes before we fetched the
                # blocklist
                return

            if (block_command.from_ is None
                    or block_command.from_ == self.client.local_jid.bare() or
                    # WORKAROUND: ejabberd#2287
                    block_command.from_ == self.client.local_jid):
                diff = frozenset(block_command.payload.items)
                self._blocklist |= diff
            else:
                self.logger.debug(
                    "received block push from unauthorized JID: %s",
                    block_command.from_,
                )

        if diff:
            self.on_jids_blocked(diff)

    @service.iq_handler(aioxmpp.IQType.SET, blocking_xso.UnblockCommand)
    @asyncio.coroutine
    def handle_unblock_push(self, unblock_command):
        diff = ()
        with (yield from self._lock):
            if self._blocklist is None:
                # this means the stream was destroyed while we were waiting for
                # the lock/while the handler was enqueued for scheduling, or
                # the server is buggy and sends pushes before we fetched the
                # blocklist
                return

            if (unblock_command.from_ is None
                    or unblock_command.from_ == self.client.local_jid.bare() or
                    # WORKAROUND: ejabberd#2287
                    unblock_command.from_ == self.client.local_jid):
                if not unblock_command.payload.items:
                    diff = frozenset(self._blocklist)
                    self._blocklist = frozenset()
                else:
                    diff = frozenset(unblock_command.payload.items)
                    self._blocklist -= diff
            else:
                self.logger.debug(
                    "received unblock push from unauthorized JID: %s",
                    unblock_command.from_,
                )
        if diff:
            self.on_jids_unblocked(diff)

    @service.depsignal(aioxmpp.stream.StanzaStream, "on_stream_destroyed")
    def handle_stream_destroyed(self, reason):
        self._blocklist = None
Example #12
class AvatarService(service.Service):
    """
    Access and publish User Avatars (:xep:`84`).

    This service provides an interface for accessing the avatar of other
    entities in the network, getting notifications on avatar changes and
    publishing an avatar for this entity.

    Observing avatars:

    .. note:: :class:`AvatarService` only caches the metadata, not the
              actual image data. This is the job of the caller.

    .. signal:: on_metadata_changed(jid, metadata)

        Fires when avatar metadata changes.

        :param jid: The JID which the avatar belongs to.
        :param metadata: The new metadata descriptors.
        :type metadata: a mapping from MIME types to lists of
            :class:`~aioxmpp.avatar.service.AbstractAvatarDescriptor`
            instances

    .. automethod:: get_avatar_metadata

    .. automethod:: subscribe

    Publishing avatars:

    .. automethod:: publish_avatar_set

    .. automethod:: disable_avatar
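
    A sketch of observing avatar updates via the signal (``avatar`` is
    assumed to be the summoned service):

    .. code-block:: python

       def metadata_changed(jid, metadata):
           print("avatar of", jid, "changed")

       avatar.on_metadata_changed.connect(metadata_changed)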

    """

    ORDER_AFTER = [
        disco.DiscoClient,
        disco.DiscoServer,
        pubsub.PubSubClient,
        pep.PEPClient,
    ]

    avatar_pep = pep.register_pep_node(
        namespaces.xep0084_metadata,
        notify=True,
    )

    on_metadata_changed = callbacks.Signal()

    def __init__(self, client, **kwargs):
        super().__init__(client, **kwargs)
        self._has_pep = None
        self._metadata_cache = {}
        self._pubsub = self.dependencies[pubsub.PubSubClient]
        self._notify_lock = asyncio.Lock()
        self._disco = self.dependencies[disco.DiscoClient]
        # we use this lock to prevent race conditions between different
        # calls of the methods by one client.
        # XXX: Other, independent clients may still cause inconsistent
        # data through race conditions; this should be fixed by at least
        # checking for consistent data after an update.
        self._publish_lock = asyncio.Lock()

    def _cook_metadata(self, jid, items):
        def iter_metadata_info_nodes(items):
            for item in items:
                yield from item.registered_payload.iter_info_nodes()

        result = collections.defaultdict(lambda: [])
        for info_node in iter_metadata_info_nodes(items):
            if info_node.url is not None:
                descriptor = HttpAvatarDescriptor(
                    remote_jid=jid,
                    mime_type=info_node.mime_type,
                    id_=info_node.id_,
                    nbytes=info_node.nbytes,
                    width=info_node.width,
                    height=info_node.height,
                    url=info_node.url,
                )
            else:
                descriptor = PubsubAvatarDescriptor(
                    remote_jid=jid,
                    mime_type=info_node.mime_type,
                    id_=info_node.id_,
                    nbytes=info_node.nbytes,
                    width=info_node.width,
                    height=info_node.height,
                    pubsub=self._pubsub,
                )
            result[info_node.mime_type].append(descriptor)

        return result

    @service.attrsignal(avatar_pep, "on_item_publish")
    def handle_pubsub_publish(self, jid, node, item, *, message=None):
        # update the metadata cache
        metadata = self._cook_metadata(jid, [item])
        self._metadata_cache[jid] = metadata

        self.on_metadata_changed(jid, metadata)

    @asyncio.coroutine
    def get_avatar_metadata(self, jid, *, require_fresh=False):
        """
        Retrieve a list of avatar descriptors for `jid`.

        The avatar descriptors are returned as a list of instances of
        :class:`~aioxmpp.avatar.service.AbstractAvatarDescriptor`.
        An empty list means that the avatar is unset.

        If `require_fresh` is true, we will not lookup the avatar
        metadata from the cache, but make a new pubsub request.

        We mask a :class:`XMPPCancelError` in the case that it is
        ``feature-not-implemented`` or ``item-not-found`` and return
        an empty list of avatar descriptors, since this is
        semantically equivalent to not having an avatar.
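
        A sketch of fetching the descriptors and retrieving the first PNG
        variant (``peer_jid`` is illustrative, and it is assumed that the
        descriptors offer a ``get_image_bytes()`` coroutine):

        .. code-block:: python

           metadata = yield from avatar.get_avatar_metadata(peer_jid)
           for descriptor in metadata.get("image/png", []):
               image_bytes = yield from descriptor.get_image_bytes()
               break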
        """
        if not require_fresh:
            try:
                return self._metadata_cache[jid]
            except KeyError:
                pass

        with (yield from self._notify_lock):
            if jid in self._metadata_cache:
                if require_fresh:
                    del self._metadata_cache[jid]
                else:
                    return self._metadata_cache[jid]

            try:
                metadata_raw = yield from self._pubsub.get_items(
                    jid, namespaces.xep0084_metadata, max_items=1)
            except aioxmpp.XMPPCancelError as e:
                # transparently map feature-not-implemented and
                # item-not-found to an unset avatar
                if e.condition in ((namespaces.stanzas,
                                    "feature-not-implemented"),
                                   (namespaces.stanzas, "item-not-found")):
                    metadata = collections.defaultdict(lambda: [])
                else:
                    raise
            else:
                metadata = self._cook_metadata(jid, metadata_raw.payload.items)

            self._metadata_cache[jid] = metadata
            return metadata

    @asyncio.coroutine
    def subscribe(self, jid):
        """
        Explicitly subscribe to metadata change notifications for `jid`.
        """
        yield from self._pubsub.subscribe(jid, namespaces.xep0084_metadata)

    @asyncio.coroutine
    def _check_for_pep(self):
        # determine support for PEP as specified in XEP-0163 section 6
        # XXX: fix this by implementing a PEPService that is derived from
        # pubsub and checks for the server capability and simplifies the
        # handling
        def raise_exception():
            raise NotImplementedError(
                "Server does not support PEP and we do not support "
                "surrogating for lack of PEP support")

        if self._has_pep is not None:
            if self._has_pep:
                return
            else:
                raise_exception()

        disco_info = yield from self._disco.query_info(
            self.client.local_jid.bare())

        for item in disco_info.identities.filter(attrs={"category": "pubsub"}):
            if item.type_ == "pep":
                self._has_pep = True
                break
        else:
            self._has_pep = False
            raise_exception()

    @aioxmpp.service.depsignal(aioxmpp.stream.StanzaStream,
                               "on_stream_destroyed")
    def handle_stream_destroyed(self, reason):
        # invalidate the cache
        self._has_pep = None

    @asyncio.coroutine
    def publish_avatar_set(self, avatar_set):
        """
        Make `avatar_set` the current avatar of the jid associated with this
        connection.

        This means publishing the ``image/png`` avatar data and the
        avatar metadata set in pubsub. The `avatar_set` must be an
        instance of :class:`AvatarSet`.
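
        A sketch of building and publishing an avatar set (assuming
        :class:`AvatarSet` offers an ``add_avatar_image`` method taking the
        MIME type and the image data):

        .. code-block:: python

           png_data = open("avatar.png", "rb").read()  # illustrative
           avatar_set = AvatarSet()
           avatar_set.add_avatar_image("image/png", image_bytes=png_data)
           yield from avatar.publish_avatar_set(avatar_set)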
        """
        yield from self._check_for_pep()

        id_ = avatar_set.png_id

        with (yield from self._publish_lock):
            yield from self._pubsub.publish(None,
                                            namespaces.xep0084_data,
                                            avatar_xso.Data(
                                                avatar_set.image_bytes),
                                            id_=id_)

            yield from self._pubsub.publish(None,
                                            namespaces.xep0084_metadata,
                                            avatar_set.metadata,
                                            id_=id_)

    @asyncio.coroutine
    def disable_avatar(self):
        """
        Temporarily disable the avatar.

        This is done by setting the avatar metadata node empty.
        """
        yield from self._check_for_pep()

        with (yield from self._publish_lock):
            yield from self._pubsub.publish(None, namespaces.xep0084_metadata,
                                            avatar_xso.Metadata())