コード例 #1
0
ファイル: _base.py プロジェクト: roblabla/synapse
    def handle_new_client_event(self,
                                event,
                                context,
                                extra_destinations=None,
                                extra_users=None,
                                suppress_auth=False):
        """Auth-check, persist and distribute an event created on this server.

        Args:
            event: the event being handled.
            context: event context; ``context.current_state`` is consulted
                for auth, invite state and federation destinations.
            extra_destinations (list|None): extra remote servers to send the
                event to, on top of those derived from joined members.
            extra_users (list|None): extra users to wake up locally.
            suppress_auth (bool): skip the auth check when the caller has
                already validated the event.
        """
        # Mutable default arguments ([]) are shared between calls; use None
        # sentinels instead.
        if extra_destinations is None:
            extra_destinations = []
        if extra_users is None:
            extra_users = []

        # We now need to go and hit out to wherever we need to hit out to.

        if not suppress_auth:
            self.auth.check(event, auth_events=context.current_state)

        yield self.maybe_kick_guest_users(event,
                                          context.current_state.values())

        if event.type == EventTypes.CanonicalAlias:
            # Check the alias is actually valid (at this time at least)
            room_alias_str = event.content.get("alias", None)
            if room_alias_str:
                room_alias = RoomAlias.from_string(room_alias_str)
                directory_handler = self.hs.get_handlers().directory_handler
                mapping = yield directory_handler.get_association(room_alias)

                if mapping["room_id"] != event.room_id:
                    raise SynapseError(
                        400, "Room alias %s does not point to the room" %
                        (room_alias_str, ))

        federation_handler = self.hs.get_handlers().federation_handler

        if event.type == EventTypes.Member:
            if event.content["membership"] == Membership.INVITE:
                # Bundle a stripped-down copy of interesting state events so
                # the invited client can render the room name/avatar.
                event.unsigned["invite_room_state"] = [{
                    "type": e.type,
                    "state_key": e.state_key,
                    "content": e.content,
                    "sender": e.sender,
                } for k, e in context.current_state.items() if e.type in (
                    EventTypes.JoinRules,
                    EventTypes.CanonicalAlias,
                    EventTypes.RoomAvatar,
                    EventTypes.Name,
                )]

                invitee = UserID.from_string(event.state_key)
                if not self.hs.is_mine(invitee):
                    # TODO: Can we add signature from remote server in a nicer
                    # way? If we have been invited by a remote server, we need
                    # to get them to sign the event.

                    returned_invite = yield federation_handler.send_invite(
                        invitee.domain,
                        event,
                    )

                    # NOTE(review): the key added above is "invite_room_state"
                    # but "room_state" is popped here -- confirm which key is
                    # intended before changing either.
                    event.unsigned.pop("room_state", None)

                    # TODO: Make sure the signatures actually are correct.
                    event.signatures.update(returned_invite.signatures)

        if event.type == EventTypes.Redaction:
            if self.auth.check_redaction(event,
                                         auth_events=context.current_state):
                original_event = yield self.store.get_event(
                    event.redacts,
                    check_redacted=False,
                    get_prev_content=False,
                    allow_rejected=False,
                    allow_none=False)
                # Redaction is only permitted on one's own events here.
                if event.user_id != original_event.user_id:
                    raise AuthError(
                        403, "You don't have permission to redact events")

        (event_stream_id,
         max_stream_id) = yield self.store.persist_event(event,
                                                         context=context)

        # Send to every server with at least one joined member, plus any
        # caller-supplied extras.
        destinations = set(extra_destinations)
        for k, s in context.current_state.items():
            try:
                if k[0] == EventTypes.Member:
                    if s.content["membership"] == Membership.JOIN:
                        destinations.add(
                            UserID.from_string(s.state_key).domain)
            except SynapseError:
                logger.warn("Failed to get destination from event %s",
                            s.event_id)

        with PreserveLoggingContext():
            # Don't block waiting on waking up all the listeners.
            notify_d = self.notifier.on_new_room_event(event,
                                                       event_stream_id,
                                                       max_stream_id,
                                                       extra_users=extra_users)

        def log_failure(f):
            logger.warn("Failed to notify about %s: %s", event.event_id,
                        f.value)

        notify_d.addErrback(log_failure)

        # If invite, remove room_state from unsigned before sending.
        event.unsigned.pop("invite_room_state", None)

        federation_handler.handle_new_event(
            event,
            destinations=destinations,
        )
コード例 #2
0
    def do_invite_join(self, target_hosts, room_id, joinee, content, snapshot):
        """ Attempts to join the `joinee` to the room `room_id` via one of
        the servers in `target_hosts`.

        This first triggers a /make_join/ request that returns a partial
        event that we can fill out and sign. This is then sent to the
        remote server via /send_join/ which responds with the state at that
        event and the auth_chains.

        We suspend processing of any received events from this room until we
        have finished processing the join.
        """
        logger.debug("Joining %s to %s", joinee, room_id)

        yield self.store.clean_room_for_join(room_id)

        origin, pdu = yield self.replication_layer.make_join(
            target_hosts, room_id, joinee)

        logger.debug("Got response to make_join: %s", pdu)

        event = pdu

        # We should assert some things.
        # FIXME: Do this in a nicer way
        assert (event.type == EventTypes.Member)
        assert (event.user_id == joinee)
        assert (event.state_key == joinee)
        assert (event.room_id == room_id)

        event.internal_metadata.outlier = False

        # Queue up any events received for this room while the join is in
        # flight; they are replayed in the `finally` block below.
        self.room_queues[room_id] = []

        builder = self.event_builder_factory.new(unfreeze(
            event.get_pdu_json()))

        handled_events = set()

        try:
            builder.event_id = self.event_builder_factory.create_event_id()
            builder.origin = self.hs.hostname
            builder.content = content

            if not hasattr(event, "signatures"):
                builder.signatures = {}

            add_hashes_and_signatures(
                builder,
                self.hs.hostname,
                self.hs.config.signing_key[0],
            )

            new_event = builder.build()

            # Try the host we successfully got a response to /make_join/
            # request first.
            try:
                target_hosts.remove(origin)
                target_hosts.insert(0, origin)
            except ValueError:
                pass

            ret = yield self.replication_layer.send_join(
                target_hosts, new_event)

            origin = ret["origin"]
            state = ret["state"]
            auth_chain = ret["auth_chain"]
            auth_chain.sort(key=lambda e: e.depth)

            handled_events.update([s.event_id for s in state])
            handled_events.update([a.event_id for a in auth_chain])
            handled_events.add(new_event.event_id)

            logger.debug("do_invite_join auth_chain: %s", auth_chain)
            logger.debug("do_invite_join state: %s", state)

            logger.debug("do_invite_join event: %s", new_event)

            try:
                yield self.store.store_room(room_id=room_id,
                                            room_creator_user_id="",
                                            is_public=False)
            except Exception:
                # FIXME: best-effort -- the room may already exist. Narrowed
                # from a bare `except:` which also caught SystemExit etc.
                pass

            # Persist the state/auth-chain events as outliers, each with the
            # subset of the auth chain it depends on.
            ev_infos = []
            for e in itertools.chain(state, auth_chain):
                if e.event_id == event.event_id:
                    continue

                e.internal_metadata.outlier = True
                auth_ids = [e_id for e_id, _ in e.auth_events]
                ev_infos.append({
                    "event": e,
                    "auth_events": {(e.type, e.state_key): e
                                    for e in auth_chain
                                    if e.event_id in auth_ids}
                })

            yield self._handle_new_events(origin, ev_infos, outliers=True)

            auth_ids = [e_id for e_id, _ in event.auth_events]
            auth_events = {(e.type, e.state_key): e
                           for e in auth_chain if e.event_id in auth_ids}

            _, event_stream_id, max_stream_id = yield self._handle_new_event(
                origin,
                new_event,
                state=state,
                current_state=state,
                auth_events=auth_events,
            )

            with PreserveLoggingContext():
                d = self.notifier.on_new_room_event(new_event,
                                                    event_stream_id,
                                                    max_stream_id,
                                                    extra_users=[joinee])

            def log_failure(f):
                logger.warn("Failed to notify about %s: %s",
                            new_event.event_id, f.value)

            d.addErrback(log_failure)

            logger.debug("Finished joining %s to %s", joinee, room_id)
        finally:
            room_queue = self.room_queues[room_id]
            del self.room_queues[room_id]

            # Replay events that arrived while we were joining, skipping any
            # already handled as part of the join itself.
            for p, origin in room_queue:
                if p.event_id in handled_events:
                    continue

                try:
                    self.on_receive_pdu(origin, p, backfilled=False)
                except Exception:
                    # Narrowed from a bare `except:` (would have swallowed
                    # SystemExit/KeyboardInterrupt too).
                    logger.exception("Couldn't handle pdu")

        defer.returnValue(True)
コード例 #3
0
ファイル: events_worker.py プロジェクト: sonrebmax/synapse
 def fire(evs, exc):
     """Errback every not-yet-fired deferred in *evs* with *exc*.

     *evs* is an iterable of (key, deferred) pairs; the keys are ignored.
     """
     pending = (d for _, d in evs if not d.called)
     for d in pending:
         with PreserveLoggingContext():
             d.errback(exc)
コード例 #4
0
ファイル: notifier.py プロジェクト: zymptomLabs/synapse
    def wait_for_events(self, user_id, timeout, callback, room_ids=None,
                        from_token=StreamToken.START):
        """Wait until the callback returns a non empty response or the
        timeout fires.

        Args:
            user_id (str): the user whose notifier stream to wait on.
            timeout (int): how long to wait, in milliseconds (compared
                against ``clock.time_msec()``); a falsy value means call
                the callback once without waiting.
            callback: callable of ``(prev_token, current_token)`` yielding
                a result; a truthy result ends the wait.
            room_ids (list|None): rooms to listen on; fetched from the
                store when None.
            from_token (StreamToken): stream position to start from.
        """
        user_stream = self.user_to_user_stream.get(user_id)
        if user_stream is None:
            # No live stream registered for this user yet: build one and
            # register it with the notifier's lookup keys.
            current_token = yield self.event_sources.get_current_token()
            if room_ids is None:
                room_ids = yield self.store.get_rooms_for_user(user_id)
            user_stream = _NotifierUserStream(
                user_id=user_id,
                rooms=room_ids,
                current_token=current_token,
                time_now_ms=self.clock.time_msec(),
            )
            self._register_with_keys(user_stream)

        result = None
        prev_token = from_token
        if timeout:
            end_time = self.clock.time_msec() + timeout

            while not result:
                try:
                    now = self.clock.time_msec()
                    if end_time <= now:
                        break

                    # Now we wait for the _NotifierUserStream to be told there
                    # is a new token.
                    listener = user_stream.new_listener(prev_token)
                    # Cap the wait at whatever remains of the deadline
                    # (timeout_deferred takes seconds, hence / 1000).
                    listener.deferred = timeout_deferred(
                        listener.deferred,
                        (end_time - now) / 1000.,
                        self.hs.get_reactor(),
                    )
                    with PreserveLoggingContext():
                        yield listener.deferred

                    current_token = user_stream.current_token

                    result = yield callback(prev_token, current_token)
                    if result:
                        break

                    # Update the prev_token to the current_token since nothing
                    # has happened between the old prev_token and the current_token
                    prev_token = current_token
                except defer.TimeoutError:
                    break
                except defer.CancelledError:
                    break

        if result is None:
            # This happens if there was no timeout or if the timeout had
            # already expired: do one final (or only) callback pass.
            current_token = user_stream.current_token
            result = yield callback(prev_token, current_token)

        defer.returnValue(result)
コード例 #5
0
ファイル: presence.py プロジェクト: tchen0123/synapse
    def incoming_presence(self, origin, content):
        """Handle a presence EDU received over federation from *origin*.

        Processes three sections of *content*: "push" (presence updates to
        fan out to interested local observers/rooms), "poll" (remote asks
        to be kept informed about local users) and "unpoll" (stop doing
        so).
        """
        deferreds = []

        for push in content.get("push", []):
            user = self.hs.parse_userid(push["user_id"])

            logger.debug("Incoming presence update from %s", user)

            observers = set(self._remote_recvmap.get(user, set()))
            if observers:
                logger.debug(" | %d interested local observers %r", len(observers), observers)

            rm_handler = self.homeserver.get_handlers().room_member_handler
            room_ids = yield rm_handler.get_rooms_for_user(user)
            if room_ids:
                logger.debug(" | %d interested room IDs %r", len(room_ids), room_ids)

            # Everything except "user_id" is treated as presence state.
            state = dict(push)
            del state["user_id"]

            if "presence" not in state:
                logger.warning(
                    "Received a presence 'push' EDU from %s without a"
                    " 'presence' key", origin
                )
                continue

            if "last_active_ago" in state:
                # Convert the relative age into an absolute timestamp.
                state["last_active"] = int(
                    self.clock.time_msec() - state.pop("last_active_ago")
                )

            statuscache = self._get_or_make_usercache(user)

            self._user_cachemap_latest_serial += 1
            statuscache.update(state, serial=self._user_cachemap_latest_serial)

            if not observers and not room_ids:
                logger.debug(" | no interested observers or room IDs")
                continue

            self.push_update_to_clients(
                observed_user=user,
                users_to_push=observers,
                room_ids=room_ids,
                statuscache=statuscache,
            )

            # Don't keep a cache entry around for users who went offline.
            if state["presence"] == PresenceState.OFFLINE:
                del self._user_cachemap[user]

        for poll in content.get("poll", []):
            user = self.hs.parse_userid(poll)

            if not self.hs.is_mine(user):
                continue

            # TODO(paul) permissions checks

            # Idiomatic membership-init (was `if not user in ...`), same
            # behaviour: ensure a set exists, then record the origin.
            self._remote_sendmap.setdefault(user, set()).add(origin)

            deferreds.append(self._push_presence_remote(user, origin))

        for unpoll in content.get("unpoll", []):
            user = self.hs.parse_userid(unpoll)

            if not self.hs.is_mine(user):
                continue

            if user in self._remote_sendmap:
                self._remote_sendmap[user].remove(origin)

                if not self._remote_sendmap[user]:
                    del self._remote_sendmap[user]

        with PreserveLoggingContext():
            yield defer.DeferredList(deferreds)
コード例 #6
0
 def on_new_replication_data(self):
     """Inform replication listeners that something has happened without
     waking up any of the normal user event streams."""
     # Fire the wake-up inside PreserveLoggingContext, matching the
     # notification pattern used elsewhere in this codebase.
     with PreserveLoggingContext():
         self.notify_replication()
コード例 #7
0
ファイル: keyring.py プロジェクト: vt0r/synapse
    def verify_json_objects_for_server(self, server_and_json):
        """Bulk verifies signatures of json objects, bulk fetching keys as
        necessary.

        Args:
            server_and_json (list): List of pairs of (server_name, json_object)

        Returns:
            list of deferreds indicating success or failure to verify each
            json object's signature for the given server_name.
        """
        group_id_to_json = {}
        group_id_to_group = {}
        group_ids = []

        next_group_id = 0
        deferreds = {}

        # One "group" per (server_name, json_object) pair, each with the
        # key ids its signatures claim to use.
        for server_name, json_object in server_and_json:
            logger.debug("Verifying for %s", server_name)
            group_id = next_group_id
            next_group_id += 1
            group_ids.append(group_id)

            key_ids = signature_ids(json_object, server_name)
            if not key_ids:
                deferreds[group_id] = defer.fail(
                    SynapseError(
                        400,
                        "Not signed with a supported algorithm",
                        Codes.UNAUTHORIZED,
                    ))
            else:
                deferreds[group_id] = defer.Deferred()

            group = KeyGroup(server_name, group_id, key_ids)

            group_id_to_group[group_id] = group
            group_id_to_json[group_id] = json_object

        @defer.inlineCallbacks
        def handle_key_deferred(group, deferred):
            """Wait for the group's key to arrive, then verify its json.

            Raises SynapseError on download failure or bad signature.
            """
            server_name = group.server_name
            try:
                _, _, key_id, verify_key = yield deferred
            except IOError as e:
                logger.warn(
                    "Got IOError when downloading keys for %s: %s %s",
                    server_name,
                    type(e).__name__,
                    str(e.message),
                )
                raise SynapseError(
                    502,
                    "Error downloading keys for %s" % (server_name, ),
                    Codes.UNAUTHORIZED,
                )
            except Exception as e:
                logger.exception(
                    "Got Exception when downloading keys for %s: %s %s",
                    server_name,
                    type(e).__name__,
                    str(e.message),
                )
                # Use this group's own key ids: the closed-over loop
                # variable `key_ids` would hold the LAST iteration's value
                # by the time this deferred fires (late binding).
                raise SynapseError(
                    401,
                    "No key for %s with id %s" % (server_name, group.key_ids),
                    Codes.UNAUTHORIZED,
                )

            json_object = group_id_to_json[group.group_id]

            try:
                verify_signed_json(json_object, server_name, verify_key)
            except Exception:
                # Narrowed from a bare `except:`; any verification failure
                # is reported as an invalid signature.
                raise SynapseError(
                    401,
                    "Invalid signature for server %s with key %s:%s" %
                    (server_name, verify_key.alg, verify_key.version),
                    Codes.UNAUTHORIZED,
                )

        server_to_deferred = {
            server_name: defer.Deferred()
            for server_name, _ in server_and_json
        }

        with PreserveLoggingContext():

            # We want to wait for any previous lookups to complete before
            # proceeding.
            wait_on_deferred = self.wait_for_previous_lookups(
                [server_name for server_name, _ in server_and_json],
                server_to_deferred,
            )

            # Actually start fetching keys.
            wait_on_deferred.addBoth(lambda _: self.get_server_verify_keys(
                group_id_to_group, deferreds))

            # When we've finished fetching all the keys for a given server_name,
            # resolve the deferred passed to `wait_for_previous_lookups` so that
            # any lookups waiting will proceed.
            server_to_gids = {}

            def remove_deferreds(res, server_name, group_id):
                server_to_gids[server_name].discard(group_id)
                if not server_to_gids[server_name]:
                    d = server_to_deferred.pop(server_name, None)
                    if d:
                        d.callback(None)
                return res

            for g_id, deferred in deferreds.items():
                server_name = group_id_to_group[g_id].server_name
                server_to_gids.setdefault(server_name, set()).add(g_id)
                deferred.addBoth(remove_deferreds, server_name, g_id)

        # Pass those keys to handle_key_deferred so that the json object
        # signatures can be verified
        return [
            preserve_context_over_fn(
                handle_key_deferred,
                group_id_to_group[g_id],
                deferreds[g_id],
            ) for g_id in group_ids
        ]
コード例 #8
0
 def wrapped_callback(*args, **kwargs):
     """Closure helper: invoke the enclosing scope's ``callback`` with the
     given arguments, inside a PreserveLoggingContext block."""
     with PreserveLoggingContext():
         callback(*args, **kwargs)
コード例 #9
0
 def rm(r, server_name_):
     """Pass-through deferred callback: release the key-lookup lock for
     *server_name_* and return *r* unchanged (so the chain continues)."""
     with PreserveLoggingContext(ctx):
         logger.debug("Releasing key lookup lock on %s", server_name_)
         # Drop the in-flight marker; pop() tolerates it already being gone.
         self.key_downloads.pop(server_name_, None)
     return r
コード例 #10
0
        def wrapped(*args, **kwargs):
            # Reconstruct the full argument dict for the wrapped function so
            # we can build a cache key per element of the list argument.
            arg_dict = inspect.getcallargs(self.orig, obj, *args, **kwargs)
            keyargs = [arg_dict[arg_nm] for arg_nm in self.arg_names]
            list_args = arg_dict[self.list_name]

            # cached is a dict arg -> deferred, where deferred results in a
            # 2-tuple (`arg`, `result`)
            results = {}
            cached_defers = {}
            missing = []
            for arg in list_args:
                # Cache key = the fixed key args with this list element
                # substituted at list_pos.
                key = list(keyargs)
                key[self.list_pos] = arg

                try:
                    res = cache.get(tuple(key))
                    if not res.has_succeeded():
                        # Still in flight: observe it and tag the result
                        # with its arg (arg bound as a callback argument to
                        # avoid the late-binding closure pitfall).
                        res = res.observe()
                        res.addCallback(lambda r, arg: (arg, r), arg)
                        cached_defers[arg] = res
                    else:
                        results[arg] = res.get_result()
                except KeyError:
                    missing.append(arg)

            if missing:
                # Call the underlying function once with only the missing
                # list elements.
                sequence = cache.sequence
                args_to_call = dict(arg_dict)
                args_to_call[self.list_name] = missing

                ret_d = defer.maybeDeferred(preserve_context_over_fn,
                                            self.function_to_call,
                                            **args_to_call)

                ret_d = ObservableDeferred(ret_d)

                # We need to create deferreds for each arg in the list so that
                # we can insert the new deferred into the cache.
                for arg in missing:
                    with PreserveLoggingContext():
                        observer = ret_d.observe()
                    observer.addCallback(lambda r, arg: r.get(arg, None), arg)

                    observer = ObservableDeferred(observer)

                    key = list(keyargs)
                    key[self.list_pos] = arg
                    cache.update(sequence, tuple(key), observer)

                    def invalidate(f, key):
                        # On failure, evict the entry so a retry re-fetches.
                        cache.invalidate(key)
                        return f

                    observer.addErrback(invalidate, tuple(key))

                    res = observer.observe()
                    res.addCallback(lambda r, arg: (arg, r), arg)

                    cached_defers[arg] = res

            if cached_defers:

                def update_results_dict(res):
                    # res is a list of (arg, result) pairs; merge them into
                    # the already-resolved results.
                    results.update(res)
                    return results

                return preserve_context_over_deferred(
                    defer.gatherResults(
                        cached_defers.values(),
                        consumeErrors=True,
                    ).addCallback(update_results_dict).addErrback(
                        unwrapFirstError))
            else:
                # Everything was already cached and resolved.
                return results
コード例 #11
0
    def _create_request(self,
                        destination,
                        method,
                        path_bytes,
                        body_callback,
                        headers_dict=None,
                        param_bytes=b"",
                        query_bytes=b"",
                        retry_on_dns_fail=True):
        """ Creates and sends a request to the given url

        Args:
            destination: remote server to send to.
            method: HTTP method.
            path_bytes/param_bytes/query_bytes: components of the URL.
            body_callback: callable producing a body producer, or falsy for
                no body.
            headers_dict (dict|None): extra headers; User-Agent and Host are
                always set.
            retry_on_dns_fail (bool): whether to retry on DNSLookupError.
        """
        # The original default was a mutable {} which this method MUTATES
        # (User-Agent/Host below) -- state leaked across calls. Use a None
        # sentinel and copy any caller-supplied dict.
        headers_dict = dict(headers_dict) if headers_dict else {}
        headers_dict[b"User-Agent"] = [self.version_string]
        headers_dict[b"Host"] = [destination]

        url_bytes = urlparse.urlunparse((
            "",
            "",
            path_bytes,
            param_bytes,
            query_bytes,
            "",
        ))

        logger.info("Sending request to %s: %s %s", destination, method,
                    url_bytes)

        logger.debug("Types: %s", [
            type(destination),
            type(method),
            type(path_bytes),
            type(param_bytes),
            type(query_bytes)
        ])

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        retries_left = 5

        endpoint = self._getEndpoint(reactor, destination)

        while True:
            producer = None
            if body_callback:
                producer = body_callback(method, url_bytes, headers_dict)

            try:
                with PreserveLoggingContext():
                    request_deferred = self.agent.request(
                        destination, endpoint, method, path_bytes, param_bytes,
                        query_bytes, Headers(headers_dict), producer)

                    # Hard 60s cap on each attempt.
                    response = yield self.clock.time_bound_deferred(
                        request_deferred,
                        time_out=60,
                    )

                logger.debug("Got response to %s", method)
                break
            except Exception as e:
                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                    logger.warn("DNS Lookup failed to %s with %s", destination,
                                e)
                    raise

                logger.warn(
                    "Sending request failed to %s: %s %s: %s - %s",
                    destination,
                    method,
                    url_bytes,
                    type(e).__name__,
                    _flatten_response_never_received(e),
                )

                if retries_left:
                    # Exponential backoff: 2, 4, 8, 16, 32 seconds.
                    yield sleep(2**(5 - retries_left))
                    retries_left -= 1
                else:
                    raise

        logger.info("Received response %d %s for %s: %s %s", response.code,
                    response.phrase, destination, method, url_bytes)

        if 200 <= response.code < 300:
            pass
        else:
            # :'(
            # Update transactions table?
            body = yield readBody(response)
            raise HttpResponseException(response.code, response.phrase, body)

        defer.returnValue(response)
コード例 #12
0
ファイル: events.py プロジェクト: casics/extractor
    def get_stream(self,
                   auth_user_id,
                   pagin_config,
                   timeout=0,
                   as_client_event=True,
                   affect_presence=True):
        """Stream events to *auth_user_id*, returning a serialized chunk.

        Args:
            auth_user_id (str): the requesting user's id.
            pagin_config: pagination config carrying the from_token.
            timeout (int): how long to wait for events.
            as_client_event (bool): serialize in client-event format.
            affect_presence (bool): whether this stream counts towards the
                user's presence/eventstream bookkeeping.
        """
        auth_user = UserID.from_string(auth_user_id)

        try:
            if affect_presence:
                if auth_user not in self._streams_per_user:
                    self._streams_per_user[auth_user] = 0
                    if auth_user in self._stop_timer_per_user:
                        # Reconnected within the grace period: cancel the
                        # pending "stopped" timer instead of re-firing
                        # "started".
                        try:
                            self.clock.cancel_call_later(
                                self._stop_timer_per_user.pop(auth_user))
                        except Exception:
                            # Narrowed from a bare `except:`.
                            logger.exception("Failed to cancel event timer")
                    else:
                        yield self.distributor.fire("started_user_eventstream",
                                                    auth_user)
                self._streams_per_user[auth_user] += 1

            # NOTE(review): the original contained the no-op
            #   if pagin_config.from_token is None:
            #       pagin_config.from_token = None
            # which has been removed; it had no effect.

            rm_handler = self.hs.get_handlers().room_member_handler
            room_ids = yield rm_handler.get_rooms_for_user(auth_user)

            with PreserveLoggingContext():
                events, tokens = yield self.notifier.get_events_for(
                    auth_user, room_ids, pagin_config, timeout)

            time_now = self.clock.time_msec()

            chunks = [
                serialize_event(e, time_now, as_client_event) for e in events
            ]

            chunk = {
                "chunk": chunks,
                "start": tokens[0].to_string(),
                "end": tokens[1].to_string(),
            }

            defer.returnValue(chunk)

        finally:
            if affect_presence:
                self._streams_per_user[auth_user] -= 1
                if not self._streams_per_user[auth_user]:
                    del self._streams_per_user[auth_user]

                    # 30 seconds of grace to allow the client to reconnect
                    #   again before we think they're gone (the comment used
                    #   to say 10s but call_later below waits 30s)
                    def _later():
                        logger.debug("_later stopped_user_eventstream %s",
                                     auth_user)

                        self._stop_timer_per_user.pop(auth_user, None)

                        return self.distributor.fire(
                            "stopped_user_eventstream", auth_user)

                    logger.debug("Scheduling _later: for %s", auth_user)
                    self._stop_timer_per_user[auth_user] = (
                        self.clock.call_later(30, _later))
コード例 #13
0
def user_joined_room(distributor, user, room_id):
    """Notify observers via *distributor* that *user* has joined *room_id*."""
    signal_kwargs = {"user": user, "room_id": room_id}
    with PreserveLoggingContext():
        distributor.fire("user_joined_room", **signal_kwargs)
コード例 #14
0
    def on_send_join_request(self, origin, pdu):
        """ We have received a join event for a room. Fully process it and
        respond with the current state and auth chains.

        Args:
            origin (str): the server the join event came from.
            pdu: the join event itself.

        Returns:
            dict with "state" (the room's current state events) and
            "auth_chain" (the auth chain covering the event plus that state).

        NOTE(review): this is a generator (uses ``yield``); presumably it is
        decorated with ``@defer.inlineCallbacks`` in the original file --
        confirm.
        """
        event = pdu

        logger.debug(
            "on_send_join_request: Got event: %s, signatures: %s",
            event.event_id,
            event.signatures,
        )

        # The event arrived via send_join, so it is a real in-room event,
        # not an outlier.
        event.internal_metadata.outlier = False

        context, event_stream_id, max_stream_id = yield self._handle_new_event(
            origin, event)

        logger.debug(
            "on_send_join_request: After _handle_new_event: %s, sigs: %s",
            event.event_id,
            event.signatures,
        )

        # Membership events should wake up the affected user's streams too.
        extra_users = []
        if event.type == EventTypes.Member:
            target_user_id = event.state_key
            target_user = UserID.from_string(target_user_id)
            extra_users.append(target_user)

        with PreserveLoggingContext():
            d = self.notifier.on_new_room_event(event,
                                                event_stream_id,
                                                max_stream_id,
                                                extra_users=extra_users)

        def log_failure(f):
            # Notification failures are logged but do not abort processing.
            logger.warn("Failed to notify about %s: %s", event.event_id,
                        f.value)

        d.addErrback(log_failure)

        if event.type == EventTypes.Member:
            if event.content["membership"] == Membership.JOIN:
                user = UserID.from_string(event.state_key)
                yield self.distributor.fire("user_joined_room",
                                            user=user,
                                            room_id=event.room_id)

        new_pdu = event

        # Work out which remote servers currently have joined users in the
        # room, so we can forward the new join to them.
        destinations = set()

        for k, s in context.current_state.items():
            try:
                if k[0] == EventTypes.Member:
                    if s.content["membership"] == Membership.JOIN:
                        destinations.add(
                            UserID.from_string(s.state_key).domain)
            except Exception:
                # Was a bare `except:`; narrowed so that KeyboardInterrupt/
                # SystemExit are not swallowed. Best effort: one malformed
                # membership event shouldn't stop us notifying everyone else.
                logger.warn("Failed to get destination from event %s",
                            s.event_id)

        # The origin already has the event; don't echo it back.
        destinations.discard(origin)

        logger.debug(
            "on_send_join_request: Sending event: %s, signatures: %s",
            event.event_id,
            event.signatures,
        )

        self.replication_layer.send_pdu(new_pdu, destinations)

        state_ids = [e.event_id for e in context.current_state.values()]
        auth_chain = yield self.store.get_auth_chain(
            set([event.event_id] + state_ids))

        defer.returnValue({
            "state": context.current_state.values(),
            "auth_chain": auth_chain,
        })
コード例 #15
0
 def sleep(self, seconds):
     """Return (via yield) after *seconds*, firing a Deferred with that value."""
     wakeup = defer.Deferred()
     with PreserveLoggingContext():
         self._reactor.callLater(seconds, wakeup.callback, seconds)
         result = yield wakeup
     defer.returnValue(result)
コード例 #16
0
    def on_receive_pdu(self,
                       origin,
                       pdu,
                       backfilled,
                       state=None,
                       auth_chain=None):
        """ Called by the ReplicationLayer when we have a new pdu. We need to
        do auth checks and put it through the StateHandler.

        Args:
            origin (str): server the PDU came from.
            pdu: the event to process.
            backfilled (bool): True if the event arrived via backfill; local
                clients are then not notified.
            state: optional list of state events supplied alongside the PDU.
            auth_chain: optional list of auth events supplied alongside it.

        NOTE(review): this is a generator (uses ``yield``); presumably it is
        decorated with ``@defer.inlineCallbacks`` in the original file --
        confirm.
        """
        event = pdu

        logger.debug("Got event: %s", event.event_id)

        # If we are currently in the process of joining this room, then we
        # queue up events for later processing.
        if event.room_id in self.room_queues:
            self.room_queues[event.room_id].append((pdu, origin))
            return

        logger.debug("Processing event: %s", event.event_id)

        logger.debug("Event: %s", event)

        # FIXME (erikj): Awful hack to make the case where we are not currently
        # in the room work
        current_state = None
        is_in_room = yield self.auth.check_host_in_room(
            event.room_id, self.server_name)
        if not is_in_room and not event.internal_metadata.is_outlier():
            logger.debug("Got event for room we're not in.")
            current_state = state

        # Gather the ids of every event handed to us alongside the PDU so we
        # can skip any we have already persisted.
        event_ids = set()
        if state:
            event_ids |= {e.event_id for e in state}
        if auth_chain:
            event_ids |= {e.event_id for e in auth_chain}

        seen_ids = set((yield self.store.have_events(event_ids)).keys())

        if state and auth_chain is not None:
            # If we have any state or auth_chain given to us by the replication
            # layer, then we should handle them (if we haven't before.)

            event_infos = []

            for e in itertools.chain(auth_chain, state):
                if e.event_id in seen_ids:
                    continue
                # Accompanying events are stored as outliers: persisted
                # without being linked into our copy of the room DAG.
                e.internal_metadata.outlier = True
                auth_ids = [e_id for e_id, _ in e.auth_events]
                auth = {(e.type, e.state_key): e
                        for e in auth_chain if e.event_id in auth_ids}
                event_infos.append({
                    "event": e,
                    "auth_events": auth,
                })
                seen_ids.add(e.event_id)

            yield self._handle_new_events(origin, event_infos, outliers=True)

        try:
            _, event_stream_id, max_stream_id = yield self._handle_new_event(
                origin,
                event,
                state=state,
                backfilled=backfilled,
                current_state=current_state,
            )
        except AuthError as e:
            # Surface auth failures back to the sending server as a
            # federation-level error scoped to this event.
            raise FederationError(
                "ERROR",
                e.code,
                e.msg,
                affected=event.event_id,
            )

        # if we're receiving valid events from an origin,
        # it's probably a good idea to mark it as not in retry-state
        # for sending (although this is a bit of a leap)
        retry_timings = yield self.store.get_destination_retry_timings(origin)
        if retry_timings and retry_timings["retry_last_ts"]:
            self.store.set_destination_retry_timings(origin, 0, 0)

        room = yield self.store.get_room(event.room_id)

        if not room:
            # First event we've seen for this room: create a stub row so
            # later storage operations have something to reference.
            try:
                yield self.store.store_room(
                    room_id=event.room_id,
                    room_creator_user_id="",
                    is_public=False,
                )
            except StoreError:
                logger.exception("Failed to store room.")

        if not backfilled:
            # Notify local clients; membership events also wake the target.
            extra_users = []
            if event.type == EventTypes.Member:
                target_user_id = event.state_key
                target_user = UserID.from_string(target_user_id)
                extra_users.append(target_user)

            with PreserveLoggingContext():
                d = self.notifier.on_new_room_event(event,
                                                    event_stream_id,
                                                    max_stream_id,
                                                    extra_users=extra_users)

            def log_failure(f):
                # Notification failures are logged, not fatal.
                logger.warn("Failed to notify about %s: %s", event.event_id,
                            f.value)

            d.addErrback(log_failure)

        if event.type == EventTypes.Member:
            if event.membership == Membership.JOIN:
                user = UserID.from_string(event.state_key)
                yield self.distributor.fire("user_joined_room",
                                            user=user,
                                            room_id=event.room_id)
コード例 #17
0
ファイル: keyring.py プロジェクト: ysoffner/synapse
 def on_err(err):
     """Fail every verify request whose deferred has not yet fired."""
     with PreserveLoggingContext():
         pending = [vr for vr in verify_requests if not vr.deferred.called]
         for vr in pending:
             vr.deferred.errback(err)
コード例 #18
0
    def create_and_send_event(self, event_dict, ratelimit=True,
                              token_id=None, txn_id=None, is_guest=False):
        """ Given a dict from a client, create and handle a new event.

        Creates an FrozenEvent object, filling out auth_events, prev_events,
        etc.

        Adds display names to Join membership events.

        Persists and notifies local clients and federation.

        Args:
            event_dict (dict): An entire event
            ratelimit (bool): whether to apply the sender's rate limit before
                building the event.
            token_id: access token id recorded on the event's internal
                metadata, if supplied.
            txn_id: client transaction id recorded on the event's internal
                metadata, if supplied.
            is_guest (bool): passed through to the room member handler for
                membership events.

        Returns:
            the created event; for an unchanged state update, the existing
            previous state event is returned instead (see below).
        """
        builder = self.event_builder_factory.new(event_dict)

        self.validator.validate_new(builder)

        if ratelimit:
            self.ratelimit(builder.user_id)
        # TODO(paul): Why does 'event' not have a 'user' object?
        user = UserID.from_string(builder.user_id)
        assert self.hs.is_mine(user), "User must be our own: %s" % (user,)

        if builder.type == EventTypes.Member:
            membership = builder.content.get("membership", None)
            if membership == Membership.JOIN:
                joinee = UserID.from_string(builder.state_key)
                # If event doesn't include a display name, add one.
                yield collect_presencelike_data(
                    self.distributor, joinee, builder.content
                )

        if token_id is not None:
            builder.internal_metadata.token_id = token_id

        if txn_id is not None:
            builder.internal_metadata.txn_id = txn_id

        event, context = yield self._create_new_client_event(
            builder=builder,
        )

        if event.is_state():
            prev_state = context.current_state.get((event.type, event.state_key))
            if prev_state and event.user_id == prev_state.user_id:
                prev_content = encode_canonical_json(prev_state.content)
                next_content = encode_canonical_json(event.content)
                if prev_content == next_content:
                    # Duplicate suppression for state updates with same sender
                    # and content.
                    defer.returnValue(prev_state)

        if event.type == EventTypes.Member:
            # Membership changes get routed through the dedicated room member
            # handler rather than the generic client-event path.
            member_handler = self.hs.get_handlers().room_member_handler
            yield member_handler.change_membership(event, context, is_guest=is_guest)
        else:
            yield self.handle_new_client_event(
                event=event,
                context=context,
            )

        if event.type == EventTypes.Message:
            # Sending a message implies the sender is actively using their
            # client, so bump their presence.
            presence = self.hs.get_handlers().presence_handler
            with PreserveLoggingContext():
                presence.bump_presence_active_time(user)

        defer.returnValue(event)
コード例 #19
0
ファイル: keyring.py プロジェクト: ysoffner/synapse
def _handle_key_deferred(verify_request):
    """Waits for the key to become available, and then performs a verification

    Args:
        verify_request (VerifyKeyRequest):

    Returns:
        Deferred[None]

    Raises:
        SynapseError if there was a problem performing the verification
    """
    server_name = verify_request.server_name
    try:
        with PreserveLoggingContext():
            _, key_id, verify_key = yield verify_request.deferred
    except IOError as e:
        logger.warn(
            "Got IOError when downloading keys for %s: %s %s",
            server_name,
            type(e).__name__,
            str(e),
        )
        # 502: a key-download failure is an upstream problem, not the
        # requester's fault.
        raise SynapseError(
            502,
            "Error downloading keys for %s" % (server_name, ),
            Codes.UNAUTHORIZED,
        )
    except Exception as e:
        logger.exception(
            "Got Exception when downloading keys for %s: %s %s",
            server_name,
            type(e).__name__,
            str(e),
        )
        raise SynapseError(
            401,
            "No key for %s with id %s" % (server_name, verify_request.key_ids),
            Codes.UNAUTHORIZED,
        )

    json_object = verify_request.json_object

    # Use lazy %-style logging arguments (matching the rest of the file)
    # instead of eagerly formatting the message with `%`: the string is only
    # built if debug logging is actually enabled.
    logger.debug(
        "Got key %s %s:%s for server %s, verifying",
        key_id,
        verify_key.alg,
        verify_key.version,
        server_name,
    )
    try:
        verify_signed_json(json_object, server_name, verify_key)
    except SignatureVerifyException as e:
        logger.debug(
            "Error verifying signature for %s:%s:%s with key %s: %s",
            server_name,
            verify_key.alg,
            verify_key.version,
            encode_verify_key_base64(verify_key),
            str(e),
        )
        raise SynapseError(
            401,
            "Invalid signature for server %s with key %s:%s: %s" % (
                server_name,
                verify_key.alg,
                verify_key.version,
                str(e),
            ),
            Codes.UNAUTHORIZED,
        )
コード例 #20
0
    def wrapped_request_handler(self, request):
        """Wrap an HTTP request handler with logging-context and metrics.

        Allocates a per-request id, sets up a LoggingContext and request
        metrics, delegates to the wrapped handler (``request_handler`` and
        ``include_metrics`` are closure variables from the enclosing
        decorator), and converts any exception into a JSON error response.
        """
        global _next_request_id
        # Unique id like "GET-123", used to correlate log lines.
        request_id = "%s-%s" % (request.method, _next_request_id)
        _next_request_id += 1

        with LoggingContext(request_id) as request_context:
            with Measure(self.clock, "wrapped_request_handler"):
                request_metrics = RequestMetrics()
                # we start the request metrics timer here with an initial stab
                # at the servlet name. For most requests that name will be
                # JsonResource (or a subclass), and JsonResource._async_render
                # will update it once it picks a servlet.
                servlet_name = self.__class__.__name__
                request_metrics.start(self.clock, name=servlet_name)

                request_context.request = request_id
                with request.processing():
                    try:
                        with PreserveLoggingContext(request_context):
                            if include_metrics:
                                yield request_handler(self, request, request_metrics)
                            else:
                                requests_counter.inc(request.method, servlet_name)
                                yield request_handler(self, request)
                    except CodeMessageException as e:
                        # Known error type: respond with its code and message.
                        code = e.code
                        if isinstance(e, SynapseError):
                            logger.info(
                                "%s SynapseError: %s - %s", request, code, e.msg
                            )
                        else:
                            logger.exception(e)
                        outgoing_responses_counter.inc(request.method, str(code))
                        respond_with_json(
                            request, code, cs_exception(e), send_cors=True,
                            pretty_print=_request_user_agent_is_curl(request),
                            version_string=self.version_string,
                        )
                    except Exception:
                        # failure.Failure() fishes the original Failure out
                        # of our stack, and thus gives us a sensible stack
                        # trace.
                        f = failure.Failure()
                        logger.error(
                            "Failed handle request %s.%s on %r: %r: %s",
                            request_handler.__module__,
                            request_handler.__name__,
                            self,
                            request,
                            f.getTraceback().rstrip(),
                        )
                        respond_with_json(
                            request,
                            500,
                            {
                                "error": "Internal server error",
                                "errcode": Codes.UNKNOWN,
                            },
                            send_cors=True,
                            pretty_print=_request_user_agent_is_curl(request),
                            version_string=self.version_string,
                        )
                    finally:
                        # Always stop the metrics timer, even if the handler
                        # or the error path above blew up.
                        try:
                            request_metrics.stop(
                                self.clock, request
                            )
                        except Exception as e:
                            logger.warn("Failed to stop metrics: %r", e)
コード例 #21
0
    def _create_request(self,
                        destination,
                        method,
                        path_bytes,
                        body_callback,
                        headers_dict=None,
                        param_bytes=b"",
                        query_bytes=b"",
                        retry_on_dns_fail=True):
        """ Creates and sends a request to the given url

        Args:
            destination: server name the request is sent to.
            method: HTTP method.
            path_bytes / param_bytes / query_bytes: URL components.
            body_callback: callable producing a request body producer, or
                falsy for no body.
            headers_dict (dict|None): extra headers to send; a fresh dict is
                created when not supplied.
            retry_on_dns_fail (bool): if False, a DNS failure raises a
                SynapseError instead of being retried.

        Returns:
            the response object, once a response has been received.

        Raises:
            SynapseError: on DNS failure when retry_on_dns_fail is False.
            CodeMessageException: when the final response is not 2xx.
        """
        # Was a mutable default argument (`headers_dict={}`) that the body
        # mutates below, so headers leaked between calls using the default.
        if headers_dict is None:
            headers_dict = {}
        headers_dict[b"User-Agent"] = [AGENT_NAME]
        headers_dict[b"Host"] = [destination]

        url_bytes = urlparse.urlunparse((
            "",
            "",
            path_bytes,
            param_bytes,
            query_bytes,
            "",
        ))

        logger.info("Sending request to %s: %s %s", destination, method,
                    url_bytes)

        logger.debug("Types: %s", [
            type(destination),
            type(method),
            type(path_bytes),
            type(param_bytes),
            type(query_bytes)
        ])

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        retries_left = 5

        endpoint = self._getEndpoint(reactor, destination)

        while True:
            producer = None
            if body_callback:
                producer = body_callback(method, url_bytes, headers_dict)

            try:
                with PreserveLoggingContext():
                    response = yield self.agent.request(
                        destination, endpoint, method, path_bytes, param_bytes,
                        query_bytes, Headers(headers_dict), producer)

                logger.debug("Got response to %s", method)
                break
            except Exception as e:
                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                    logger.warn("DNS Lookup failed to %s with %s", destination,
                                e)
                    raise SynapseError(400, "Domain specified not found.")

                logger.warn("Sending request failed to %s: %s %s : %s",
                            destination, method, url_bytes, e)
                _print_ex(e)

                if retries_left:
                    # Exponential backoff: 1s, 2s, 4s, 8s, 16s.
                    yield sleep(2**(5 - retries_left))
                    retries_left -= 1
                else:
                    raise

        logger.info("Received response %d %s for %s: %s %s", response.code,
                    response.phrase, destination, method, url_bytes)

        if 200 <= response.code < 300:
            # We need to update the transactions table to say it was sent?
            pass
        else:
            # :'(
            # Update transactions table?
            raise CodeMessageException(response.code, response.phrase)

        defer.returnValue(response)