Example 1
    @defer.inlineCallbacks
    def _do_initial_spam(self):
        """Populates the user_directory from the current state of the DB, used
        when synapse first starts with user_directory support
        """
        new_pos = yield self.store.get_max_stream_id_in_current_state_deltas()

        # Delete any existing entries just in case there are any
        yield self.store.delete_all_from_user_dir()

        # We process each existing room, one at a time.
        room_ids = yield self.store.get_all_rooms()

        logger.info("Doing initial update of user directory. %d rooms", len(room_ids))
        num_processed_rooms = 0

        for room_id in room_ids:
            logger.info("Handling room %d/%d", num_processed_rooms + 1, len(room_ids))
            yield self._handle_initial_room(room_id)
            num_processed_rooms += 1
            yield sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)

        logger.info("Processed all rooms.")

        if self.search_all_users:
            num_processed_users = 0
            user_ids = yield self.store.get_all_local_users()
            logger.info("Doing initial update of user directory. %d users", len(user_ids))
            for user_id in user_ids:
                # We add profiles for all users even if they don't match the
                # include pattern, just in case we want to change it in future
                logger.info("Handling user %d/%d", num_processed_users + 1, len(user_ids))
                yield self._handle_local_user(user_id)
                num_processed_users += 1
                yield sleep(self.INITIAL_USER_SLEEP_MS / 1000.)

            logger.info("Processed all users")

        self.initially_handled_users = None
        self.initially_handled_users_in_public = None
        self.initially_handled_users_share = None
        self.initially_handled_users_share_private_room = None

        yield self.store.update_user_directory_stream_pos(new_pos)
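
The loops above deliberately yield a short sleep between rooms and between users so the one-off directory build does not hog the reactor. A minimal sketch of that pattern in plain Twisted (process_in_batches and handle_item are illustrative names, not part of Synapse):

    from twisted.internet import defer, reactor, task

    @defer.inlineCallbacks
    def process_in_batches(items, handle_item, pause_s=0.01):
        # Do one unit of work, then briefly hand control back to the
        # reactor, like the INITIAL_ROOM_SLEEP_MS pauses above.
        for item in items:
            yield handle_item(item)
            yield task.deferLater(reactor, pause_s, lambda: None)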
Example 2
@defer.inlineCallbacks
def send_event_to_master(client, host, port, requester, event, context,
                         ratelimit, extra_users):
    """Send event to be handled on the master

    Args:
        client (SimpleHttpClient)
        host (str): host of master
        port (int): port on master listening for HTTP replication
        requester (Requester)
        event (FrozenEvent)
        context (EventContext)
        ratelimit (bool)
        extra_users (list(UserID)): Any extra users to notify about event
    """
    uri = "http://%s:%s/_synapse/replication/send_event/%s" % (
        host, port, event.event_id,
    )

    payload = {
        "event": event.get_pdu_json(),
        "internal_metadata": event.internal_metadata.get_dict(),
        "rejected_reason": event.rejected_reason,
        "context": context.serialize(event),
        "requester": requester.serialize(),
        "ratelimit": ratelimit,
        "extra_users": [u.to_string() for u in extra_users],
    }

    try:
        # We keep retrying the same request for timeouts. This is so that we
        # have a good idea that the request has either succeeded or failed on
        # the master, and so whether we should clean up or not.
        while True:
            try:
                result = yield client.put_json(uri, payload)
                break
            except CodeMessageException as e:
                if e.code != 504:
                    raise

            logger.warn("send_event request timed out")

            # If we timed out we probably don't need to worry about backing
            # off too much, but let's just wait a little anyway.
            yield sleep(1)
    except MatrixCodeMessageException as e:
        # We convert to SynapseError as we know that it was a SynapseError
        # on the master process that we should send to the client. (And
        # importantly, not stack traces everywhere)
        raise SynapseError(e.code, e.msg, e.errcode)
    defer.returnValue(result)
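
Here sleep is part of a retry-until-definitive loop: only 504 gateway timeouts are retried, so the worker eventually knows whether the master really processed the event. A generic sketch of the same control flow (GatewayTimeout and put_until_definitive are illustrative stand-ins, assuming plain Twisted):

    from twisted.internet import defer, reactor, task

    class GatewayTimeout(Exception):
        # stand-in for the e.code == 504 case above
        pass

    @defer.inlineCallbacks
    def put_until_definitive(put_json, uri, payload):
        while True:
            try:
                result = yield put_json(uri, payload)
            except GatewayTimeout:
                # wait a beat, then retry the identical request
                yield task.deferLater(reactor, 1, lambda: None)
                continue
            defer.returnValue(result)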
Example 3
    @defer.inlineCallbacks
    def test_sleep(self):
        @defer.inlineCallbacks
        def competing_callback():
            with LoggingContext() as competing_context:
                competing_context.test_key = "competing"
                yield sleep(0)
                self._check_test_key("competing")

        reactor.callLater(0, competing_callback)

        with LoggingContext() as context_one:
            context_one.test_key = "one"
            yield sleep(0)
            self._check_test_key("one")
Example 4
    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
            except Exception:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)
Example 5
    @defer.inlineCallbacks
    def _rotate_notifs(self):
        if self._doing_notif_rotation or self.stream_ordering_day_ago is None:
            return
        self._doing_notif_rotation = True

        try:
            while True:
                logger.info("Rotating notifications")

                caught_up = yield self.runInteraction(
                    "_rotate_notifs",
                    self._rotate_notifs_txn
                )
                if caught_up:
                    break
                yield sleep(5)
        finally:
            self._doing_notif_rotation = False
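
This is a small "repeat until caught up" loop: do a bounded chunk of work, then sleep between chunks until the work function reports completion. As a generic sketch (run_until_caught_up and step are illustrative names, not Synapse APIs):

    from twisted.internet import defer, reactor, task

    @defer.inlineCallbacks
    def run_until_caught_up(step, pause_s=5):
        while True:
            caught_up = yield step()   # one bounded chunk of work
            if caught_up:
                break
            yield task.deferLater(reactor, pause_s, lambda: None)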
Example 6
    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        appservice_handler = self.get_application_service_handler()

        @defer.inlineCallbacks
        def replicate(results):
            stream = results.get("events")
            if stream:
                max_stream_id = stream["position"]
                yield appservice_handler.notify_interested_services(max_stream_id)

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
                replicate(result)
            except Exception:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(30)
Example 7
    @defer.inlineCallbacks
    def _create_request(self, destination, method, path_bytes,
                        body_callback, headers_dict={}, param_bytes=b"",
                        query_bytes=b"", retry_on_dns_fail=True):
        """ Creates and sends a request to the given url
        """
        headers_dict[b"User-Agent"] = [self.version_string]
        headers_dict[b"Host"] = [destination]

        url_bytes = urlparse.urlunparse(
            ("", "", path_bytes, param_bytes, query_bytes, "",)
        )

        logger.info("Sending request to %s: %s %s",
                    destination, method, url_bytes)

        logger.debug(
            "Types: %s",
            [
                type(destination), type(method), type(path_bytes),
                type(param_bytes),
                type(query_bytes)
            ]
        )

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        retries_left = 5

        endpoint = self._getEndpoint(reactor, destination)

        while True:
            producer = None
            if body_callback:
                producer = body_callback(method, url_bytes, headers_dict)

            try:
                with PreserveLoggingContext():
                    request_deferred = self.agent.request(
                        destination,
                        endpoint,
                        method,
                        path_bytes,
                        param_bytes,
                        query_bytes,
                        Headers(headers_dict),
                        producer
                    )

                    response = yield self.clock.time_bound_deferred(
                        request_deferred,
                        time_out=60,
                    )

                logger.debug("Got response to %s", method)
                break
            except Exception as e:
                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                    logger.warn(
                        "DNS Lookup failed to %s with %s",
                        destination,
                        e
                    )
                    raise

                logger.warn(
                    "Sending request failed to %s: %s %s: %s - %s",
                    destination,
                    method,
                    url_bytes,
                    type(e).__name__,
                    _flatten_response_never_received(e),
                )

                if retries_left:
                    yield sleep(2 ** (5 - retries_left))
                    retries_left -= 1
                else:
                    raise

        logger.info(
            "Received response %d %s for %s: %s %s",
            response.code,
            response.phrase,
            destination,
            method,
            url_bytes
        )

        if 200 <= response.code < 300:
            pass
        else:
            # :'(
            # Update transactions table?
            body = yield readBody(response)
            raise HttpResponseException(
                response.code, response.phrase, body
            )

        defer.returnValue(response)
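
With retries_left counting down from 5, the sleep(2 ** (5 - retries_left)) above waits 1, 2, 4, 8 and finally 16 seconds between successive attempts before the error is re-raised:

    delays = [2 ** (5 - retries_left) for retries_left in range(5, 0, -1)]
    assert delays == [1, 2, 4, 8, 16]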
Example 8
    @defer.inlineCallbacks
    def _create_request(self, destination, method, path_bytes,
                        body_callback, headers_dict={}, param_bytes=b"",
                        query_bytes=b"", retry_on_dns_fail=True,
                        timeout=None, long_retries=False):
        """ Creates and sends a request to the given url
        """
        headers_dict[b"User-Agent"] = [self.version_string]
        headers_dict[b"Host"] = [destination]

        url_bytes = self._create_url(
            destination, path_bytes, param_bytes, query_bytes
        )

        txn_id = "%s-O-%s" % (method, self._next_id)
        self._next_id = (self._next_id + 1) % (sys.maxint - 1)

        outbound_logger.info(
            "{%s} [%s] Sending request: %s %s",
            txn_id, destination, method, url_bytes
        )

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        http_url_bytes = urlparse.urlunparse(
            ("", "", path_bytes, param_bytes, query_bytes, "")
        )

        log_result = None
        try:
            while True:
                producer = None
                if body_callback:
                    producer = body_callback(method, http_url_bytes, headers_dict)

                try:
                    def send_request():
                        request_deferred = preserve_context_over_fn(
                            self.agent.request,
                            method,
                            url_bytes,
                            Headers(headers_dict),
                            producer
                        )

                        return self.clock.time_bound_deferred(
                            request_deferred,
                            time_out=timeout/1000. if timeout else 60,
                        )

                    response = yield preserve_context_over_fn(
                        send_request,
                    )

                    log_result = "%d %s" % (response.code, response.phrase,)
                    break
                except Exception as e:
                    if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                        logger.warn(
                            "DNS Lookup failed to %s with %s",
                            destination,
                            e
                        )
                        log_result = "DNS Lookup failed to %s with %s" % (
                            destination, e
                        )
                        raise

                    logger.warn(
                        "{%s} Sending request failed to %s: %s %s: %s - %s",
                        txn_id,
                        destination,
                        method,
                        url_bytes,
                        type(e).__name__,
                        _flatten_response_never_received(e),
                    )

                    log_result = "%s - %s" % (
                        type(e).__name__, _flatten_response_never_received(e),
                    )

                    if retries_left and not timeout:
                        if long_retries:
                            delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        yield sleep(delay)
                        retries_left -= 1
                    else:
                        raise
        finally:
            outbound_logger.info(
                "{%s} [%s] Result: %s",
                txn_id,
                destination,
                log_result,
            )

        if 200 <= response.code < 300:
            pass
        else:
            # :'(
            # Update transactions table?
            body = yield preserve_context_over_fn(readBody, response)
            raise HttpResponseException(
                response.code, response.phrase, body
            )

        defer.returnValue(response)
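
The backoff above grows exponentially, is capped (60s for long retries, 2s for short), and is then jittered by a uniform factor in [0.8, 1.4) so that concurrent requests to a struggling host do not retry in lockstep. Pulled out on its own (the constant values here are assumptions for illustration; the real MAX_LONG_RETRIES / MAX_SHORT_RETRIES live elsewhere in the module):

    import random

    MAX_LONG_RETRIES = 10    # assumed value, for illustration only
    MAX_SHORT_RETRIES = 3    # assumed value, for illustration only

    def retry_delay(retries_left, long_retries):
        if long_retries:
            delay = min(4 ** (MAX_LONG_RETRIES + 1 - retries_left), 60)
        else:
            delay = min(0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left), 2)
        # jitter so concurrent retries spread out
        return delay * random.uniform(0.8, 1.4)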
Example 9
    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        clock = self.get_clock()
        notifier = self.get_notifier()
        presence_handler = self.get_presence_handler()
        typing_handler = self.get_typing_handler()

        def expire_broken_caches():
            store.who_forgot_in_room.invalidate_all()
            store.get_presence_list_accepted.invalidate_all()

        def notify_from_stream(
            result, stream_name, stream_key, room=None, user=None
        ):
            stream = result.get(stream_name)
            if stream:
                position_index = stream["field_names"].index("position")
                if room:
                    room_index = stream["field_names"].index(room)
                if user:
                    user_index = stream["field_names"].index(user)

                users = ()
                rooms = ()
                for row in stream["rows"]:
                    position = row[position_index]

                    if user:
                        users = (row[user_index],)

                    if room:
                        rooms = (row[room_index],)

                    notifier.on_new_event(
                        stream_key, position, users=users, rooms=rooms
                    )

        def notify(result):
            stream = result.get("events")
            if stream:
                max_position = stream["position"]
                for row in stream["rows"]:
                    position = row[0]
                    internal = json.loads(row[1])
                    event_json = json.loads(row[2])
                    event = FrozenEvent(event_json, internal_metadata_dict=internal)
                    extra_users = ()
                    if event.type == EventTypes.Member:
                        extra_users = (event.state_key,)
                    notifier.on_new_room_event(
                        event, position, max_position, extra_users
                    )

            notify_from_stream(
                result, "push_rules", "push_rules_key", user="******"
            )
            notify_from_stream(
                result, "user_account_data", "account_data_key", user="******"
            )
            notify_from_stream(
                result, "room_account_data", "account_data_key", user="******"
            )
            notify_from_stream(
                result, "tag_account_data", "account_data_key", user="******"
            )
            notify_from_stream(
                result, "receipts", "receipt_key", room="room_id"
            )
            notify_from_stream(
                result, "typing", "typing_key", room="room_id"
            )

        next_expire_broken_caches_ms = 0
        while True:
            try:
                args = store.stream_positions()
                args.update(typing_handler.stream_positions())
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                now_ms = clock.time_msec()
                if now_ms > next_expire_broken_caches_ms:
                    expire_broken_caches()
                    next_expire_broken_caches_ms = (
                        now_ms + store.BROKEN_CACHE_EXPIRY_MS
                    )
                yield store.process_replication(result)
                typing_handler.process_replication(result)
                presence_handler.process_replication(result)
                notify(result)
            except Exception:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)
Example 10
    @defer.inlineCallbacks
    def _request(self,
                 destination,
                 method,
                 path,
                 body_callback,
                 headers_dict={},
                 param_bytes=b"",
                 query_bytes=b"",
                 retry_on_dns_fail=True,
                 timeout=None,
                 long_retries=False,
                 ignore_backoff=False,
                 backoff_on_404=False):
        """ Creates and sends a request to the given server
        Args:
            destination (str): The remote server to send the HTTP request to.
            method (str): HTTP method
            path (str): The HTTP path
            ignore_backoff (bool): true to ignore the historical backoff data
                and try the request anyway.
            backoff_on_404 (bool): Back off if we get a 404

        Returns:
            Deferred: resolves with the http response object on success.

            Fails with ``HttpResponseException``: if we get an HTTP response
                code >= 300.

            Fails with ``NotRetryingDestination`` if we are not yet ready
                to retry this server.

            Fails with ``FederationDeniedError`` if this destination
                is not on our federation whitelist

            (May also fail with plenty of other Exceptions for things like DNS
                failures, connection failures, SSL failures.)
        """
        if (self.hs.config.federation_domain_whitelist and destination
                not in self.hs.config.federation_domain_whitelist):
            raise FederationDeniedError(destination)

        limiter = yield synapse.util.retryutils.get_retry_limiter(
            destination,
            self.clock,
            self._store,
            backoff_on_404=backoff_on_404,
            ignore_backoff=ignore_backoff,
        )

        destination = destination.encode("ascii")
        path_bytes = path.encode("ascii")
        with limiter:
            headers_dict[b"User-Agent"] = [self.version_string]
            headers_dict[b"Host"] = [destination]

            url_bytes = self._create_url(destination, path_bytes, param_bytes,
                                         query_bytes)

            txn_id = "%s-O-%s" % (method, self._next_id)
            self._next_id = (self._next_id + 1) % (sys.maxint - 1)

            outbound_logger.info("{%s} [%s] Sending request: %s %s", txn_id,
                                 destination, method, url_bytes)

            # XXX: Would be much nicer to retry only at the transaction-layer
            # (once we have reliable transactions in place)
            if long_retries:
                retries_left = MAX_LONG_RETRIES
            else:
                retries_left = MAX_SHORT_RETRIES

            http_url_bytes = urlparse.urlunparse(
                ("", "", path_bytes, param_bytes, query_bytes, ""))

            log_result = None
            try:
                while True:
                    producer = None
                    if body_callback:
                        producer = body_callback(method, http_url_bytes,
                                                 headers_dict)

                    try:

                        def send_request():
                            request_deferred = self.agent.request(
                                method, url_bytes, Headers(headers_dict),
                                producer)

                            return self.clock.time_bound_deferred(
                                request_deferred,
                                time_out=timeout / 1000. if timeout else 60,
                            )

                        with logcontext.PreserveLoggingContext():
                            response = yield send_request()

                        log_result = "%d %s" % (
                            response.code,
                            response.phrase,
                        )
                        break
                    except Exception as e:
                        if not retry_on_dns_fail and isinstance(
                                e, DNSLookupError):
                            logger.warn("DNS Lookup failed to %s with %s",
                                        destination, e)
                            log_result = "DNS Lookup failed to %s with %s" % (
                                destination, e)
                            raise

                        logger.warn(
                            "{%s} Sending request failed to %s: %s %s: %s",
                            txn_id,
                            destination,
                            method,
                            url_bytes,
                            _flatten_response_never_received(e),
                        )

                        log_result = _flatten_response_never_received(e)

                        if retries_left and not timeout:
                            if long_retries:
                                delay = 4**(MAX_LONG_RETRIES + 1 -
                                            retries_left)
                                delay = min(delay, 60)
                                delay *= random.uniform(0.8, 1.4)
                            else:
                                delay = 0.5 * 2**(MAX_SHORT_RETRIES -
                                                  retries_left)
                                delay = min(delay, 2)
                                delay *= random.uniform(0.8, 1.4)

                            yield sleep(delay)
                            retries_left -= 1
                        else:
                            raise
            finally:
                outbound_logger.info(
                    "{%s} [%s] Result: %s",
                    txn_id,
                    destination,
                    log_result,
                )

            if 200 <= response.code < 300:
                pass
            else:
                # :'(
                # Update transactions table?
                with logcontext.PreserveLoggingContext():
                    body = yield readBody(response)
                raise HttpResponseException(response.code, response.phrase,
                                            body)

            defer.returnValue(response)
Example 11
    @defer.inlineCallbacks
    def competing_callback():
        with LoggingContext() as competing_context:
            competing_context.test_key = "competing"
            yield sleep(0)
            self._check_test_key("competing")
Example 12
    @defer.inlineCallbacks
    def _request(self, destination, method, path,
                 body_callback, headers_dict={}, param_bytes=b"",
                 query_bytes=b"", retry_on_dns_fail=True,
                 timeout=None, long_retries=False,
                 ignore_backoff=False,
                 backoff_on_404=False):
        """ Creates and sends a request to the given server
        Args:
            destination (str): The remote server to send the HTTP request to.
            method (str): HTTP method
            path (str): The HTTP path
            ignore_backoff (bool): true to ignore the historical backoff data
                and try the request anyway.
            backoff_on_404 (bool): Back off if we get a 404

        Returns:
            Deferred: resolves with the http response object on success.

            Fails with ``HttpResponseException``: if we get an HTTP response
                code >= 300.

            Fails with ``NotRetryingDestination`` if we are not yet ready
                to retry this server.

            Fails with ``FederationDeniedError`` if this destination
                is not on our federation whitelist

            (May also fail with plenty of other Exceptions for things like DNS
                failures, connection failures, SSL failures.)
        """
        if (
            self.hs.config.federation_domain_whitelist and
            destination not in self.hs.config.federation_domain_whitelist
        ):
            raise FederationDeniedError(destination)

        limiter = yield synapse.util.retryutils.get_retry_limiter(
            destination,
            self.clock,
            self._store,
            backoff_on_404=backoff_on_404,
            ignore_backoff=ignore_backoff,
        )

        destination = destination.encode("ascii")
        path_bytes = path.encode("ascii")
        with limiter:
            headers_dict[b"User-Agent"] = [self.version_string]
            headers_dict[b"Host"] = [destination]

            url_bytes = self._create_url(
                destination, path_bytes, param_bytes, query_bytes
            )

            txn_id = "%s-O-%s" % (method, self._next_id)
            self._next_id = (self._next_id + 1) % (sys.maxint - 1)

            outbound_logger.info(
                "{%s} [%s] Sending request: %s %s",
                txn_id, destination, method, url_bytes
            )

            # XXX: Would be much nicer to retry only at the transaction-layer
            # (once we have reliable transactions in place)
            if long_retries:
                retries_left = MAX_LONG_RETRIES
            else:
                retries_left = MAX_SHORT_RETRIES

            http_url_bytes = urlparse.urlunparse(
                ("", "", path_bytes, param_bytes, query_bytes, "")
            )

            log_result = None
            try:
                while True:
                    producer = None
                    if body_callback:
                        producer = body_callback(method, http_url_bytes, headers_dict)

                    try:
                        request_deferred = self.agent.request(
                            method,
                            url_bytes,
                            Headers(headers_dict),
                            producer
                        )
                        add_timeout_to_deferred(
                            request_deferred,
                            timeout / 1000. if timeout else 60,
                            cancelled_to_request_timed_out_error,
                        )
                        response = yield make_deferred_yieldable(
                            request_deferred,
                        )

                        log_result = "%d %s" % (response.code, response.phrase,)
                        break
                    except Exception as e:
                        if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                            logger.warn(
                                "DNS Lookup failed to %s with %s",
                                destination,
                                e
                            )
                            log_result = "DNS Lookup failed to %s with %s" % (
                                destination, e
                            )
                            raise

                        logger.warn(
                            "{%s} Sending request failed to %s: %s %s: %s",
                            txn_id,
                            destination,
                            method,
                            url_bytes,
                            _flatten_response_never_received(e),
                        )

                        log_result = _flatten_response_never_received(e)

                        if retries_left and not timeout:
                            if long_retries:
                                delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                                delay = min(delay, 60)
                                delay *= random.uniform(0.8, 1.4)
                            else:
                                delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                                delay = min(delay, 2)
                                delay *= random.uniform(0.8, 1.4)

                            yield sleep(delay)
                            retries_left -= 1
                        else:
                            raise
            finally:
                outbound_logger.info(
                    "{%s} [%s] Result: %s",
                    txn_id,
                    destination,
                    log_result,
                )

            if 200 <= response.code < 300:
                pass
            else:
                # :'(
                # Update transactions table?
                with logcontext.PreserveLoggingContext():
                    body = yield readBody(response)
                raise HttpResponseException(
                    response.code, response.phrase, body
                )

            defer.returnValue(response)
Example 13
    @defer.inlineCallbacks
    def _create_request(self,
                        destination,
                        method,
                        path_bytes,
                        body_callback,
                        headers_dict={},
                        param_bytes=b"",
                        query_bytes=b"",
                        retry_on_dns_fail=True):
        """ Creates and sends a request to the given url
        """
        headers_dict[b"User-Agent"] = [self.version_string]
        headers_dict[b"Host"] = [destination]

        url_bytes = urlparse.urlunparse((
            "",
            "",
            path_bytes,
            param_bytes,
            query_bytes,
            "",
        ))

        logger.info("Sending request to %s: %s %s", destination, method,
                    url_bytes)

        logger.debug("Types: %s", [
            type(destination),
            type(method),
            type(path_bytes),
            type(param_bytes),
            type(query_bytes)
        ])

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        retries_left = 5

        endpoint = self._getEndpoint(reactor, destination)

        while True:
            producer = None
            if body_callback:
                producer = body_callback(method, url_bytes, headers_dict)

            try:
                with PreserveLoggingContext():
                    request_deferred = self.agent.request(
                        destination, endpoint, method, path_bytes, param_bytes,
                        query_bytes, Headers(headers_dict), producer)

                    response = yield self.clock.time_bound_deferred(
                        request_deferred,
                        time_out=60,
                    )

                logger.debug("Got response to %s", method)
                break
            except Exception as e:
                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                    logger.warn("DNS Lookup failed to %s with %s", destination,
                                e)
                    raise

                logger.warn(
                    "Sending request failed to %s: %s %s: %s - %s",
                    destination,
                    method,
                    url_bytes,
                    type(e).__name__,
                    _flatten_response_never_received(e),
                )

                if retries_left:
                    yield sleep(2**(5 - retries_left))
                    retries_left -= 1
                else:
                    raise

        logger.info("Received response %d %s for %s: %s %s", response.code,
                    response.phrase, destination, method, url_bytes)

        if 200 <= response.code < 300:
            pass
        else:
            # :'(
            # Update transactions table?
            body = yield readBody(response)
            raise HttpResponseException(response.code, response.phrase, body)

        defer.returnValue(response)
Example 14
    @defer.inlineCallbacks
    def blocking_function():
        yield sleep(0)
Example 15
    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        notifier = self.get_notifier()
        presence_handler = self.get_presence_handler()
        typing_handler = self.get_typing_handler()

        def notify_from_stream(result,
                               stream_name,
                               stream_key,
                               room=None,
                               user=None):
            stream = result.get(stream_name)
            if stream:
                position_index = stream["field_names"].index("position")
                if room:
                    room_index = stream["field_names"].index(room)
                if user:
                    user_index = stream["field_names"].index(user)

                users = ()
                rooms = ()
                for row in stream["rows"]:
                    position = row[position_index]

                    if user:
                        users = (row[user_index], )

                    if room:
                        rooms = (row[room_index], )

                    notifier.on_new_event(stream_key,
                                          position,
                                          users=users,
                                          rooms=rooms)

        @defer.inlineCallbacks
        def notify_device_list_update(result):
            stream = result.get("device_lists")
            if not stream:
                return

            position_index = stream["field_names"].index("position")
            user_index = stream["field_names"].index("user_id")

            for row in stream["rows"]:
                position = row[position_index]
                user_id = row[user_index]

                room_ids = yield store.get_rooms_for_user(user_id)

                notifier.on_new_event(
                    "device_list_key",
                    position,
                    rooms=room_ids,
                )

        @defer.inlineCallbacks
        def notify(result):
            stream = result.get("events")
            if stream:
                max_position = stream["position"]

                event_map = yield store.get_events(
                    [row[1] for row in stream["rows"]])

                for row in stream["rows"]:
                    position = row[0]
                    event_id = row[1]
                    event = event_map.get(event_id, None)
                    if not event:
                        continue

                    extra_users = ()
                    if event.type == EventTypes.Member:
                        extra_users = (event.state_key, )
                    notifier.on_new_room_event(event, position, max_position,
                                               extra_users)

            notify_from_stream(result,
                               "push_rules",
                               "push_rules_key",
                               user="******")
            notify_from_stream(result,
                               "user_account_data",
                               "account_data_key",
                               user="******")
            notify_from_stream(result,
                               "room_account_data",
                               "account_data_key",
                               user="******")
            notify_from_stream(result,
                               "tag_account_data",
                               "account_data_key",
                               user="******")
            notify_from_stream(result,
                               "receipts",
                               "receipt_key",
                               room="room_id")
            notify_from_stream(result, "typing", "typing_key", room="room_id")
            notify_from_stream(result,
                               "to_device",
                               "to_device_key",
                               user="******")
            yield notify_device_list_update(result)

        while True:
            try:
                args = store.stream_positions()
                args.update(typing_handler.stream_positions())
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
                typing_handler.process_replication(result)
                yield presence_handler.process_replication(result)
                yield notify(result)
            except Exception:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)
Example 16
    @defer.inlineCallbacks
    def _create_request(self, destination, method, path_bytes,
                        body_callback, headers_dict={}, param_bytes=b"",
                        query_bytes=b"", retry_on_dns_fail=True):
        """ Creates and sends a request to the given url
        """
        headers_dict[b"User-Agent"] = [b"Synapse"]
        headers_dict[b"Host"] = [destination]

        url_bytes = urlparse.urlunparse(
            ("", "", path_bytes, param_bytes, query_bytes, "",)
        )

        logger.info("Sending request to %s: %s %s",
                    destination, method, url_bytes)

        logger.debug(
            "Types: %s",
            [
                type(destination), type(method), type(path_bytes),
                type(param_bytes),
                type(query_bytes)
            ]
        )

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        retries_left = 5

        endpoint = self._getEndpoint(reactor, destination)

        while True:
            producer = None
            if body_callback:
                producer = body_callback(method, url_bytes, headers_dict)

            try:
                with PreserveLoggingContext():
                    response = yield self.agent.request(
                        destination,
                        endpoint,
                        method,
                        path_bytes,
                        param_bytes,
                        query_bytes,
                        Headers(headers_dict),
                        producer
                    )

                logger.debug("Got response to %s", method)
                break
            except Exception as e:
                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                    logger.warn(
                        "DNS Lookup failed to %s with %s",
                        destination,
                        e
                    )
                    raise SynapseError(400, "Domain specified not found.")

                logger.warn(
                    "Sending request failed to %s: %s %s : %s",
                    destination,
                    method,
                    url_bytes,
                    e
                )
                _print_ex(e)

                if retries_left:
                    yield sleep(2 ** (5 - retries_left))
                    retries_left -= 1
                else:
                    raise

        logger.info(
            "Received response %d %s for %s: %s %s",
            response.code,
            response.phrase,
            destination,
            method,
            url_bytes
        )

        if 200 <= response.code < 300:
            # We need to update the transactions table to say it was sent?
            pass
        else:
            # :'(
            # Update transactions table?
            raise CodeMessageException(
                response.code, response.phrase
            )

        defer.returnValue(response)
Example 17
    @defer.inlineCallbacks
    def _create_request(self,
                        destination,
                        method,
                        path_bytes,
                        body_callback,
                        headers_dict={},
                        param_bytes=b"",
                        query_bytes=b"",
                        retry_on_dns_fail=True,
                        timeout=None,
                        long_retries=False):
        """ Creates and sends a request to the given url
        """
        headers_dict[b"User-Agent"] = [self.version_string]
        headers_dict[b"Host"] = [destination]

        url_bytes = self._create_url(destination, path_bytes, param_bytes,
                                     query_bytes)

        txn_id = "%s-O-%s" % (method, self._next_id)
        self._next_id = (self._next_id + 1) % (sys.maxint - 1)

        outbound_logger.info("{%s} [%s] Sending request: %s %s", txn_id,
                             destination, method, url_bytes)

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        http_url_bytes = urlparse.urlunparse(
            ("", "", path_bytes, param_bytes, query_bytes, ""))

        log_result = None
        try:
            while True:
                producer = None
                if body_callback:
                    producer = body_callback(method, http_url_bytes,
                                             headers_dict)

                try:

                    def send_request():
                        request_deferred = preserve_context_over_fn(
                            self.agent.request, method, url_bytes,
                            Headers(headers_dict), producer)

                        return self.clock.time_bound_deferred(
                            request_deferred,
                            time_out=timeout / 1000. if timeout else 60,
                        )

                    response = yield preserve_context_over_fn(send_request, )

                    log_result = "%d %s" % (
                        response.code,
                        response.phrase,
                    )
                    break
                except Exception as e:
                    if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                        logger.warn("DNS Lookup failed to %s with %s",
                                    destination, e)
                        log_result = "DNS Lookup failed to %s with %s" % (
                            destination, e)
                        raise

                    logger.warn(
                        "{%s} Sending request failed to %s: %s %s: %s - %s",
                        txn_id,
                        destination,
                        method,
                        url_bytes,
                        type(e).__name__,
                        _flatten_response_never_received(e),
                    )

                    log_result = "%s - %s" % (
                        type(e).__name__,
                        _flatten_response_never_received(e),
                    )

                    if retries_left and not timeout:
                        if long_retries:
                            delay = 4**(MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2**(MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        yield sleep(delay)
                        retries_left -= 1
                    else:
                        raise
        finally:
            outbound_logger.info(
                "{%s} [%s] Result: %s",
                txn_id,
                destination,
                log_result,
            )

        if 200 <= response.code < 300:
            pass
        else:
            # :'(
            # Update transactions table?
            body = yield preserve_context_over_fn(readBody, response)
            raise HttpResponseException(response.code, response.phrase, body)

        defer.returnValue(response)
Example 18
    @defer.inlineCallbacks
    def _handle_initial_room(self, room_id):
        """Called when we initially fill out user_directory one room at a time
        """
        is_in_room = yield self.store.is_host_joined(room_id, self.server_name)
        if not is_in_room:
            return

        is_public = yield self.store.is_room_world_readable_or_publicly_joinable(room_id)

        users_with_profile = yield self.state.get_current_user_in_room(room_id)
        user_ids = set(users_with_profile)
        unhandled_users = user_ids - self.initially_handled_users

        yield self.store.add_profiles_to_user_dir(
            room_id, {
                user_id: users_with_profile[user_id] for user_id in unhandled_users
            }
        )

        self.initially_handled_users |= unhandled_users

        if is_public:
            yield self.store.add_users_to_public_room(
                room_id,
                user_ids=user_ids - self.initially_handled_users_in_public
            )
            self.initially_handled_users_in_public |= user_ids

        # We now go and figure out the new users who share rooms with user entries
        # We sleep aggressively here as otherwise it can starve resources.
        # We also batch up inserts/updates, but try to avoid too many at once.
        to_insert = set()
        to_update = set()
        count = 0
        for user_id in user_ids:
            if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
                yield sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)

            if not self.is_mine_id(user_id):
                count += 1
                continue

            if self.store.get_if_app_services_interested_in_user(user_id):
                count += 1
                continue

            for other_user_id in user_ids:
                if user_id == other_user_id:
                    continue

                if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
                    yield sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
                count += 1

                user_set = (user_id, other_user_id)

                if user_set in self.initially_handled_users_share_private_room:
                    continue

                if user_set in self.initially_handled_users_share:
                    if is_public:
                        continue
                    to_update.add(user_set)
                else:
                    to_insert.add(user_set)

                if is_public:
                    self.initially_handled_users_share.add(user_set)
                else:
                    self.initially_handled_users_share_private_room.add(user_set)

                if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
                    yield self.store.add_users_who_share_room(
                        room_id, not is_public, to_insert,
                    )
                    to_insert.clear()

                if len(to_update) > self.INITIAL_ROOM_BATCH_SIZE:
                    yield self.store.update_users_who_share_room(
                        room_id, not is_public, to_update,
                    )
                    to_update.clear()

        if to_insert:
            yield self.store.add_users_who_share_room(
                room_id, not is_public, to_insert,
            )
            to_insert.clear()

        if to_update:
            yield self.store.update_users_who_share_room(
                room_id, not is_public, to_update,
            )
            to_update.clear()
Example 19
    @defer.inlineCallbacks
    def blocking_function():
        yield sleep(0)
Example 20
    def _on_enter(self, request_id):
        time_now = self.clock.time_msec()
        self.request_times[:] = [
            r for r in self.request_times
            if time_now - r < self.window_size
        ]

        queue_size = len(self.ready_request_queue) + len(self.sleeping_requests)
        if queue_size > self.reject_limit:
            raise LimitExceededError(
                retry_after_ms=int(
                    self.window_size / self.sleep_limit
                ),
            )

        self.request_times.append(time_now)

        def queue_request():
            if len(self.current_processing) > self.concurrent_requests:
                logger.debug("Ratelimit [%s]: Queue req", id(request_id))
                queue_defer = defer.Deferred()
                self.ready_request_queue[request_id] = queue_defer
                return queue_defer
            else:
                return defer.succeed(None)

        logger.debug(
            "Ratelimit [%s]: len(self.request_times)=%d",
            id(request_id), len(self.request_times),
        )

        if len(self.request_times) > self.sleep_limit:
            logger.debug(
                "Ratelimit [%s]: sleeping req",
                id(request_id),
            )
            ret_defer = sleep(self.sleep_msec/1000.0)

            self.sleeping_requests.add(request_id)

            def on_wait_finished(_):
                logger.debug(
                    "Ratelimit [%s]: Finished sleeping",
                    id(request_id),
                )
                self.sleeping_requests.discard(request_id)
                queue_defer = queue_request()
                return queue_defer

            ret_defer.addBoth(on_wait_finished)
        else:
            ret_defer = queue_request()

        def on_start(r):
            logger.debug(
                "Ratelimit [%s]: Processing req",
                id(request_id),
            )
            self.current_processing.add(request_id)
            return r

        def on_err(r):
            self.current_processing.discard(request_id)
            return r

        def on_both(r):
            # Ensure that we've properly cleaned up.
            self.sleeping_requests.discard(request_id)
            self.ready_request_queue.pop(request_id, None)
            return r

        ret_defer.addCallbacks(on_start, on_err)
        ret_defer.addBoth(on_both)
        return ret_defer
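
Unlike the other examples, this one never yields the Deferred returned by sleep; it chains callbacks onto it so the queued request is only released once the wait has finished. The same shape in miniature (sleep here is a deferLater-based stand-in, not Synapse's helper):

    from twisted.internet import defer, reactor, task

    def sleep(seconds):
        return task.deferLater(reactor, seconds, lambda: None)

    def queue_request():
        # stand-in for the ratelimiter's queue_request above
        return defer.succeed(None)

    d = sleep(0.05)
    # fires after the sleep completes, success or failure, like
    # on_wait_finished above
    d.addBoth(lambda _: queue_request())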
Example 21
    def _create_request(self, destination, method, path_bytes, param_bytes=b"",
                        query_bytes=b"", producer=None, headers_dict={},
                        retry_on_dns_fail=True, on_send_callback=None):
        """ Creates and sends a request to the given url
        """
        headers_dict[b"User-Agent"] = [b"Synapse"]
        headers_dict[b"Host"] = [destination]

        logger.debug("Sending request to %s: %s %s;%s?%s",
                     destination, method, path_bytes, param_bytes, query_bytes)

        logger.debug(
            "Types: %s",
            [
                type(destination), type(method), type(path_bytes),
                type(param_bytes),
                type(query_bytes)
            ]
        )

        retries_left = 5

        # TODO: setup and pass in an ssl_context to enable TLS
        endpoint = self._getEndpoint(reactor, destination)

        while True:
            if on_send_callback:
                on_send_callback(destination, method, path_bytes, producer)

            try:
                response = yield self.agent.request(
                    destination,
                    endpoint,
                    method,
                    path_bytes,
                    param_bytes,
                    query_bytes,
                    Headers(headers_dict),
                    producer
                )

                logger.debug("Got response to %s", method)
                break
            except Exception as e:
                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                    logger.warn("DNS Lookup failed to %s with %s", destination,
                                e)
                    raise SynapseError(400, "Domain specified not found.")

                logger.exception("Got error in _create_request")
                _print_ex(e)

                if retries_left:
                    yield sleep(2 ** (5 - retries_left))
                    retries_left -= 1
                else:
                    raise

        if 200 <= response.code < 300:
            # We need to update the transactions table to say it was sent?
            pass
        else:
            # :'(
            # Update transactions table?
            logger.error(
                "Got response %d %s", response.code, response.phrase
            )
            raise CodeMessageException(
                response.code, response.phrase
            )

        defer.returnValue(response)
Example 22
    @defer.inlineCallbacks
    def _create_request(self,
                        destination,
                        method,
                        path_bytes,
                        param_bytes=b"",
                        query_bytes=b"",
                        producer=None,
                        headers_dict={},
                        retry_on_dns_fail=True,
                        on_send_callback=None):
        """ Creates and sends a request to the given url
        """
        headers_dict[b"User-Agent"] = [b"Synapse"]
        headers_dict[b"Host"] = [destination]

        logger.debug("Sending request to %s: %s %s;%s?%s", destination, method,
                     path_bytes, param_bytes, query_bytes)

        logger.debug("Types: %s", [
            type(destination),
            type(method),
            type(path_bytes),
            type(param_bytes),
            type(query_bytes)
        ])

        retries_left = 5

        # TODO: setup and pass in an ssl_context to enable TLS
        endpoint = self._getEndpoint(reactor, destination)

        while True:
            if on_send_callback:
                on_send_callback(destination, method, path_bytes, producer)

            try:
                response = yield self.agent.request(destination, endpoint,
                                                    method, path_bytes,
                                                    param_bytes, query_bytes,
                                                    Headers(headers_dict),
                                                    producer)

                logger.debug("Got response to %s", method)
                break
            except Exception as e:
                if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                    logger.warn("DNS Lookup failed to %s with %s", destination,
                                e)
                    raise SynapseError(400, "Domain specified not found.")

                logger.exception("Got error in _create_request")
                _print_ex(e)

                if retries_left:
                    yield sleep(2**(5 - retries_left))
                    retries_left -= 1
                else:
                    raise

        if 200 <= response.code < 300:
            # We need to update the transactions table to say it was sent?
            pass
        else:
            # :'(
            # Update transactions table?
            logger.error("Got response %d %s", response.code, response.phrase)
            raise CodeMessageException(response.code, response.phrase)

        defer.returnValue(response)
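
A dict default argument such as headers_dict={} is evaluated once, at
function definition time; because the method writes User-Agent and Host into
it, every call would have mutated the same shared dict, which is why the code
above guards with headers_dict=None instead. A small self-contained
illustration of the pitfall (names are illustrative):

def add_headers_buggy(headers_dict={}):
    # BUG: the default dict is created once and shared between calls
    headers_dict[b"User-Agent"] = [b"Synapse"]
    return headers_dict

assert add_headers_buggy() is add_headers_buggy()  # same shared dict!

def add_headers_fixed(headers_dict=None):
    # A fresh dict per call, unless the caller supplies one
    if headers_dict is None:
        headers_dict = {}
    headers_dict[b"User-Agent"] = [b"Synapse"]
    return headers_dict

assert add_headers_fixed() is not add_headers_fixed()
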
Example no. 23
    @defer.inlineCallbacks
    def competing_callback():
        with LoggingContext() as competing_context:
            competing_context.test_key = "competing"
            yield sleep(0)
            self._check_test_key("competing")
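
The test above asserts that a value stored in one LoggingContext does not
leak into a concurrently running callback across the sleep(0) yield point. A
rough modern-Python analogue of the same guarantee, sketched with asyncio and
contextvars instead of Synapse's LoggingContext (all names here are
illustrative):

import asyncio
import contextvars

test_key = contextvars.ContextVar("test_key", default=None)

async def competing(value):
    # Each task runs in its own copy of the context, so the value set here
    # survives the await without leaking into the other task.
    test_key.set(value)
    await asyncio.sleep(0)
    assert test_key.get() == value

async def main():
    await asyncio.gather(competing("one"), competing("competing"))

asyncio.run(main())
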
Example no. 24
    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        notifier = self.get_notifier()
        presence_handler = self.get_presence_handler()
        typing_handler = self.get_typing_handler()

        def notify_from_stream(result,
                               stream_name,
                               stream_key,
                               room=None,
                               user=None):
            stream = result.get(stream_name)
            if stream:
                position_index = stream["field_names"].index("position")
                if room:
                    room_index = stream["field_names"].index(room)
                if user:
                    user_index = stream["field_names"].index(user)

                users = ()
                rooms = ()
                for row in stream["rows"]:
                    position = row[position_index]

                    if user:
                        users = (row[user_index], )

                    if room:
                        rooms = (row[room_index], )

                    notifier.on_new_event(stream_key,
                                          position,
                                          users=users,
                                          rooms=rooms)

        def notify(result):
            stream = result.get("events")
            if stream:
                max_position = stream["position"]
                for row in stream["rows"]:
                    position = row[0]
                    internal = json.loads(row[1])
                    event_json = json.loads(row[2])
                    event = FrozenEvent(event_json,
                                        internal_metadata_dict=internal)
                    extra_users = ()
                    if event.type == EventTypes.Member:
                        extra_users = (event.state_key, )
                    notifier.on_new_room_event(event, position, max_position,
                                               extra_users)

            notify_from_stream(result,
                               "push_rules",
                               "push_rules_key",
                               user="user_id")
            notify_from_stream(result,
                               "user_account_data",
                               "account_data_key",
                               user="user_id")
            notify_from_stream(result,
                               "room_account_data",
                               "account_data_key",
                               user="user_id")
            notify_from_stream(result,
                               "tag_account_data",
                               "account_data_key",
                               user="user_id")
            notify_from_stream(result,
                               "receipts",
                               "receipt_key",
                               room="room_id")
            notify_from_stream(result, "typing", "typing_key", room="room_id")
            notify_from_stream(result,
                               "to_device",
                               "to_device_key",
                               user="user_id")

        while True:
            try:
                args = store.stream_positions()
                args.update(typing_handler.stream_positions())
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
                typing_handler.process_replication(result)
                yield presence_handler.process_replication(result)
                notify(result)
            except Exception:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)
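
Each stream in the replication result is a dict carrying "field_names",
"rows" and "position", and notify_from_stream recovers column offsets with
index() rather than assuming fixed positions. A self-contained sketch of that
row decoding against a hand-built result (the stream contents are fabricated
for illustration):

result = {
    "receipts": {
        "field_names": ["position", "room_id", "receipt_type"],
        "position": 42,
        "rows": [
            [41, "!roomA:example.com", "m.read"],
            [42, "!roomB:example.com", "m.read"],
        ],
    },
}

stream = result.get("receipts")
if stream:
    position_index = stream["field_names"].index("position")
    room_index = stream["field_names"].index("room_id")
    for row in stream["rows"]:
        # Stand-in for the notifier.on_new_event("receipt_key", ...) call
        print("receipt_key", row[position_index], "rooms:", (row[room_index],))
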
Example no. 25
    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        pusher_pool = self.get_pusherpool()
        clock = self.get_clock()

        def stop_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
            pushers_for_user = pusher_pool.pushers.get(user_id, {})
            pusher = pushers_for_user.pop(key, None)
            if pusher is None:
                return
            logger.info("Stopping pusher %r / %r", user_id, key)
            pusher.on_stop()

        def start_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
            logger.info("Starting pusher %r / %r", user_id, key)
            return pusher_pool._refresh_pusher(app_id, pushkey, user_id)

        @defer.inlineCallbacks
        def poke_pushers(results):
            pushers_rows = set(
                map(tuple, results.get("pushers", {}).get("rows", []))
            )
            deleted_pushers_rows = set(
                map(tuple, results.get("deleted_pushers", {}).get("rows", []))
            )
            for row in sorted(pushers_rows | deleted_pushers_rows):
                if row in deleted_pushers_rows:
                    user_id, app_id, pushkey = row[1:4]
                    stop_pusher(user_id, app_id, pushkey)
                elif row in pushers_rows:
                    user_id = row[1]
                    app_id = row[5]
                    pushkey = row[8]
                    yield start_pusher(user_id, app_id, pushkey)

            stream = results.get("events")
            if stream and stream["rows"]:
                min_stream_id = stream["rows"][0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_notifications)(
                    min_stream_id, max_stream_id
                )

            stream = results.get("receipts")
            if stream and stream["rows"]:
                rows = stream["rows"]
                affected_room_ids = set(row[1] for row in rows)
                min_stream_id = rows[0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_receipts)(
                    min_stream_id, max_stream_id, affected_room_ids
                )

        def expire_broken_caches():
            store.who_forgot_in_room.invalidate_all()

        next_expire_broken_caches_ms = 0
        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                now_ms = clock.time_msec()
                if now_ms > next_expire_broken_caches_ms:
                    expire_broken_caches()
                    next_expire_broken_caches_ms = (
                        now_ms + store.BROKEN_CACHE_EXPIRY_MS
                    )
                yield store.process_replication(result)
                poke_pushers(result)
            except Exception:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(30)
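
poke_pushers converts rows to plain tuples so they can live in sets, then
walks the sorted union so that stops and starts are applied in stream order,
checking deletions first. A toy version of that set logic (the row contents
are fabricated, but the layouts mirror the row[1:4], row[1], row[5] and
row[8] indexing above):

pushers_rows = {
    (10, "@alice:hs", None, None, None, "m.email", None, None, "alice-key"),
}
deleted_pushers_rows = {
    (9, "@bob:hs", "m.http", "bob-key"),
}

for row in sorted(pushers_rows | deleted_pushers_rows):
    if row in deleted_pushers_rows:
        user_id, app_id, pushkey = row[1:4]
        print("stop pusher", user_id, app_id, pushkey)
    elif row in pushers_rows:
        print("start pusher", row[1], row[5], row[8])
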
Example no. 26
    @defer.inlineCallbacks
    def _handle_initial_room(self, room_id):
        """Called when we initially fill out the user_directory, one room at
        a time.
        """
        is_in_room = yield self.store.is_host_joined(room_id, self.server_name)
        if not is_in_room:
            return

        is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
            room_id)

        users_with_profile = yield self.state.get_current_user_in_room(room_id)
        user_ids = set(users_with_profile)
        unhandled_users = user_ids - self.initially_handled_users

        yield self.store.add_profiles_to_user_dir(room_id, {
            user_id: users_with_profile[user_id]
            for user_id in unhandled_users
        })

        self.initially_handled_users |= unhandled_users

        if is_public:
            yield self.store.add_users_to_public_room(
                room_id,
                user_ids=user_ids - self.initially_handled_users_in_public)
            self.initially_handled_users_in_public |= user_ids

        # We now work out which new users share rooms with existing entries.
        # We sleep aggressively here, as otherwise this can starve resources.
        # We also batch up inserts/updates, but try to avoid too many at once.
        to_insert = set()
        to_update = set()
        count = 0
        for user_id in user_ids:
            if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
                yield sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)

            if not self.is_mine_id(user_id):
                count += 1
                continue

            if self.store.get_if_app_services_interested_in_user(user_id):
                count += 1
                continue

            for other_user_id in user_ids:
                if user_id == other_user_id:
                    continue

                if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
                    yield sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
                count += 1

                user_set = (user_id, other_user_id)

                if user_set in self.initially_handled_users_share_private_room:
                    continue

                if user_set in self.initially_handled_users_share:
                    if is_public:
                        continue
                    to_update.add(user_set)
                else:
                    to_insert.add(user_set)

                if is_public:
                    self.initially_handled_users_share.add(user_set)
                else:
                    self.initially_handled_users_share_private_room.add(
                        user_set)

                if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
                    yield self.store.add_users_who_share_room(
                        room_id,
                        not is_public,
                        to_insert,
                    )
                    to_insert.clear()

                if len(to_update) > self.INITIAL_ROOM_BATCH_SIZE:
                    yield self.store.update_users_who_share_room(
                        room_id,
                        not is_public,
                        to_update,
                    )
                    to_update.clear()

        if to_insert:
            yield self.store.add_users_who_share_room(
                room_id,
                not is_public,
                to_insert,
            )
            to_insert.clear()

        if to_update:
            yield self.store.update_users_who_share_room(
                room_id,
                not is_public,
                to_update,
            )
            to_update.clear()
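
The nested loop above emits every ordered pair of users in the room (O(n**2)
of them) and flushes to the database in bounded batches so that no single
insert grows too large. A stripped-down sketch of that batching pattern (the
user list and batch size are made up; print stands in for the store calls):

user_ids = ["@a:hs", "@b:hs", "@c:hs"]
BATCH_SIZE = 3

to_insert = set()
for user_id in user_ids:
    for other_user_id in user_ids:
        if user_id == other_user_id:
            continue
        to_insert.add((user_id, other_user_id))
        if len(to_insert) > BATCH_SIZE:
            print("flush batch:", sorted(to_insert))
            to_insert.clear()

if to_insert:
    # Flush whatever is left over, exactly as the code above does
    print("flush final:", sorted(to_insert))
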
Example no. 27
    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        pusher_pool = self.get_pusherpool()

        def stop_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
            pushers_for_user = pusher_pool.pushers.get(user_id, {})
            pusher = pushers_for_user.pop(key, None)
            if pusher is None:
                return
            logger.info("Stopping pusher %r / %r", user_id, key)
            pusher.on_stop()

        def start_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
            logger.info("Starting pusher %r / %r", user_id, key)
            return pusher_pool._refresh_pusher(app_id, pushkey, user_id)

        @defer.inlineCallbacks
        def poke_pushers(results):
            pushers_rows = set(
                map(tuple, results.get("pushers", {}).get("rows", []))
            )
            deleted_pushers_rows = set(
                map(tuple, results.get("deleted_pushers", {}).get("rows", []))
            )
            for row in sorted(pushers_rows | deleted_pushers_rows):
                if row in deleted_pushers_rows:
                    user_id, app_id, pushkey = row[1:4]
                    stop_pusher(user_id, app_id, pushkey)
                elif row in pushers_rows:
                    user_id = row[1]
                    app_id = row[5]
                    pushkey = row[8]
                    yield start_pusher(user_id, app_id, pushkey)

            stream = results.get("events")
            if stream and stream["rows"]:
                min_stream_id = stream["rows"][0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_notifications)(min_stream_id,
                                                              max_stream_id)

            stream = results.get("receipts")
            if stream and stream["rows"]:
                rows = stream["rows"]
                affected_room_ids = set(row[1] for row in rows)
                min_stream_id = rows[0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_receipts)(min_stream_id,
                                                         max_stream_id,
                                                         affected_room_ids)

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
                poke_pushers(result)
            except Exception:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(30)
Example no. 28
    def _on_enter(self, request_id):
        time_now = self.clock.time_msec()
        self.request_times[:] = [
            r for r in self.request_times if time_now - r < self.window_size
        ]

        queue_size = len(self.ready_request_queue) + len(self.sleeping_requests)
        if queue_size > self.reject_limit:
            raise LimitExceededError(
                retry_after_ms=int(self.window_size / self.sleep_limit),
            )

        self.request_times.append(time_now)

        def queue_request():
            if len(self.current_processing) > self.concurrent_requests:
                logger.debug("Ratelimit [%s]: Queue req", id(request_id))
                queue_defer = defer.Deferred()
                self.ready_request_queue[request_id] = queue_defer
                return queue_defer
            else:
                return defer.succeed(None)

        logger.debug(
            "Ratelimit [%s]: len(self.request_times)=%d",
            id(request_id),
            len(self.request_times),
        )

        if len(self.request_times) > self.sleep_limit:
            logger.debug(
                "Ratelimit [%s]: sleeping req",
                id(request_id),
            )
            ret_defer = sleep(self.sleep_msec / 1000.0)

            self.sleeping_requests.add(request_id)

            def on_wait_finished(_):
                logger.debug(
                    "Ratelimit [%s]: Finished sleeping",
                    id(request_id),
                )
                self.sleeping_requests.discard(request_id)
                queue_defer = queue_request()
                return queue_defer

            ret_defer.addBoth(on_wait_finished)
        else:
            ret_defer = queue_request()

        def on_start(r):
            logger.debug(
                "Ratelimit [%s]: Processing req",
                id(request_id),
            )
            self.current_processing.add(request_id)
            return r

        def on_err(r):
            self.current_processing.discard(request_id)
            return r

        def on_both(r):
            # Ensure that we've properly cleaned up.
            self.sleeping_requests.discard(request_id)
            self.ready_request_queue.pop(request_id, None)
            return r

        ret_defer.addCallbacks(on_start, on_err)
        ret_defer.addBoth(on_both)
        return ret_defer
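
_on_enter implements a sliding-window limiter: timestamps older than
window_size are pruned on every call, and the surviving count decides whether
a request proceeds immediately or sleeps first, while queue depth drives
outright rejection. A compact synchronous sketch of the window bookkeeping,
simplified to reject on the window count itself (the thresholds are
illustrative, and RuntimeError stands in for LimitExceededError):

import time

class SlidingWindowLimiter(object):
    def __init__(self, window_size_ms=1000, sleep_limit=10, reject_limit=50):
        self.window_size = window_size_ms
        self.sleep_limit = sleep_limit
        self.reject_limit = reject_limit
        self.request_times = []

    def on_enter(self):
        now = time.time() * 1000
        # Prune timestamps that have fallen outside the window
        self.request_times = [
            t for t in self.request_times if now - t < self.window_size
        ]
        if len(self.request_times) > self.reject_limit:
            raise RuntimeError("rate limit exceeded")
        self.request_times.append(now)
        # True means the caller should be delayed before proceeding
        return len(self.request_times) > self.sleep_limit
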