Example #1
    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer):
        super().prepare(reactor, clock, hs)

        # poke the other server's signing key into the key store, so that we don't
        # make requests for it
        verify_key = signedjson.key.get_verify_key(self.OTHER_SERVER_SIGNATURE_KEY)
        verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version)

        self.get_success(
            hs.get_datastores().main.store_server_verify_keys(
                from_server=self.OTHER_SERVER_NAME,
                ts_added_ms=clock.time_msec(),
                verify_keys=[
                    (
                        self.OTHER_SERVER_NAME,
                        verify_key_id,
                        FetchKeyResult(
                            verify_key=verify_key,
                            valid_until_ts=clock.time_msec() + 1000,
                        ),
                    )
                ],
            )
        )
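
The key id stored above is just "alg:version" (for ed25519 keys, something like "ed25519:1"). A minimal sketch of producing such a key pair with the signedjson package; the version string "1" is an arbitrary placeholder:

import signedjson.key

# generate an ed25519 signing key, then derive the matching verify key
# and the "alg:version" key id that the key store expects
signing_key = signedjson.key.generate_signing_key("1")
verify_key = signedjson.key.get_verify_key(signing_key)
verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version)  # "ed25519:1"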
Example #2
def create_local_event_from_event_dict(
    clock: Clock,
    hostname: str,
    signing_key: SigningKey,
    room_version: RoomVersion,
    event_dict: JsonDict,
    internal_metadata_dict: Optional[JsonDict] = None,
) -> EventBase:
    """Takes a fully formed event dict, ensuring that fields like `origin`
    and `origin_server_ts` have correct values for a locally produced event,
    then signs and hashes it.
    """

    format_version = room_version.event_format
    if format_version not in KNOWN_EVENT_FORMAT_VERSIONS:
        raise Exception("No event format defined for version %r" % (format_version,))

    if internal_metadata_dict is None:
        internal_metadata_dict = {}

    time_now = int(clock.time_msec())

    if format_version == EventFormatVersions.V1:
        # only V1-format events carry an explicit event_id; later formats
        # derive the event ID from the event's reference hash instead
        event_dict["event_id"] = _create_event_id(clock, hostname)

    event_dict["origin"] = hostname
    event_dict.setdefault("origin_server_ts", time_now)

    event_dict.setdefault("unsigned", {})
    # convert a relative "age" (ms since the event was created) into the
    # absolute "age_ts" timestamp used internally
    age = event_dict["unsigned"].pop("age", 0)
    event_dict["unsigned"].setdefault("age_ts", time_now - age)

    event_dict.setdefault("signatures", {})

    add_hashes_and_signatures(room_version, event_dict, hostname, signing_key)
    return make_event_from_dict(
        event_dict, room_version, internal_metadata_dict=internal_metadata_dict
    )
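
A hedged usage sketch: it assumes Synapse's RoomVersions constants and its Clock wrapper around the Twisted reactor, and the hostname, room and user IDs are placeholders rather than values from the original code:

from twisted.internet import reactor

import signedjson.key
from synapse.api.room_versions import RoomVersions
from synapse.util import Clock

signing_key = signedjson.key.generate_signing_key("1")
event = create_local_event_from_event_dict(
    clock=Clock(reactor),
    hostname="example.com",
    signing_key=signing_key,
    room_version=RoomVersions.V6,  # assumed constant; use the room's real version
    event_dict={
        "type": "m.room.message",
        "room_id": "!room:example.com",
        "sender": "@alice:example.com",
        "content": {"msgtype": "m.text", "body": "hello"},
    },
)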
Example #3
async def get_retry_limiter(
    destination: str,
    clock: Clock,
    store: DataStore,
    ignore_backoff: bool = False,
    **kwargs: Any,
) -> "RetryDestinationLimiter":
    """For a given destination check if we have previously failed to
    send a request there and are waiting before retrying the destination.
    If we are not ready to retry the destination, this will raise a
    NotRetryingDestination exception. Otherwise, will return a Context Manager
    that will mark the destination as down if an exception is thrown (excluding
    CodeMessageException with code < 500)

    Args:
        destination: name of homeserver
        clock: timing source
        store: datastore
        ignore_backoff: true to ignore the historical backoff data and
            try the request anyway. We will still reset the retry_interval on success.

    Example usage:

        try:
            limiter = await get_retry_limiter(destination, clock, store)
            with limiter:
                response = await do_request()
        except NotRetryingDestination:
            # We aren't ready to retry that destination.
            raise
    """
    failure_ts = None
    retry_last_ts, retry_interval = (0, 0)

    retry_timings = await store.get_destination_retry_timings(destination)

    if retry_timings:
        failure_ts = retry_timings.failure_ts
        retry_last_ts = retry_timings.retry_last_ts
        retry_interval = retry_timings.retry_interval

        now = int(clock.time_msec())

        if not ignore_backoff and retry_last_ts + retry_interval > now:
            raise NotRetryingDestination(
                retry_last_ts=retry_last_ts,
                retry_interval=retry_interval,
                destination=destination,
            )

    # if we are ignoring the backoff data, we should also not increment the backoff
    # when we get another failure - otherwise a server can very quickly reach the
    # maximum backoff even though it might only have been down briefly
    backoff_on_failure = not ignore_backoff

    return RetryDestinationLimiter(
        destination,
        clock,
        store,
        failure_ts,
        retry_interval,
        backoff_on_failure=backoff_on_failure,
        **kwargs,
    )
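
Expanding the docstring's usage pattern into a complete coroutine; do_request stands in for whatever request the caller actually makes, and the import path is the usual synapse.util.retryutils location:

from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter

async def send_if_ready(destination, clock, store, do_request):
    """Attempt a request, respecting the destination's backoff state."""
    try:
        limiter = await get_retry_limiter(destination, clock, store)
    except NotRetryingDestination:
        # still inside the backoff window; skip this destination for now
        return None
    with limiter:
        # an exception here (other than CodeMessageException with code < 500)
        # marks the destination as down and extends its backoff
        return await do_request()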