Example #1
def compute_event_signature(event, signature_name, signing_key):
    tmp_event = prune_event(event)
    redact_json = tmp_event.get_pdu_json()
    redact_json.pop("age_ts", None)
    redact_json.pop("unsigned", None)
    logger.debug("Signing event: %s", encode_canonical_json(redact_json))
    redact_json = sign_json(redact_json, signature_name, signing_key)
    logger.debug("Signed event: %s", encode_canonical_json(redact_json))
    return redact_json["signatures"]
Example #2
    def deduplicate_state_event(self, event, context):
        """
        Checks whether event is in the latest resolved state in context.

        If so, returns the version of the event in context.
        Otherwise, returns None.
        """
        prev_event = context.current_state.get((event.type, event.state_key))
        if prev_event and event.user_id == prev_event.user_id:
            prev_content = encode_canonical_json(prev_event.content)
            next_content = encode_canonical_json(event.content)
            if prev_content == next_content:
                return prev_event
        return None
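The canonical encoding is what makes this content comparison reliable: key order and whitespace are normalised away, so semantically equal dicts yield identical bytes. A minimal sketch of the property being relied on:

from canonicaljson import encode_canonical_json

# Key order does not affect the canonical encoding.
a = encode_canonical_json({"msgtype": "m.text", "body": "hi"})
b = encode_canonical_json({"body": "hi", "msgtype": "m.text"})
assert a == b  # both are b'{"body":"hi","msgtype":"m.text"}'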
Example #3
def sign_json(json_object, signature_name, signing_key):
    """Sign the JSON object. Stores the signature in json_object["signatures"].

    Args:
        json_object (dict): The JSON object to sign.
        signature_name (str): The name of the signing entity.
        signing_key (syutil.crypto.SigningKey): The key to sign the JSON with.

    Returns:
        The modified, signed JSON object."""

    signatures = json_object.pop("signatures", {})
    unsigned = json_object.pop("unsigned", None)

    message_bytes = encode_canonical_json(json_object)
    signed = signing_key.sign(message_bytes)
    signature_base64 = encode_base64(signed.signature)

    key_id = "%s:%s" % (signing_key.alg, signing_key.version)
    signatures.setdefault(signature_name, {})[key_id] = signature_base64

    # logger.debug("SIGNING: %s %s %s", signature_name, key_id, message_bytes)

    json_object["signatures"] = signatures
    if unsigned is not None:
        json_object["unsigned"] = unsigned

    return json_object
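A usage sketch for sign_json, assuming the signedjson package (which ships an equivalent function) for key generation:

from signedjson.key import generate_signing_key
from signedjson.sign import sign_json

signing_key = generate_signing_key("v1")  # ed25519 key with version "v1"
signed = sign_json({"foo": "bar"}, "example.com", signing_key)
# signed now looks like:
# {"foo": "bar", "signatures": {"example.com": {"ed25519:v1": "<base64>"}}}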
Example #4
        def _set_e2e_device_keys_txn(txn):
            old_key_json = self._simple_select_one_onecol_txn(
                txn,
                table="e2e_device_keys_json",
                keyvalues={
                    "user_id": user_id,
                    "device_id": device_id,
                },
                retcol="key_json",
                allow_none=True,
            )

            new_key_json = encode_canonical_json(device_keys)
            if old_key_json == new_key_json:
                return False

            self._simple_upsert_txn(
                txn,
                table="e2e_device_keys_json",
                keyvalues={
                    "user_id": user_id,
                    "device_id": device_id,
                },
                values={
                    "ts_added_ms": time_now,
                    "key_json": new_key_json,
                }
            )

            return True
Example #5
def respond_with_json(request, code, json_object, send_cors=False,
                      response_code_message=None, pretty_print=False,
                      canonical_json=True):
    # could alternatively use request.notifyFinish() and flip a flag when
    # the Deferred fires, but since the flag is RIGHT THERE it seems like
    # a waste.
    if request._disconnected:
        logger.warn(
            "Not sending response to request %s, already disconnected.",
            request)
        return

    if pretty_print:
        json_bytes = encode_pretty_printed_json(json_object) + b"\n"
    else:
        if canonical_json or synapse.events.USE_FROZEN_DICTS:
            # canonicaljson already encodes to bytes
            json_bytes = encode_canonical_json(json_object)
        else:
            json_bytes = json.dumps(json_object).encode("utf-8")

    return respond_with_json_bytes(
        request, code, json_bytes,
        send_cors=send_cors,
        response_code_message=response_code_message,
    )
Example #6
 def add_pusher(self, user_name, access_token, profile_tag, kind, app_id,
                app_display_name, device_display_name,
                pushkey, pushkey_ts, lang, data):
     try:
         next_id = yield self._pushers_id_gen.get_next()
         yield self._simple_upsert(
             PushersTable.table_name,
             dict(
                 app_id=app_id,
                 pushkey=pushkey,
                 user_name=user_name,
             ),
             dict(
                 access_token=access_token,
                 kind=kind,
                 profile_tag=profile_tag,
                 app_display_name=app_display_name,
                 device_display_name=device_display_name,
                 ts=pushkey_ts,
                 lang=lang,
                 data=encode_canonical_json(data),
             ),
             insertion_values=dict(
                 id=next_id,
             ),
             desc="add_pusher",
         )
     except Exception as e:
         logger.error("create_pusher with failed: %s", e)
         raise StoreError(500, "Problem creating pusher.")
Example #7
    def set_received_txn_response(self, transaction_id, origin, code,
                                  response_dict):
        """Persist the response we returened for an incoming transaction, and
        should return for subsequent transactions with the same transaction_id
        and origin.

        Args:
            txn
            transaction_id (str)
            origin (str)
            code (int)
            response_json (str)
        """

        return self._simple_insert(
            table="received_transactions",
            values={
                "transaction_id": transaction_id,
                "origin": origin,
                "response_code": code,
                "response_json": buffer(encode_canonical_json(response_dict)),
            },
            or_ignore=True,
            desc="set_received_txn_response",
        )
Example #8
    def add_user_filter(self, user_localpart, user_filter):
        def_json = encode_canonical_json(user_filter)

        # Need an atomic transaction to SELECT the maximal ID so far then
        # INSERT a new one
        def _do_txn(txn):
            sql = (
                "SELECT filter_id FROM user_filters "
                "WHERE user_id = ? AND filter_json = ?"
            )
            txn.execute(sql, (user_localpart, def_json))
            filter_id_response = txn.fetchone()
            if filter_id_response is not None:
                return filter_id_response[0]

            sql = (
                "SELECT MAX(filter_id) FROM user_filters "
                "WHERE user_id = ?"
            )
            txn.execute(sql, (user_localpart,))
            max_id = txn.fetchone()[0]
            if max_id is None:
                filter_id = 0
            else:
                filter_id = max_id + 1

            sql = (
                "INSERT INTO user_filters (user_id, filter_id, filter_json)"
                "VALUES(?, ?, ?)"
            )
            txn.execute(sql, (user_localpart, filter_id, def_json))

            return filter_id

        return self.runInteraction("add_user_filter", _do_txn)
Example #9
 def f(txn):
     newly_inserted = self._simple_upsert_txn(
         txn,
         "pushers",
         {
             "app_id": app_id,
             "pushkey": pushkey,
             "user_name": user_id,
         },
         {
             "access_token": access_token,
             "kind": kind,
             "app_display_name": app_display_name,
             "device_display_name": device_display_name,
             "ts": pushkey_ts,
             "lang": lang,
             "data": encode_canonical_json(data),
             "last_stream_ordering": last_stream_ordering,
             "profile_tag": profile_tag,
             "id": stream_id,
         },
     )
     if newly_inserted:
         # get_if_user_has_pusher only cares if the user has
         # at least *one* pusher.
         txn.call_after(self.get_if_user_has_pusher.invalidate, (user_id,))
Example #10
def compute_content_hash(event_dict, hash_algorithm):
    """Compute the content hash of an event, which is the hash of the
    unredacted event.

    Args:
        event_dict (dict): The unredacted event as a dict
        hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
            to hash the event

    Returns:
        tuple[str, bytes]: A tuple of the name of the hash and the hash as
        raw bytes.
    """
    event_dict = dict(event_dict)
    event_dict.pop("age_ts", None)
    event_dict.pop("unsigned", None)
    event_dict.pop("signatures", None)
    event_dict.pop("hashes", None)
    event_dict.pop("outlier", None)
    event_dict.pop("destinations", None)

    event_json_bytes = encode_canonical_json(event_dict)

    hashed = hash_algorithm(event_json_bytes)
    return (hashed.name, hashed.digest())
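A hypothetical invocation, hashing an illustrative event dict with SHA-256:

import hashlib

name, digest = compute_content_hash(
    {"type": "m.room.message", "content": {"body": "hi"}},  # illustrative only
    hashlib.sha256,
)
assert name == "sha256" and len(digest) == 32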
Example #11
 def __init__(self, hs):
     self.hs = hs
     self.version_string = hs.version_string
     self.response_body = encode_canonical_json(
         self.response_json_object(hs.config)
     )
     Resource.__init__(self)
Example #12
def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
    tmp_event = prune_event(event)
    event_json = tmp_event.get_pdu_json()
    event_json.pop("signatures", None)
    event_json.pop("age_ts", None)
    event_json.pop("unsigned", None)
    event_json_bytes = encode_canonical_json(event_json)
    hashed = hash_algorithm(event_json_bytes)
    return (hashed.name, hashed.digest())
Example #13
    def deduplicate_state_event(self, event, context):
        """
        Checks whether event is in the latest resolved state in context.

        If so, returns the version of the event in context.
        Otherwise, returns None.
        """
        prev_event_id = context.prev_state_ids.get((event.type, event.state_key))
        prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
        if not prev_event:
            return

        if event.user_id == prev_event.user_id:
            prev_content = encode_canonical_json(prev_event.content)
            next_content = encode_canonical_json(event.content)
            if prev_content == next_content:
                defer.returnValue(prev_event)
        return
Example #14
    def on_POST(self, request, device_id):
        requester = yield self.auth.get_user_by_req(request)
        user_id = requester.user.to_string()
        # TODO: Check that the device_id matches that in the authentication
        # or derive the device_id from the authentication instead.
        try:
            body = json.loads(request.content.read())
        except Exception:
            raise SynapseError(400, "Invalid key JSON")
        time_now = self.clock.time_msec()

        # TODO: Validate the JSON to make sure it has the right keys.
        device_keys = body.get("device_keys", None)
        if device_keys:
            logger.info(
                "Updating device_keys for device %r for user %s at %d",
                device_id, user_id, time_now
            )
            # TODO: Sign the JSON with the server key
            yield self.store.set_e2e_device_keys(
                user_id, device_id, time_now,
                encode_canonical_json(device_keys)
            )

        one_time_keys = body.get("one_time_keys", None)
        if one_time_keys:
            logger.info(
                "Adding %d one_time_keys for device %r for user %r at %d",
                len(one_time_keys), device_id, user_id, time_now
            )
            key_list = []
            for key_id, key_json in one_time_keys.items():
                algorithm, key_id = key_id.split(":")
                key_list.append((
                    algorithm, key_id, encode_canonical_json(key_json)
                ))

            yield self.store.add_e2e_one_time_keys(
                user_id, device_id, time_now, key_list
            )

        result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
        defer.returnValue((200, {"one_time_key_counts": result}))
Example #15
    def upload_keys_for_user(self, user_id, device_id, keys):
        time_now = self.clock.time_msec()

        # TODO: Validate the JSON to make sure it has the right keys.
        device_keys = keys.get("device_keys", None)
        if device_keys:
            logger.info(
                "Updating device_keys for device %r for user %s at %d",
                device_id, user_id, time_now
            )
            # TODO: Sign the JSON with the server key
            yield self.store.set_e2e_device_keys(
                user_id, device_id, time_now,
                encode_canonical_json(device_keys)
            )

        one_time_keys = keys.get("one_time_keys", None)
        if one_time_keys:
            logger.info(
                "Adding %d one_time_keys for device %r for user %r at %d",
                len(one_time_keys), device_id, user_id, time_now
            )
            key_list = []
            for key_id, key_json in one_time_keys.items():
                algorithm, key_id = key_id.split(":")
                key_list.append((
                    algorithm, key_id, encode_canonical_json(key_json)
                ))

            yield self.store.add_e2e_one_time_keys(
                user_id, device_id, time_now, key_list
            )

        # the device should have been registered already, but it may have been
        # deleted due to a race with a DELETE request. Or we may be using an
        # old access_token without an associated device_id. Either way, we
        # need to double-check the device is registered to avoid ending up with
        # keys without a corresponding device.
        self.device_handler.check_device_registered(user_id, device_id)

        result = yield self.store.count_e2e_one_time_keys(user_id, device_id)

        defer.returnValue({"one_time_key_counts": result})
Example #16
def compute_event_signature(event_dict, signature_name, signing_key):
    """Compute the signature of the event for the given name and key.

    Args:
        event_dict (dict): The event as a dict
        signature_name (str): The name of the entity signing the event
            (typically the server's hostname).
        signing_key (syutil.crypto.SigningKey): The key to sign with

    Returns:
        dict[str, dict[str, str]]: Returns a dictionary in the same format as
        an event's signatures field.
    """
    redact_json = prune_event_dict(event_dict)
    redact_json.pop("age_ts", None)
    redact_json.pop("unsigned", None)
    logger.debug("Signing event: %s", encode_canonical_json(redact_json))
    redact_json = sign_json(redact_json, signature_name, signing_key)
    logger.debug("Signed event: %s", encode_canonical_json(redact_json))
    return redact_json["signatures"]
Example #17
    def send_event(self, event, context, ratelimit=True, is_guest=False):
        """
        Persists and notifies local clients and federation of an event.

        Args:
            event (FrozenEvent): the event to send.
            context (Context): the context of the event.
            ratelimit (bool): Whether to rate limit this send.
            is_guest (bool): Whether the sender is a guest.
        """
        user = UserID.from_string(event.sender)

        assert self.hs.is_mine(user), "User must be our own: %s" % (user,)

        if ratelimit:
            self.ratelimit(event.sender)

        if event.is_state():
            prev_state = context.current_state.get((event.type, event.state_key))
            if prev_state and event.user_id == prev_state.user_id:
                prev_content = encode_canonical_json(prev_state.content)
                next_content = encode_canonical_json(event.content)
                if prev_content == next_content:
                    # Duplicate suppression for state updates with same sender
                    # and content.
                    defer.returnValue(prev_state)

        if event.type == EventTypes.Member:
            member_handler = self.hs.get_handlers().room_member_handler
            yield member_handler.send_membership_event(event, context, is_guest=is_guest)
        else:
            yield self.handle_new_client_event(
                event=event,
                context=context,
            )

        if event.type == EventTypes.Message:
            presence = self.hs.get_handlers().presence_handler
            with PreserveLoggingContext():
                presence.bump_presence_active_time(user)
Example #18
def test_recipe_signatures(conf, requests_session):
    r = requests_session.get(conf.getoption('server') + '/api/v1/recipe/signed/')
    r.raise_for_status()
    data = r.json()

    if len(data) == 0:
        pytest.skip('No signed recipes')

    for item in data:
        canonical_recipe = canonicaljson.encode_canonical_json(item['recipe'])
        signature = item['signature']['signature']
        pubkey = item['signature']['public_key']
        assert verify_signature(canonical_recipe, signature, pubkey)
Example #19
def compute_content_hash(event, hash_algorithm):
    event_json = event.get_pdu_json()
    event_json.pop("age_ts", None)
    event_json.pop("unsigned", None)
    event_json.pop("signatures", None)
    event_json.pop("hashes", None)
    event_json.pop("outlier", None)
    event_json.pop("destinations", None)

    event_json_bytes = encode_canonical_json(event_json)

    hashed = hash_algorithm(event_json_bytes)
    return (hashed.name, hashed.digest())
Example #20
    def add_pusher(
        self,
        user_id,
        access_token,
        kind,
        app_id,
        app_display_name,
        device_display_name,
        pushkey,
        pushkey_ts,
        lang,
        data,
        last_stream_ordering,
        profile_tag="",
    ):
        with self._pushers_id_gen.get_next() as stream_id:
            # no need to lock because `pushers` has a unique key on
            # (app_id, pushkey, user_name) so _simple_upsert will retry
            yield self._simple_upsert(
                table="pushers",
                keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
                values={
                    "access_token": access_token,
                    "kind": kind,
                    "app_display_name": app_display_name,
                    "device_display_name": device_display_name,
                    "ts": pushkey_ts,
                    "lang": lang,
                    "data": encode_canonical_json(data),
                    "last_stream_ordering": last_stream_ordering,
                    "profile_tag": profile_tag,
                    "id": stream_id,
                },
                desc="add_pusher",
                lock=False,
            )

            user_has_pusher = self.get_if_user_has_pusher.cache.get(
                (user_id,), None, update_metrics=False
            )

            if user_has_pusher is not True:
                # invalidate, since the user might not have had a pusher before
                yield self.runInteraction(
                    "add_pusher",
                    self._invalidate_cache_and_stream,
                    self.get_if_user_has_pusher,
                    (user_id,),
                )
Example #21
    def delivered_txn(self, transaction_id, destination, code, response_dict):
        """Persists the response for an outgoing transaction.

        Args:
            transaction_id (str)
            destination (str)
            code (int)
            response_dict (dict)
        """
        return self.runInteraction(
            "delivered_txn",
            self._delivered_txn,
            transaction_id, destination, code,
            buffer(encode_canonical_json(response_dict)),
        )
Example #22
    def post_json_get_json(self, uri, post_json):
        json_str = encode_canonical_json(post_json)

        logger.debug("HTTP POST %s -> %s", json_str, uri)

        response = yield self.request(
            "POST",
            uri.encode("ascii"),
            headers=Headers({b"Content-Type": [b"application/json"], b"User-Agent": [self.user_agent]}),
            bodyProducer=FileBodyProducer(StringIO(json_str)),
        )

        body = yield preserve_context_over_fn(readBody, response)

        defer.returnValue(json.loads(body))
Example #23
def _check_size_limits(event):
    def too_big(field):
        raise EventSizeError("%s too large" % (field,))

    if len(event.user_id) > 255:
        too_big("user_id")
    if len(event.room_id) > 255:
        too_big("room_id")
    if event.is_state() and len(event.state_key) > 255:
        too_big("state_key")
    if len(event.type) > 255:
        too_big("type")
    if len(event.event_id) > 255:
        too_big("event_id")
    if len(encode_canonical_json(event.get_pdu_json())) > 65536:
        too_big("event")
Example #24
def respond_with_json(request, code, json_object, send_cors=False,
                      response_code_message=None, pretty_print=False,
                      version_string="", canonical_json=True):
    if pretty_print:
        json_bytes = encode_pretty_printed_json(json_object) + "\n"
    else:
        if canonical_json or synapse.events.USE_FROZEN_DICTS:
            json_bytes = encode_canonical_json(json_object)
        else:
            # ujson doesn't like frozen_dicts.
            json_bytes = ujson.dumps(json_object, ensure_ascii=False)

    return respond_with_json_bytes(
        request, code, json_bytes,
        send_cors=send_cors,
        response_code_message=response_code_message,
        version_string=version_string
    )
Example #25
    def put_json(self, uri, json_body, args={}, headers=None):
        """ Puts some json to the given URI.

        Args:
            uri (str): The URI to request, not including query parameters
            json_body (dict): The JSON to put in the HTTP body.
            args (dict): A dictionary used to create query strings; defaults
                to an empty dict.
                **Note**: The value of each key is assumed to be an iterable
                and *not* a string.
            headers (dict[str, List[str]]|None): If not None, a map from
               header name to a list of values for that header
        Returns:
            Deferred: Succeeds when we get *any* 2xx HTTP response, with the
            HTTP body as JSON.
        Raises:
            HttpResponseException: On a non-2xx HTTP response.

            ValueError: if the response was not JSON
        """
        if len(args):
            query_bytes = urllib.parse.urlencode(args, True)
            uri = "%s?%s" % (uri, query_bytes)

        json_str = encode_canonical_json(json_body)

        actual_headers = {
            b"Content-Type": [b"application/json"],
            b"User-Agent": [self.user_agent],
        }
        if headers:
            actual_headers.update(headers)

        response = yield self.request(
            "PUT", uri, headers=Headers(actual_headers), data=json_str
        )

        body = yield make_deferred_yieldable(readBody(response))

        if 200 <= response.code < 300:
            defer.returnValue(json.loads(body))
        else:
            raise HttpResponseException(response.code, response.phrase, body)
Example #26
def verify_signed_json(json_object, signature_name, verify_key):
    """Check a signature on a signed JSON object.

    Args:
        json_object (dict): The signed JSON object to check.
        signature_name (str): The name of the signature to check.
        verify_key (syutil.crypto.VerifyKey): The key to verify the signature.

    Raises:
        SignatureVerifyException: If the signature isn't valid.
    """

    try:
        signatures = json_object["signatures"]
    except KeyError:
        raise SignatureVerifyException("No signatures on this object")

    key_id = "%s:%s" % (verify_key.alg, verify_key.version)

    try:
        signature_b64 = signatures[signature_name][key_id]
    except KeyError:
        raise SignatureVerifyException("Missing signature for %s, %s" % (signature_name, key_id))

    try:
        signature = decode_base64(signature_b64)
    except Exception:
        raise SignatureVerifyException("Invalid signature base64 for %s, %s" % (signature_name, key_id))

    json_object_copy = dict(json_object)
    del json_object_copy["signatures"]
    json_object_copy.pop("unsigned", None)

    message = encode_canonical_json(json_object_copy)

    # logger.debug("VERIFY: %s %s %s", signature_name, key_id, message)

    try:
        verify_key.verify(message, signature)
    except Exception:
        logger.exception("Error verifying signature")
        raise SignatureVerifyException("Unable to verify signature for %s " % signature_name)
Example #27
    def put_json(self, uri, json_body, args={}):
        """ Puts some json to the given URI.

        Args:
            uri (str): The URI to request, not including query parameters
            json_body (dict): The JSON to put in the HTTP body.
            args (dict): A dictionary used to create query strings; defaults
                to an empty dict.
                **Note**: The value of each key is assumed to be an iterable
                and *not* a string.
        Returns:
            Deferred: Succeeds when we get *any* 2xx HTTP response, with the
            HTTP body as JSON.
        Raises:
            CodeMessageException: On a non-2xx HTTP response.
        """
        if len(args):
            query_bytes = urllib.urlencode(args, True)
            uri = "%s?%s" % (uri, query_bytes)

        json_str = encode_canonical_json(json_body)

        response = yield self.request(
            "PUT",
            uri.encode("ascii"),
            headers=Headers({
                b"User-Agent": [self.user_agent],
                "Content-Type": ["application/json"]
            }),
            bodyProducer=FileBodyProducer(StringIO(json_str))
        )

        body = yield preserve_context_over_fn(readBody, response)

        if 200 <= response.code < 300:
            defer.returnValue(json.loads(body))
        else:
            # NB: This is explicitly not json.loads(body)'d because the contract
            # of CodeMessageException is a *string* message. Callers can always
            # load it into JSON if they want.
            raise CodeMessageException(response.code, body)
Example #28
def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
    """Computes the event reference hash. This is the hash of the redacted
    event.

    Args:
        event (FrozenEvent)
        hash_algorithm: A hasher from `hashlib`, e.g. hashlib.sha256, to use
            to hash the event

    Returns:
        tuple[str, bytes]: A tuple of the name of the hash and the hash as
        raw bytes.
    """
    tmp_event = prune_event(event)
    event_dict = tmp_event.get_pdu_json()
    event_dict.pop("signatures", None)
    event_dict.pop("age_ts", None)
    event_dict.pop("unsigned", None)
    event_json_bytes = encode_canonical_json(event_dict)
    hashed = hash_algorithm(event_json_bytes)
    return (hashed.name, hashed.digest())
Example #29
    def _upload_one_time_keys_for_user(self, user_id, device_id, time_now,
                                       one_time_keys):
        logger.info(
            "Adding one_time_keys %r for device %r for user %r at %d",
            one_time_keys.keys(), device_id, user_id, time_now,
        )

        # make a list of (alg, id, key) tuples
        key_list = []
        for key_id, key_obj in one_time_keys.items():
            algorithm, key_id = key_id.split(":")
            key_list.append((
                algorithm, key_id, key_obj
            ))

        # First we check if we have already persisted any of the keys.
        existing_key_map = yield self.store.get_e2e_one_time_keys(
            user_id, device_id, [k_id for _, k_id, _ in key_list]
        )

        new_keys = []  # Keys that we need to insert. (alg, id, json) tuples.
        for algorithm, key_id, key in key_list:
            ex_json = existing_key_map.get((algorithm, key_id), None)
            if ex_json:
                if not _one_time_keys_match(ex_json, key):
                    raise SynapseError(
                        400,
                        ("One time key %s:%s already exists. "
                         "Old key: %s; new key: %r") %
                        (algorithm, key_id, ex_json, key)
                    )
            else:
                new_keys.append((
                    algorithm, key_id, encode_canonical_json(key).decode('ascii')))

        yield self.store.add_e2e_one_time_keys(
            user_id, device_id, time_now, new_keys
        )
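The _one_time_keys_match helper is referenced but not shown; a plausible sketch (not necessarily Synapse's actual implementation) compares the stored canonical JSON against a re-canonicalised new key:

import json
from canonicaljson import encode_canonical_json

def _one_time_keys_match(old_key_json, new_key):
    # old_key_json is the canonical JSON persisted earlier; re-encode both
    # sides canonically so formatting differences cannot cause a mismatch.
    return encode_canonical_json(json.loads(old_key_json)) == \
        encode_canonical_json(new_key)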
Example #30
    def post_json_get_json(self, uri, post_json, headers=None):
        """

        Args:
            uri (str):
            post_json (object):
            headers (dict[str, List[str]]|None): If not None, a map from
               header name to a list of values for that header

        Returns:
            Deferred[object]: parsed json

        Raises:
            HttpResponseException: On a non-2xx HTTP response.

            ValueError: if the response was not JSON
        """
        json_str = encode_canonical_json(post_json)

        logger.debug("HTTP POST %s -> %s", json_str, uri)

        actual_headers = {
            b"Content-Type": [b"application/json"],
            b"User-Agent": [self.user_agent],
        }
        if headers:
            actual_headers.update(headers)

        response = yield self.request(
            "POST", uri, headers=Headers(actual_headers), data=json_str
        )

        body = yield make_deferred_yieldable(readBody(response))

        if 200 <= response.code < 300:
            defer.returnValue(json.loads(body))
        else:
            raise HttpResponseException(response.code, response.phrase, body)
Example #31
    def _upload_one_time_keys_for_user(self, user_id, device_id, time_now,
                                       one_time_keys):
        logger.info(
            "Adding one_time_keys %r for device %r for user %r at %d",
            one_time_keys.keys(),
            device_id,
            user_id,
            time_now,
        )

        # make a list of (alg, id, key) tuples
        key_list = []
        for key_id, key_obj in one_time_keys.items():
            algorithm, key_id = key_id.split(":")
            key_list.append((algorithm, key_id, key_obj))

        # First we check if we have already persisted any of the keys.
        existing_key_map = yield self.store.get_e2e_one_time_keys(
            user_id, device_id, [k_id for _, k_id, _ in key_list])

        new_keys = []  # Keys that we need to insert. (alg, id, json) tuples.
        for algorithm, key_id, key in key_list:
            ex_json = existing_key_map.get((algorithm, key_id), None)
            if ex_json:
                if not _one_time_keys_match(ex_json, key):
                    raise SynapseError(
                        400,
                        ("One time key %s:%s already exists. "
                         "Old key: %s; new key: %r") %
                        (algorithm, key_id, ex_json, key),
                    )
            else:
                new_keys.append((algorithm, key_id,
                                 encode_canonical_json(key).decode("ascii")))

        yield self.store.add_e2e_one_time_keys(user_id, device_id, time_now,
                                               new_keys)
Example #32
    def post_json_get_json(self, uri, post_json, headers=None):
        """

        Args:
            uri (str):
            post_json (object):
            headers (dict[str, List[str]]|None): If not None, a map from
               header name to a list of values for that header

        Returns:
            Deferred[object]: parsed json
        """
        json_str = encode_canonical_json(post_json)

        logger.debug("HTTP POST %s -> %s", json_str, uri)

        actual_headers = {
            b"Content-Type": [b"application/json"],
            b"User-Agent": [self.user_agent],
        }
        if headers:
            actual_headers.update(headers)

        response = yield self.request("POST",
                                      uri.encode("ascii"),
                                      headers=Headers(actual_headers),
                                      bodyProducer=FileBodyProducer(
                                          StringIO(json_str)))

        body = yield make_deferred_yieldable(readBody(response))

        if 200 <= response.code < 300:
            defer.returnValue(json.loads(body))
        else:
            raise self._exceptionFromFailedRequest(response, body)
Example #33
 def add_pusher(self, user_id, access_token, kind, app_id,
                app_display_name, device_display_name,
                pushkey, pushkey_ts, lang, data, profile_tag=""):
     with self._pushers_id_gen.get_next() as stream_id:
         yield self._simple_upsert(
             "pushers",
             dict(
                 app_id=app_id,
                 pushkey=pushkey,
                 user_name=user_id,
             ),
             dict(
                 access_token=access_token,
                 kind=kind,
                 app_display_name=app_display_name,
                 device_display_name=device_display_name,
                 ts=pushkey_ts,
                 lang=lang,
                 data=encode_canonical_json(data),
                 profile_tag=profile_tag,
                 id=stream_id,
             ),
             desc="add_pusher",
         )
Example #34
    def index(self, url, jsonld):
        headers = {'Content-type': 'application/json'}
        schema = jsonld['@type']
        solr_json = self._create_solr_json(schema, jsonld)

        # TODO: Use solr de-dupe for this
        # jsonld['id'] = str(uuid.uuid5(namespaceUuid, json.dumps(jsonld)))
        solr_json['id'] = hashlib.sha256(
            canonicaljson.encode_canonical_json(solr_json)).hexdigest()

        if self.config['post_to_solr']:
            r = requests.get(self.config['solr_query_url'] + '?q=id:' +
                             solr_json['id'])
            if r.status_code != 200:
                logger.error('Could not post to Solr: %s', r.text)

            r_json = json.loads(r.text)
            num_found = int(r_json['response']['numFound'])
            if num_found > 0:
                logger.info('Skipping %s as already indexed', url)

                if num_found > 1:
                    logger.warn(
                        '%s has %d instances which should be impossible', url,
                        num_found)

                return

            logger.debug('Posting %s', solr_json)

            r = requests.post(self.config['solr_json_doc_update_url'] +
                              '?commit=true',
                              json=solr_json,
                              headers=headers)
            if r.status_code != 200:
                logger.error('Could not post to Solr: %s', r.text)
Example #35
        async def post_json(
                destination: str,
                path: str,
                data: Optional[JsonDict] = None) -> Union[JsonDict, list]:
            self.assertEqual(destination, self.hs.hostname)
            self.assertEqual(
                path,
                "/_matrix/key/v2/query",
            )

            channel = FakeChannel(self.site, self.reactor)
            # channel is a `FakeChannel` but `HTTPChannel` is expected
            req = SynapseRequest(channel, self.site)  # type: ignore[arg-type]
            req.content = BytesIO(encode_canonical_json(data))

            req.requestReceived(
                b"POST",
                path.encode("utf-8"),
                b"1.1",
            )
            channel.await_result()
            self.assertEqual(channel.code, 200)
            resp = channel.json_body
            return resp
Example #36
def create_secondary_pinning_file():
  """
  Load the template pinned.json file and save a filled in version that points
  to the client's own directory. (The TUF repository that a Secondary points
  to is local, retrieved from the Primary and placed in the Secondary itself
  to validate the file internally.)

  Returns the filename of the created file.
  """

  I_TO_PRINT = TO_PRINT + uptane.YELLOW + '[create_secondary_pinning_file()]: ' + uptane.ENDCOLORS
  #TODO: Print to be deleted
  print(str('%s %s' % (I_TO_PRINT, 'Load the template pinned.json file and save a filled in version that points to the client\'s own directory')))
  #TODO: Until here

  pinnings = json.load(
      open(demo.DEMO_SECONDARY_PINNING_FNAME, 'r', encoding='utf-8'))

  fname_to_create = os.path.join(
      demo.DEMO_DIR, 'pinned.json_secondary_' + demo.get_random_string(5))
  atexit.register(clean_up_temp_file, fname_to_create)
  # To delete the temp pinned file after the script ends
  for repo_name in pinnings['repositories']:

    assert 1 == len(pinnings['repositories'][repo_name]['mirrors']), 'Config error.'

    mirror = pinnings['repositories'][repo_name]['mirrors'][0]

    mirror = mirror.replace('<full_client_dir>', CLIENT_DIRECTORY)

    pinnings['repositories'][repo_name]['mirrors'][0] = mirror

  with open(fname_to_create, 'wb') as fobj:
    fobj.write(canonicaljson.encode_canonical_json(pinnings))

  return fname_to_create
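Note that encode_canonical_json returns bytes, which is why the file above is opened in binary ("wb") mode. A minimal illustration (the path is hypothetical):

import canonicaljson

with open("/tmp/pinned.json", "wb") as fobj:
    fobj.write(canonicaljson.encode_canonical_json({"repositories": {}}))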
Example #37
def _auth_header_for_request(
    origin: str,
    destination: str,
    signing_key: signedjson.key.SigningKey,
    method: str,
    path: str,
    content: Optional[JsonDict],
) -> str:
    """Build a suitable Authorization header for an outgoing federation request"""
    request_description: JsonDict = {
        "method": method,
        "uri": path,
        "destination": destination,
        "origin": origin,
    }
    if content is not None:
        request_description["content"] = content
    signature_base64 = unpaddedbase64.encode_base64(
        signing_key.sign(
            canonicaljson.encode_canonical_json(
                request_description)).signature)
    return (f"X-Matrix origin={origin},"
            f"key={signing_key.alg}:{signing_key.version},"
            f"sig={signature_base64}")
Example #38
    def test_invalid_float_values(self) -> None:
        """Infinity/-Infinity/NaN are not allowed in canonicaljson."""

        with self.assertRaises(ValueError):
            encode_canonical_json(inf)

        with self.assertRaises(ValueError):
            encode_pretty_printed_json(inf)

        with self.assertRaises(ValueError):
            encode_canonical_json(-inf)

        with self.assertRaises(ValueError):
            encode_pretty_printed_json(-inf)

        with self.assertRaises(ValueError):
            encode_canonical_json(nan)

        with self.assertRaises(ValueError):
            encode_pretty_printed_json(nan)
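The rejection in action, assuming the same math imports as the test module:

from math import inf
from canonicaljson import encode_canonical_json

try:
    encode_canonical_json({"bad": inf})
except ValueError:
    print("non-finite float rejected")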
Example #39
    def test_encode_canonical(self) -> None:
        self.assertEqual(encode_canonical_json({}), b"{}")

        # ctrl-chars should be encoded.
        self.assertEqual(
            encode_canonical_json(u"text\u0003\r\n"),
            b'"text\\u0003\\r\\n"',
        )

        # quotes and backslashes should be escaped.
        self.assertEqual(
            encode_canonical_json(r'"\ test'),
            b'"\\"\\\\ test"',
        )

        # non-ascii should come out utf8-encoded.
        self.assertEqual(
            encode_canonical_json({u"la merde amusée": u"💩"}),
            b'{"la merde amus\xc3\xa9e":"\xF0\x9F\x92\xA9"}',
        )

        # so should U+2028 and U+2029
        self.assertEqual(
            encode_canonical_json({u"spaces": u"\u2028 \u2029"}),
            b'{"spaces":"\xe2\x80\xa8 \xe2\x80\xa9"}',
        )

        # but we need to watch out for 'u1234' after backslash, which should
        # get encoded to an escaped backslash, followed by u1234
        self.assertEqual(
            encode_canonical_json(u"\\u1234"),
            b'"\\\\u1234"',
        )

        # Iteratively encoding should work.
        self.assertEqual(list(iterencode_canonical_json({})), [b"{}"])
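A streaming sketch: iterencode_canonical_json yields byte chunks, so large objects can be written out incrementally rather than built as one bytestring:

from canonicaljson import iterencode_canonical_json

chunks = b"".join(iterencode_canonical_json({"a": [1, 2, 3]}))
assert chunks == b'{"a":[1,2,3]}'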
Example #40
    def _send_request(
        self,
        request,
        retry_on_dns_fail=True,
        timeout=None,
        long_retries=False,
        ignore_backoff=False,
        backoff_on_404=False,
    ):
        """
        Sends a request to the given server.

        Args:
            request (MatrixFederationRequest): details of request to be sent

            timeout (int|None): number of milliseconds to wait for the response headers
                (including connecting to the server), *for each attempt*.
                60s by default.

            long_retries (bool): whether to use the long retry algorithm.

                The regular retry algorithm makes 4 attempts, with intervals
                [0.5s, 1s, 2s].

                The long retry algorithm makes 11 attempts, with intervals
                [4s, 16s, 60s, 60s, ...]

                Both algorithms add -20%/+40% jitter to the retry intervals.

                Note that the above intervals are *in addition* to the time spent
                waiting for the request to complete (up to `timeout` ms).

                NB: the long retry algorithm takes over 20 minutes to complete, with
                a default timeout of 60s!

            ignore_backoff (bool): true to ignore the historical backoff data
                and try the request anyway.

            backoff_on_404 (bool): Back off if we get a 404

        Returns:
            Deferred[twisted.web.client.Response]: resolves with the HTTP
            response object on success.

        Raises:
            HttpResponseException: If we get an HTTP response code >= 300
                (except 429).
            NotRetryingDestination: If we are not yet ready to retry this
                server.
            FederationDeniedError: If this destination is not on our
                federation whitelist
            RequestSendFailed: If there were problems connecting to the
                remote, due to e.g. DNS failures, connection timeouts etc.
        """
        if timeout:
            _sec_timeout = timeout / 1000
        else:
            _sec_timeout = self.default_timeout

        if (
            self.hs.config.federation_domain_whitelist is not None
            and request.destination not in self.hs.config.federation_domain_whitelist
        ):
            raise FederationDeniedError(request.destination)

        limiter = yield synapse.util.retryutils.get_retry_limiter(
            request.destination,
            self.clock,
            self._store,
            backoff_on_404=backoff_on_404,
            ignore_backoff=ignore_backoff,
        )

        method_bytes = request.method.encode("ascii")
        destination_bytes = request.destination.encode("ascii")
        path_bytes = request.path.encode("ascii")
        if request.query:
            query_bytes = encode_query_args(request.query)
        else:
            query_bytes = b""

        # Retrieve the current span
        scope = opentracing.start_active_span(
            "outgoing-federation-request",
            tags={
                opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT,
                opentracing.tags.PEER_ADDRESS: request.destination,
                opentracing.tags.HTTP_METHOD: request.method,
                opentracing.tags.HTTP_URL: request.path,
            },
            finish_on_close=True,
        )

        # Inject the span into the headers
        headers_dict = {}
        opentracing.inject_active_span_byte_dict(headers_dict, request.destination)

        headers_dict[b"User-Agent"] = [self.version_string_bytes]

        with limiter, scope:
            # XXX: Would be much nicer to retry only at the transaction-layer
            # (once we have reliable transactions in place)
            if long_retries:
                retries_left = MAX_LONG_RETRIES
            else:
                retries_left = MAX_SHORT_RETRIES

            url_bytes = urllib.parse.urlunparse(
                (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"")
            )
            url_str = url_bytes.decode("ascii")

            url_to_sign_bytes = urllib.parse.urlunparse(
                (b"", b"", path_bytes, None, query_bytes, b"")
            )

            while True:
                try:
                    json = request.get_json()
                    if json:
                        headers_dict[b"Content-Type"] = [b"application/json"]
                        auth_headers = self.build_auth_headers(
                            destination_bytes, method_bytes, url_to_sign_bytes, json
                        )
                        data = encode_canonical_json(json)
                        producer = QuieterFileBodyProducer(
                            BytesIO(data), cooperator=self._cooperator
                        )
                    else:
                        producer = None
                        auth_headers = self.build_auth_headers(
                            destination_bytes, method_bytes, url_to_sign_bytes
                        )

                    headers_dict[b"Authorization"] = auth_headers

                    logger.info(
                        "{%s} [%s] Sending request: %s %s; timeout %fs",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _sec_timeout,
                    )

                    try:
                        with Measure(self.clock, "outbound_request"):
                            # we don't want all the fancy cookie and redirect handling
                            # that treq.request gives: just use the raw Agent.
                            request_deferred = self.agent.request(
                                method_bytes,
                                url_bytes,
                                headers=Headers(headers_dict),
                                bodyProducer=producer,
                            )

                            request_deferred = timeout_deferred(
                                request_deferred,
                                timeout=_sec_timeout,
                                reactor=self.reactor,
                            )

                            response = yield request_deferred
                    except DNSLookupError as e:
                        raise_from(RequestSendFailed(e, can_retry=retry_on_dns_fail), e)
                    except Exception as e:
                        logger.info("Failed to send request: %s", e)
                        raise_from(RequestSendFailed(e, can_retry=True), e)

                    logger.info(
                        "{%s} [%s] Got response headers: %d %s",
                        request.txn_id,
                        request.destination,
                        response.code,
                        response.phrase.decode("ascii", errors="replace"),
                    )

                    opentracing.set_tag(
                        opentracing.tags.HTTP_STATUS_CODE, response.code
                    )

                    if 200 <= response.code < 300:
                        pass
                    else:
                        # :'(
                        # Update transactions table?
                        d = treq.content(response)
                        d = timeout_deferred(
                            d, timeout=_sec_timeout, reactor=self.reactor
                        )

                        try:
                            body = yield make_deferred_yieldable(d)
                        except Exception as e:
                            # Eh, we're already going to raise an exception so lets
                            # ignore if this fails.
                            logger.warn(
                                "{%s} [%s] Failed to get error response: %s %s: %s",
                                request.txn_id,
                                request.destination,
                                request.method,
                                url_str,
                                _flatten_response_never_received(e),
                            )
                            body = None

                        e = HttpResponseException(response.code, response.phrase, body)

                        # Retry if the error is a 429 (Too Many Requests),
                        # otherwise just raise a standard HttpResponseException
                        if response.code == 429:
                            raise_from(RequestSendFailed(e, can_retry=True), e)
                        else:
                            raise e

                    break
                except RequestSendFailed as e:
                    logger.warn(
                        "{%s} [%s] Request failed: %s %s: %s",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _flatten_response_never_received(e.inner_exception),
                    )

                    if not e.can_retry:
                        raise

                    if retries_left and not timeout:
                        if long_retries:
                            delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        logger.debug(
                            "{%s} [%s] Waiting %ss before re-sending...",
                            request.txn_id,
                            request.destination,
                            delay,
                        )

                        yield self.clock.sleep(delay)
                        retries_left -= 1
                    else:
                        raise

                except Exception as e:
                    logger.warn(
                        "{%s} [%s] Request failed: %s %s: %s",
                        request.txn_id,
                        request.destination,
                        request.method,
                        url_str,
                        _flatten_response_never_received(e),
                    )
                    raise
        return response
Example #41
 def reset(self, jsn):
     self.body = encode_canonical_json(jsn)
     self.length = len(self.body)
Example #42
def hash_dict(value: dict) -> str:
    return sha256(
        canonicaljson.encode_canonical_json(_canonicalise_dict(value))
    ).hexdigest()
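An equivalent one-off without the project-specific _canonicalise_dict helper, assuming the input is already JSON-serialisable:

from hashlib import sha256
import canonicaljson

digest = sha256(canonicaljson.encode_canonical_json({"k": "v"})).hexdigest()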
Example #43
 def __init__(self, hs):
     self.version_string = hs.version_string
     self.response_body = encode_canonical_json(
         self.response_json_object(hs.config))
     Resource.__init__(self)
                                         row["endpoint"] + ".json")

for old_resource in os.listdir(resource_dir):
    old_path = Path(resource_dir) / old_resource
    if not os.path.isfile(old_path):
        continue
    content = open(old_path, mode="rb").read()

    encoding = detect_encoding(io.BytesIO(content))
    if encoding:
        content = strip_variable_content(content)

    new_resource = hashlib.sha256(content).hexdigest()
    new_path = Path(resource_dir) / new_resource

    if str(new_path) != str(old_path):
        print("removing", old_path)
        os.remove(old_path)

        print("saving", new_path)
        save(new_path, content)

        for log_path in resource_log[old_resource]:
            log = json.load(open(log_path))
            if log["resource"] != old_resource:
                print("expected %s in %s", old_resource, log_path)
                exit(2)
            else:
                log["resource"] = new_resource
                print("fixing", log_path)
                save(log_path, canonicaljson.encode_canonical_json(log))
Example #45
def rows_v2(server, json):
    valid_until = json["valid_until_ts"]
    key_json = encode_canonical_json(json)
    for key_id in json["verify_keys"]:
        yield (server, key_id, "-", valid_until, valid_until,
               db_type(key_json))
Example #46
 def update_response_body(self, time_now_msec):
     refresh_interval = self.config.key_refresh_interval
     self.valid_until_ts = int(time_now_msec + refresh_interval)
     self.response_body = encode_canonical_json(self.response_json_object())
Example #47
def dict_equals(self, other):
    me = encode_canonical_json(self._event_dict)
    them = encode_canonical_json(other._event_dict)
    return me == them
Example #48
def encode_json(json_object):
    if USE_FROZEN_DICTS:
        # ujson doesn't like frozen_dicts
        return encode_canonical_json(json_object)
    else:
        return json.dumps(json_object, ensure_ascii=False)
Example #49
    def _send_request(self,
                      request,
                      retry_on_dns_fail=True,
                      timeout=None,
                      long_retries=False,
                      ignore_backoff=False,
                      backoff_on_404=False):
        """
        Sends a request to the given server.

        Args:
            request (MatrixFederationRequest): details of request to be sent

            timeout (int|None): number of milliseconds to wait for the response headers
                (including connecting to the server). 60s by default.

            ignore_backoff (bool): true to ignore the historical backoff data
                and try the request anyway.

            backoff_on_404 (bool): Back off if we get a 404

        Returns:
            Deferred: resolves with the http response object on success.

            Fails with ``HttpResponseException``: if we get an HTTP response
                code >= 300.

            Fails with ``NotRetryingDestination`` if we are not yet ready
                to retry this server.

            Fails with ``FederationDeniedError`` if this destination
                is not on our federation whitelist

            (May also fail with plenty of other Exceptions for things like DNS
                failures, connection failures, SSL failures.)
        """
        if timeout:
            _sec_timeout = timeout / 1000
        else:
            _sec_timeout = self.default_timeout

        if (self.hs.config.federation_domain_whitelist is not None
                and request.destination
                not in self.hs.config.federation_domain_whitelist):
            raise FederationDeniedError(request.destination)

        limiter = yield synapse.util.retryutils.get_retry_limiter(
            request.destination,
            self.clock,
            self._store,
            backoff_on_404=backoff_on_404,
            ignore_backoff=ignore_backoff,
        )

        method = request.method
        destination = request.destination
        path_bytes = request.path.encode("ascii")
        if request.query:
            query_bytes = encode_query_args(request.query)
        else:
            query_bytes = b""

        headers_dict = {
            "User-Agent": [self.version_string],
            "Host": [request.destination],
        }

        with limiter:
            # XXX: Would be much nicer to retry only at the transaction-layer
            # (once we have reliable transactions in place)
            if long_retries:
                retries_left = MAX_LONG_RETRIES
            else:
                retries_left = MAX_SHORT_RETRIES

            url = urllib.parse.urlunparse((
                b"matrix",
                destination.encode("ascii"),
                path_bytes,
                None,
                query_bytes,
                b"",
            )).decode('ascii')

            http_url = urllib.parse.urlunparse((
                b"",
                b"",
                path_bytes,
                None,
                query_bytes,
                b"",
            )).decode('ascii')

            while True:
                try:
                    json = request.get_json()
                    if json:
                        data = encode_canonical_json(json)
                        headers_dict["Content-Type"] = ["application/json"]
                        self.sign_request(destination, method, http_url,
                                          headers_dict, json)
                    else:
                        data = None
                        self.sign_request(destination, method, http_url,
                                          headers_dict)

                    logger.info("{%s} [%s] Sending request: %s %s",
                                request.txn_id, destination, method, url)

                    if data:
                        producer = FileBodyProducer(
                            BytesIO(data), cooperator=self._cooperator)
                    else:
                        producer = None

                    request_deferred = treq.request(
                        method,
                        url,
                        headers=Headers(headers_dict),
                        data=producer,
                        agent=self.agent,
                        reactor=self.hs.get_reactor(),
                        unbuffered=True)

                    request_deferred = timeout_deferred(
                        request_deferred,
                        timeout=_sec_timeout,
                        reactor=self.hs.get_reactor(),
                    )

                    with Measure(self.clock, "outbound_request"):
                        response = yield make_deferred_yieldable(
                            request_deferred)

                    break
                except Exception as e:
                    logger.warn(
                        "{%s} [%s] Request failed: %s %s: %s",
                        request.txn_id,
                        destination,
                        method,
                        url,
                        _flatten_response_never_received(e),
                    )

                    if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                        raise

                    if retries_left and not timeout:
                        if long_retries:
                            delay = 4**(MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2**(MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        logger.debug(
                            "{%s} [%s] Waiting %ss before re-sending...",
                            request.txn_id,
                            destination,
                            delay,
                        )

                        yield self.clock.sleep(delay)
                        retries_left -= 1
                    else:
                        raise

            logger.info(
                "{%s} [%s] Got response headers: %d %s",
                request.txn_id,
                destination,
                response.code,
                response.phrase.decode('ascii', errors='replace'),
            )

            if 200 <= response.code < 300:
                pass
            else:
                # :'(
                # Update transactions table?
                d = treq.content(response)
                d = timeout_deferred(
                    d,
                    timeout=_sec_timeout,
                    reactor=self.hs.get_reactor(),
                )
                body = yield make_deferred_yieldable(d)
                raise HttpResponseException(response.code, response.phrase,
                                            body)

            defer.returnValue(response)
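
The retry loop above backs off exponentially with jitter: long retries wait 4**n seconds capped at 60, short retries 0.5 * 2**n seconds capped at 2, and both are scaled by a random factor in [0.8, 1.4]. A minimal sketch of the resulting delay schedule, assuming MAX_LONG_RETRIES = 10 and MAX_SHORT_RETRIES = 3 (the real constants are defined elsewhere in the module):

import random

MAX_LONG_RETRIES = 10   # assumed values; the real constants live
MAX_SHORT_RETRIES = 3   # elsewhere in this module

def backoff_delays(long_retries):
    # Reproduce the delay schedule from the retry loop in _send_request.
    retries = MAX_LONG_RETRIES if long_retries else MAX_SHORT_RETRIES
    delays = []
    for retries_left in range(retries, 0, -1):
        if long_retries:
            delay = min(4 ** (MAX_LONG_RETRIES + 1 - retries_left), 60)
        else:
            delay = min(0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left), 2)
        delays.append(delay * random.uniform(0.8, 1.4))
    return delays

print(backoff_delays(long_retries=False))  # ~[0.5, 1, 2] seconds before jitter
print(backoff_delays(long_retries=True))   # ~[4, 16, 60, 60, ...] seconds before jitter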
Example #50
0
    def save_log(self, path, log):
        self.save(path, canonicaljson.encode_canonical_json(log))
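
Note that encode_canonical_json returns bytes, so whatever save() does with the payload must write in binary mode. A self-contained sketch with a hypothetical host class (the real save() is not part of this example):

import canonicaljson

class LogStore:
    # Hypothetical host class; only save_log() appears in the example above.
    def save(self, path, data):
        # encode_canonical_json returns bytes, so open in binary mode.
        with open(path, "wb") as f:
            f.write(data)

    def save_log(self, path, log):
        self.save(path, canonicaljson.encode_canonical_json(log))

LogStore().save_log("headers.json", {"status": "200", "elapsed": "0.31"})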
Example #51
0
    def index(self):
        # logging.basicConfig(
        #     level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s"
        # )

        # load datasets
        # for dataset in argv[1:]:
        #     load(dataset)

        self.load_endpoint()

        # process log files
        for path in glob.glob("%s*/*.json" % (self.log_dir)):
            (date, key) = parse_log_path(path)
            with open(path) as f:
                h = json.load(f)
            self.add(path, date, key, h)

        # check resource files are in the log
        for path in glob.glob("%s*" % (self.resource_dir)):
            resource = parse_resource_path(path)
            if resource in self.resources:
                self.resources[resource] = None
            else:
                logging.error("no log for %s" % (path))

        # check resources in the log exist as files
        for resource in self.resources:
            if self.resources[resource]:
                logging.error("missing resource: %s listed in %s" %
                              (resource, ", ".join(self.resources[resource])))
            # else:
            #     self.resources[resource] = {
            #         "row-count": count_rows_in(}

        # for path in glob.glob("%s*.json" % (self.resource_dir)):
        for entry in os.scandir(self.resource_dir):
            resource = parse_resource_path(entry.path)
            if resource not in self.resources:
                logging.error("no log for %s" % (entry.path))

            self.resources[resource] = {"row-count": count_rows_in(entry.path)}

        # resources[resource] = {
        #     "media-type": v["meta_data"].get("media_type", ""),
        #     "suffix": v["meta_data"].get("suffix", ""),
        #     "valid": v["result"].get("valid", False),
        #     "error-count": v["result"].get("error-count", -1),
        #     "row-count": v["result"]["tables"][0].get("row-count", 0),
        # }

        # process validation
        # for path in glob.glob("%s*.json" % (validation_dir)):
        #     v = json.load(open(path))
        #     resource = parse_json_path(path)
        #     if resource not in resources:
        #         logging.error("no log for %s" % (path))

        #     if not os.path.isfile(os.path.join(resource_dir, resource)):
        #         logging.error("no resource file for %s" % (path))

        #     resources[resource] = {
        #         "media-type": v["meta_data"].get("media_type", ""),
        #         "suffix": v["meta_data"].get("suffix", ""),
        #         "valid": v["result"].get("valid", False),
        #         "error-count": v["result"].get("error-count", -1),
        #         "row-count": v["result"]["tables"][0].get("row-count", 0),
        #     }

        for resource, r in self.resources.items():
            if not r or "row-count" not in r:
                logging.error("%s missing" % resource)
                self.resources[resource] = {}
            # if not r or "valid" not in r:
            #     logging.error("%s%s.json missing" % (validation_dir, resource))

        # save as single JSON file
        save_json(
            "index/index.json",
            canonicaljson.encode_canonical_json({
                "key": self.idx,
                "resource": self.resources
            }),
        )

        # save index CSV files
        self.save_csv(
            "resource",
            ["resource", "row-count"],
            self.resources,
        )
        self.save_csv("link", ["link", "url"], self.idx)

        log = {}
        last_date = "0"
        for link in self.idx:
            for date, entry in self.idx[link]["log"].items():
                entry = {key.lower(): value for key, value in entry.items()}
                entry["link"] = link
                entry["date"] = date
                log["%s-%s" % (date, link)] = entry
                if entry["date"] > last_date:
                    last_date = entry["date"]

        self.save_csv(
            "log",
            [
                "datetime",
                "link",
                "status",
                "elapsed",
                "resource",
                "content-type",
                "content-length",
            ],
            log,
        )

        link_resource = {}
        for entry in log.values():
            if "resource" in entry:
                link_resource[entry["link"] + entry["resource"]] = {
                    "link": entry["link"],
                    "resource": entry["resource"],
                }
                date = entry["date"]
                if ("start-date" not in self.resources[entry["resource"]]
                        or self.resources[entry["resource"]]["start-date"] >
                        date):
                    self.resources[entry["resource"]]["start-date"] = date
                if ("end-date" not in self.resources[entry["resource"]] or
                        self.resources[entry["resource"]]["end-date"] < date):
                    self.resources[entry["resource"]]["end-date"] = date

        for resource in self.resources:
            if self.resources[resource].get("end-date", None) == last_date:
                self.resources[resource]["end-date"] = ""

        self.save_csv("link-resource", ["link", "resource"], link_resource)

        rows = {}
        for link, entry in self.idx.items():
            for organisation in entry["organisation"]:
                rows[link + organisation] = {
                    "link": link,
                    "organisation": organisation,
                }
        self.save_csv("link-organisation", ["link", "organisation"], rows)

        # index for harmonising missing OrganisationURI values
        rows = {}
        for k, entry in link_resource.items():
            link = entry["link"]
            for organisation in self.idx[link]["organisation"]:
                rows[entry["resource"] + organisation] = {
                    "resource": entry["resource"],
                    "organisation": organisation,
                    "start-date":
                    self.resources[entry["resource"]]["start-date"],
                    "end-date": self.resources[entry["resource"]]["end-date"],
                }
        self.save_csv(
            "resource-organisation",
            ["resource", "organisation", "start-date", "end-date"],
            rows,
        )
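
The start-date/end-date pass above keeps, for each resource, the earliest and latest log dates it appears under, then blanks end-date for resources still present on the most recent date. A reduced sketch of that fold with made-up data:

log = {
    "2020-01-01-a": {"link": "a", "resource": "r1", "date": "2020-01-01"},
    "2020-02-01-a": {"link": "a", "resource": "r1", "date": "2020-02-01"},
}
resources = {"r1": {}}

for entry in log.values():
    r = resources[entry["resource"]]
    date = entry["date"]
    if "start-date" not in r or r["start-date"] > date:
        r["start-date"] = date
    if "end-date" not in r or r["end-date"] < date:
        r["end-date"] = date

print(resources)  # {'r1': {'start-date': '2020-01-01', 'end-date': '2020-02-01'}}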
Example #52
0
    def test_get_keys_from_server(self):
        # arbitrarily advance the clock a bit
        self.reactor.advance(100)

        SERVER_NAME = "server2"
        fetcher = ServerKeyFetcher(self.hs)
        testkey = signedjson.key.generate_signing_key("ver1")
        testverifykey = signedjson.key.get_verify_key(testkey)
        testverifykey_id = "ed25519:ver1"
        VALID_UNTIL_TS = 200 * 1000

        # valid response
        response = {
            "server_name": SERVER_NAME,
            "old_verify_keys": {},
            "valid_until_ts": VALID_UNTIL_TS,
            "verify_keys": {
                testverifykey_id: {
                    "key": signedjson.key.encode_verify_key_base64(testverifykey)
                }
            },
        }
        signedjson.sign.sign_json(response, SERVER_NAME, testkey)

        def get_json(destination, path, **kwargs):
            self.assertEqual(destination, SERVER_NAME)
            self.assertEqual(path, "/_matrix/key/v2/server/key1")
            return response

        self.http_client.get_json.side_effect = get_json

        keys_to_fetch = {SERVER_NAME: {"key1": 0}}
        keys = self.get_success(fetcher.get_keys(keys_to_fetch))
        k = keys[SERVER_NAME][testverifykey_id]
        self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS)
        self.assertEqual(k.verify_key, testverifykey)
        self.assertEqual(k.verify_key.alg, "ed25519")
        self.assertEqual(k.verify_key.version, "ver1")

        # check that the perspectives store is correctly updated
        lookup_triplet = (SERVER_NAME, testverifykey_id, None)
        key_json = self.get_success(
            self.hs.get_datastore().get_server_keys_json([lookup_triplet])
        )
        res = key_json[lookup_triplet]
        self.assertEqual(len(res), 1)
        res = res[0]
        self.assertEqual(res["key_id"], testverifykey_id)
        self.assertEqual(res["from_server"], SERVER_NAME)
        self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000)
        self.assertEqual(res["ts_valid_until_ms"], VALID_UNTIL_TS)

        # we expect it to be encoded as canonical json *before* it hits the db
        self.assertEqual(
            bytes(res["key_json"]), canonicaljson.encode_canonical_json(response)
        )

        # change the server name: the result should be ignored
        response["server_name"] = "OTHER_SERVER"

        keys = self.get_success(fetcher.get_keys(keys_to_fetch))
        self.assertEqual(keys, {})
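
The byte-for-byte assertion on key_json works because canonical JSON encoding is deterministic: keys are sorted and whitespace is fixed, so structurally equal objects always encode to identical bytes. A quick illustration, independent of the test:

from canonicaljson import encode_canonical_json

a = {"verify_keys": {"ed25519:ver1": {"key": "abc"}}, "server_name": "server2"}
b = {"server_name": "server2", "verify_keys": {"ed25519:ver1": {"key": "abc"}}}

# Same structure, different key order: identical canonical bytes.
assert encode_canonical_json(a) == encode_canonical_json(b)
print(encode_canonical_json(a))
# b'{"server_name":"server2","verify_keys":{"ed25519:ver1":{"key":"abc"}}}'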
Example #53
0
    @defer.inlineCallbacks
    def _request(self,
                 destination,
                 method,
                 path,
                 json=None,
                 json_callback=None,
                 param_bytes=b"",
                 query=None,
                 retry_on_dns_fail=True,
                 timeout=None,
                 long_retries=False,
                 ignore_backoff=False,
                 backoff_on_404=False):
        """
        Creates and sends a request to the given server.

        Args:
            destination (str): The remote server to send the HTTP request to.
            method (str): HTTP method
            path (str): The HTTP path
            json (dict or None): JSON to send in the body.
            json_callback (func or None): A callback to generate the JSON.
            query (dict or None): Query arguments.
            ignore_backoff (bool): true to ignore the historical backoff data
                and try the request anyway.
            backoff_on_404 (bool): Back off if we get a 404

        Returns:
            Deferred: resolves with the http response object on success.

            Fails with ``HttpResponseException``: if we get an HTTP response
                code >= 300.

            Fails with ``NotRetryingDestination`` if we are not yet ready
                to retry this server.

            Fails with ``FederationDeniedError`` if this destination
                is not on our federation whitelist

            (May also fail with plenty of other Exceptions for things like DNS
                failures, connection failures, SSL failures.)
        """
        if timeout:
            _sec_timeout = timeout / 1000
        else:
            _sec_timeout = self.default_timeout

        if (self.hs.config.federation_domain_whitelist is not None and
                destination not in self.hs.config.federation_domain_whitelist):
            raise FederationDeniedError(destination)

        limiter = yield synapse.util.retryutils.get_retry_limiter(
            destination,
            self.clock,
            self._store,
            backoff_on_404=backoff_on_404,
            ignore_backoff=ignore_backoff,
        )

        path_bytes = path.encode("ascii")
        if query:
            query_bytes = encode_query_args(query)
        else:
            query_bytes = b""

        headers_dict = {
            "User-Agent": [self.version_string],
            "Host": [destination],
        }

        with limiter:
            url = self._create_url(destination.encode("ascii"), path_bytes,
                                   param_bytes, query_bytes).decode('ascii')

            txn_id = "%s-O-%s" % (method, self._next_id)
            self._next_id = (self._next_id + 1) % (MAXINT - 1)

            # XXX: Would be much nicer to retry only at the transaction-layer
            # (once we have reliable transactions in place)
            if long_retries:
                retries_left = MAX_LONG_RETRIES
            else:
                retries_left = MAX_SHORT_RETRIES

            http_url = urllib.parse.urlunparse(
                (b"", b"", path_bytes, param_bytes, query_bytes,
                 b"")).decode('ascii')

            log_result = None
            while True:
                try:
                    if json_callback:
                        json = json_callback()

                    if json:
                        data = encode_canonical_json(json)
                        headers_dict["Content-Type"] = ["application/json"]
                        self.sign_request(destination, method, http_url,
                                          headers_dict, json)
                    else:
                        data = None
                        self.sign_request(destination, method, http_url,
                                          headers_dict)

                    outbound_logger.info("{%s} [%s] Sending request: %s %s",
                                         txn_id, destination, method, url)

                    request_deferred = treq.request(
                        method,
                        url,
                        headers=Headers(headers_dict),
                        data=data,
                        agent=self.agent,
                        reactor=self.hs.get_reactor(),
                        unbuffered=True)
                    request_deferred.addTimeout(_sec_timeout,
                                                self.hs.get_reactor())

                    # Sometimes the timeout above doesn't work, so let's hack in
                    # yet another layer of timeouts in the vain hope that at some
                    # point the world made sense and this really really really
                    # should work.
                    request_deferred = timeout_no_seriously(
                        request_deferred,
                        timeout=_sec_timeout * 2,
                        reactor=self.hs.get_reactor(),
                    )

                    with Measure(self.clock, "outbound_request"):
                        response = yield make_deferred_yieldable(
                            request_deferred)

                    log_result = "%d %s" % (
                        response.code,
                        response.phrase,
                    )
                    break
                except Exception as e:
                    if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                        logger.warn("DNS Lookup failed to %s with %s",
                                    destination, e)
                        log_result = "DNS Lookup failed to %s with %s" % (
                            destination, e)
                        raise

                    logger.warn(
                        "{%s} Sending request failed to %s: %s %s: %s",
                        txn_id,
                        destination,
                        method,
                        url,
                        _flatten_response_never_received(e),
                    )

                    log_result = _flatten_response_never_received(e)

                    if retries_left and not timeout:
                        if long_retries:
                            delay = 4**(MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2**(MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        logger.debug("{%s} Waiting %s before sending to %s...",
                                     txn_id, delay, destination)

                        yield self.clock.sleep(delay)
                        retries_left -= 1
                    else:
                        raise
                finally:
                    outbound_logger.info(
                        "{%s} [%s] Result: %s",
                        txn_id,
                        destination,
                        log_result,
                    )

            if 200 <= response.code < 300:
                pass
            else:
                # :'(
                # Update transactions table?
                with logcontext.PreserveLoggingContext():
                    d = treq.content(response)
                    d.addTimeout(_sec_timeout, self.hs.get_reactor())
                    body = yield make_deferred_yieldable(d)
                raise HttpResponseException(response.code, response.phrase,
                                            body)

            defer.returnValue(response)
Example #54
0
import glob
import json

import canonicaljson

organisation = {
    "development-corporation:1": "development-corporation:Q6670544",
    "development-corporation:2": "development-corporation:Q20648596",
    "national-park:1": "national-park:Q72617988",
    "national-park:10": "national-park:Q72618127",
    "national-park:2": "national-park:Q27159704",
    "national-park:3": "national-park:Q5225646",
    "national-park:4": "national-park:Q72617669",
    "national-park:5": "national-park:Q27178932",
    "national-park:6": "national-park:Q72617784",
    "national-park:7": "national-park:Q72617890",
    "national-park:8": "national-park:Q4972284",
    "national-park:9": "national-park:Q72617158",
}

for path in glob.glob("collection/log/*/*.json"):
    with open(path) as f:
        h = json.load(f)

    # migrate "body" property to "resource" in headers.json
    if "body" in h and "resource" not in h:
        h["resource"] = h.pop("body", None)

    # move development corporations and national parks to wikidata based scheme
    if "organisation" in h and h["organisation"] in organisation:
        h["organisation"] = organisation[h["organisation"]]

    with open(path, "wb") as f:
        f.write(canonicaljson.encode_canonical_json(h))
Example #55
0

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s %(levelname)s %(message)s")

    # load datasets
    for dataset in argv[1:]:
        load(dataset)

    # process log files
    for path in glob.glob("%s*/*.json" % (log_dir)):
        (date, key) = parse_log_path(path)
        h = json.load(open(path))
        add(path, date, key, h)

    # check resource files are in the log
    for path in glob.glob("%s*" % (resource_dir)):
        resource = parse_resource_path(path)
        if resource in resources:
            resources[resource] = False
        else:
            logging.error("no log for %s" % (path))

    # check resources in the log exist as files
    for resource in resources:
        if resources[resource]:
            logging.error("missing resource: %s" % (resource))

    save("collection/index.json", canonicaljson.encode_canonical_json(idx))
Example #56
0
    @defer.inlineCallbacks
    def query_keys(self, request, query, query_remote_on_cache_miss=False):
        logger.info("Handling query for keys %r", query)

        store_queries = []
        for server_name, key_ids in query.items():
            if (
                self.federation_domain_whitelist is not None
                and server_name not in self.federation_domain_whitelist
            ):
                logger.debug("Federation denied with %s", server_name)
                continue

            if not key_ids:
                key_ids = (None,)
            for key_id in key_ids:
                store_queries.append((server_name, key_id, None))

        cached = yield self.store.get_server_keys_json(store_queries)

        json_results = set()

        time_now_ms = self.clock.time_msec()

        cache_misses = dict()
        for (server_name, key_id, from_server), results in cached.items():
            results = [(result["ts_added_ms"], result) for result in results]

            if not results and key_id is not None:
                cache_misses.setdefault(server_name, set()).add(key_id)
                continue

            if key_id is not None:
                ts_added_ms, most_recent_result = max(results)
                ts_valid_until_ms = most_recent_result["ts_valid_until_ms"]
                req_key = query.get(server_name, {}).get(key_id, {})
                req_valid_until = req_key.get("minimum_valid_until_ts")
                miss = False
                if req_valid_until is not None:
                    if ts_valid_until_ms < req_valid_until:
                        logger.debug(
                            "Cached response for %r/%r is older than requested"
                            ": valid_until (%r) < minimum_valid_until (%r)",
                            server_name,
                            key_id,
                            ts_valid_until_ms,
                            req_valid_until,
                        )
                        miss = True
                    else:
                        logger.debug(
                            "Cached response for %r/%r is newer than requested"
                            ": valid_until (%r) >= minimum_valid_until (%r)",
                            server_name,
                            key_id,
                            ts_valid_until_ms,
                            req_valid_until,
                        )
                elif (ts_added_ms + ts_valid_until_ms) / 2 < time_now_ms:
                    logger.debug(
                        "Cached response for %r/%r is too old"
                        ": (added (%r) + valid_until (%r)) / 2 < now (%r)",
                        server_name,
                        key_id,
                        ts_added_ms,
                        ts_valid_until_ms,
                        time_now_ms,
                    )
                    # We are more than halfway through the lifetime of the
                    # response. We should fetch a fresh copy.
                    miss = True
                else:
                    logger.debug(
                        "Cached response for %r/%r is still valid"
                        ": (added (%r) + valid_until (%r)) / 2 < now (%r)",
                        server_name,
                        key_id,
                        ts_added_ms,
                        ts_valid_until_ms,
                        time_now_ms,
                    )

                if miss:
                    cache_misses.setdefault(server_name, set()).add(key_id)
                json_results.add(bytes(most_recent_result["key_json"]))
            else:
                for ts_added, result in results:
                    json_results.add(bytes(result["key_json"]))

        if cache_misses and query_remote_on_cache_miss:
            yield self.fetcher.get_keys(cache_misses)
            yield self.query_keys(request, query, query_remote_on_cache_miss=False)
        else:
            signed_keys = []
            for key_json in json_results:
                key_json = json.loads(key_json)
                for signing_key in self.config.key_server_signing_keys:
                    key_json = sign_json(key_json, self.config.server_name, signing_key)

                signed_keys.append(key_json)

            results = {"server_keys": signed_keys}

            respond_with_json_bytes(request, 200, encode_canonical_json(results))
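
The staleness heuristic above refetches a cached key once the current time passes the midpoint between ts_added_ms and ts_valid_until_ms, i.e. once more than half the key's lifetime has elapsed. A small sketch of the same arithmetic:

def needs_refetch(ts_added_ms, ts_valid_until_ms, now_ms):
    # The same half-life test used in query_keys above.
    return (ts_added_ms + ts_valid_until_ms) / 2 < now_ms

# A key cached at t=0 and valid until t=200000ms is served from cache up to
# the midpoint, then refetched, although it stays nominally valid to the end.
assert not needs_refetch(0, 200000, 100000)
assert needs_refetch(0, 200000, 100001)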
Example #57
0
    def test_get_keys_from_perspectives(self):
        # arbitrarily advance the clock a bit
        self.reactor.advance(100)

        SERVER_NAME = "server2"
        kr = keyring.Keyring(self.hs)
        testkey = signedjson.key.generate_signing_key("ver1")
        testverifykey = signedjson.key.get_verify_key(testkey)
        testverifykey_id = "ed25519:ver1"
        VALID_UNTIL_TS = 200 * 1000

        # valid response
        response = {
            "server_name": SERVER_NAME,
            "old_verify_keys": {},
            "valid_until_ts": VALID_UNTIL_TS,
            "verify_keys": {
                testverifykey_id: {
                    "key":
                    signedjson.key.encode_verify_key_base64(testverifykey)
                }
            },
        }

        persp_resp = {
            "server_keys":
            [self.mock_perspective_server.get_signed_response(response)]
        }

        def post_json(destination, path, data, **kwargs):
            self.assertEqual(destination,
                             self.mock_perspective_server.server_name)
            self.assertEqual(path, "/_matrix/key/v2/query")

            # check that the request is for the expected key
            q = data["server_keys"]
            self.assertEqual(list(q[SERVER_NAME].keys()), ["key1"])
            return persp_resp

        self.http_client.post_json.side_effect = post_json

        server_name_and_key_ids = [(SERVER_NAME, ("key1", ))]
        keys = self.get_success(
            kr.get_keys_from_perspectives(server_name_and_key_ids))
        self.assertIn(SERVER_NAME, keys)
        k = keys[SERVER_NAME][testverifykey_id]
        self.assertEqual(k, testverifykey)
        self.assertEqual(k.alg, "ed25519")
        self.assertEqual(k.version, "ver1")

        # check that the perspectives store is correctly updated
        lookup_triplet = (SERVER_NAME, testverifykey_id, None)
        key_json = self.get_success(
            self.hs.get_datastore().get_server_keys_json([lookup_triplet]))
        res = key_json[lookup_triplet]
        self.assertEqual(len(res), 1)
        res = res[0]
        self.assertEqual(res["key_id"], testverifykey_id)
        self.assertEqual(res["from_server"],
                         self.mock_perspective_server.server_name)
        self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000)
        self.assertEqual(res["ts_valid_until_ms"], VALID_UNTIL_TS)

        self.assertEqual(
            bytes(res["key_json"]),
            canonicaljson.encode_canonical_json(persp_resp["server_keys"][0]),
        )
Example #58
0
    if backend_type := os.environ.get('TERRAFORM_BACKEND_TYPE'):
        headers['backend_type'] = backend_type

    headers['label'] = os.environ.get('INPUT_LABEL') or None

    plan_modifier = {}
    if target := os.environ.get('INPUT_TARGET'):
        plan_modifier['target'] = sorted(t.strip() for t in target.replace(',', '\n').split('\n') if t.strip())

    if replace := os.environ.get('INPUT_REPLACE'):
        plan_modifier['replace'] = sorted(t.strip() for t in replace.replace(',', '\n').split('\n') if t.strip())

    if plan_modifier:
        debug(f'Plan modifier: {plan_modifier}')
        headers['plan_modifier'] = hashlib.sha256(canonicaljson.encode_canonical_json(plan_modifier)).hexdigest()

    return find_comment(github, issue_url, username, headers, legacy_description)

def is_approved(proposed_plan: str, comment: TerraformComment) -> bool:

    if approved_plan_hash := comment.headers.get('plan_hash'):
        debug('Approving plan based on plan hash')
        return plan_hash(proposed_plan, comment.issue_url) == approved_plan_hash
    else:
        debug('Approving plan based on plan text')
        return plan_cmp(proposed_plan, comment.body)

def format_plan_text(plan_text: str) -> Tuple[str, str]:
    """
    Format the given plan for insertion into a PR comment
    """
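
Hashing the canonical encoding, as the plan_modifier header above does, makes the digest independent of dict key order (the target and replace lists are already sorted before hashing). A short sketch of that property with hypothetical values:

import hashlib
import canonicaljson

a = {"target": ["aws_instance.a", "aws_instance.b"], "replace": ["null_resource.x"]}
b = {"replace": ["null_resource.x"], "target": ["aws_instance.a", "aws_instance.b"]}

def digest(d):
    return hashlib.sha256(canonicaljson.encode_canonical_json(d)).hexdigest()

# Key order doesn't change the canonical bytes, so the digests match.
assert digest(a) == digest(b)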
Example #59
0
    def canonical_json(self):
        return canonicaljson.encode_canonical_json(self.__dict__)
Example #60
0
def dict_equals(self, other):
    me = encode_canonical_json(self.get_pdu_json())
    them = encode_canonical_json(other.get_pdu_json())
    return me == them