Example #1
    async def _async_render_GET(self, request: SynapseRequest) -> None:
        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = await self.auth.get_user_by_req(request)
        url = parse_string(request, "url", required=True)
        ts = parse_integer(request, "ts")
        if ts is None:
            ts = self.clock.time_msec()

        # XXX: we could move this into _do_preview if we wanted.
        url_tuple = urlparse.urlsplit(url)
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug(
                    "Matching attrib '%s' with value '%s' against pattern '%s'",
                    attrib,
                    value,
                    pattern,
                )

                if value is None:
                    match = False
                    continue

                if pattern.startswith("^"):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib),
                                           pattern):
                        match = False
                        continue
            if match:
                logger.warning("URL %s blocked by url_blacklist entry %s", url,
                               entry)
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry",
                    Codes.UNKNOWN)

        # the in-memory cache:
        # * ensures that only one request is active at a time
        # * takes load off the DB for the thundering herds
        # * also caches any failures (unlike the DB) so we don't keep
        #    requesting the same endpoint

        observable = self._cache.get(url)

        if not observable:
            download = run_in_background(self._do_preview, url, requester.user,
                                         ts)
            observable = ObservableDeferred(download, consumeErrors=True)
            self._cache[url] = observable
        else:
            logger.info("Returning cached response")

        og = await make_deferred_yieldable(observable.observe())
        respond_with_json_bytes(request, 200, og, send_cors=True)
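
The blacklist check above is repeated almost verbatim in several of the later examples. Pulled out as a standalone function it is easier to test; this is a minimal sketch under the same entry format (a dict mapping urlsplit attribute names to patterns), and the name url_is_blocked is ours, not Synapse's.

import fnmatch
import re
from urllib.parse import urlsplit


def url_is_blocked(url, blacklist):
    """True if every attribute pattern of some blacklist entry matches."""
    url_tuple = urlsplit(url)
    for entry in blacklist:
        match = True
        for attrib, pattern in entry.items():
            value = getattr(url_tuple, attrib)
            if value is None:
                match = False
                break
            # patterns starting with "^" are regexes, anything else a glob
            if pattern.startswith("^"):
                if not re.match(pattern, value):
                    match = False
                    break
            elif not fnmatch.fnmatch(value, pattern):
                match = False
                break
        if match:
            return True
    return False


# e.g. block previews of any .onion host
assert url_is_blocked("http://foo.onion/page", [{"netloc": "*.onion"}])
assert not url_is_blocked("https://example.com/", [{"netloc": "*.onion"}])
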
Example #2
    def _async_render_POST(self, request):
        content = parse_json_object_from_request(request)

        for row in content["invalidate"]:
            name = row["name"]
            keys = tuple(row["keys"])

            getattr(self.store, name).invalidate(keys)

        respond_with_json_bytes(request, 200, b"{}")
Example #3
    def _async_render_POST(self, request):
        content = parse_json_object_from_request(request)

        process_id = content["process_id"]
        syncing_user_ids = content["syncing_users"]

        yield self.presence_handler.update_external_syncs(
            process_id, set(syncing_user_ids)
        )

        respond_with_json_bytes(request, 200, b"{}")
Example #4
    def _async_render_POST(self, request):
        content = parse_json_object_from_request(request)

        for remove in content["remove"]:
            yield self.store.delete_pusher_by_app_id_pushkey_user_id(
                remove["app_id"],
                remove["push_key"],
                remove["user_id"],
            )

        self.notifier.on_new_replication_data()

        respond_with_json_bytes(request, 200, b"{}")
Example #6
    def render_GET(self, request):
        # no auth here on purpose, to allow anyone to view, even across home
        # servers.

        # TODO: A little crude here, we could do this better.
        filename = request.path.split('/')[-1]
        # be paranoid
        filename = re.sub(r"[^0-9A-Za-z._-]", "", filename)

        file_path = self.directory + "/" + filename

        logger.debug("Searching for %s", file_path)

        if os.path.isfile(file_path):
            # filename has the content type
            base64_content_type = filename.split(".")[1]
            content_type = base64.urlsafe_b64decode(base64_content_type)
            logger.info("Sending file %s", file_path)
            f = open(file_path, 'rb')
            request.setHeader('Content-Type', content_type)

            # cache for at least a day.
            # XXX: we might want to turn this off for data we don't want to
            # recommend caching as it's sensitive or private - or at least
            # select private. don't bother setting Expires as all our matrix
            # clients are smart enough to be happy with Cache-Control (right?)
            request.setHeader("Cache-Control",
                              "public,max-age=86400,s-maxage=86400")

            d = FileSender().beginFileTransfer(f, request)

            # after the file has been sent, clean up and finish the request
            def cbFinished(ignored):
                f.close()
                request.finish()

            d.addCallback(cbFinished)
        else:
            respond_with_json_bytes(request,
                                    404,
                                    json.dumps(
                                        cs_error("Not found",
                                                 code=Codes.NOT_FOUND)),
                                    send_cors=True)

        return server.NOT_DONE_YET
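
The handler above recovers the Content-Type from the filename itself: the second dot-separated component is the urlsafe-base64-encoded media type. A small sketch of that scheme, with a hypothetical make_filename helper for the encoding side:

import base64


def make_filename(token, content_type):
    # hypothetical encoder for the scheme the handler decodes
    encoded = base64.urlsafe_b64encode(content_type.encode("ascii"))
    return "%s.%s" % (token, encoded.decode("ascii"))


def content_type_from_filename(filename):
    # mirrors the decoding done in render_GET above
    return base64.urlsafe_b64decode(filename.split(".")[1])


name = make_filename("abc123", "image/png")
print(name)                               # abc123.aW1hZ2UvcG5n
print(content_type_from_filename(name))   # b'image/png'
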
Example #7
    def render_GET(self, request):
        time_now = self.clock.time_msec()
        # Update the expiry time if less than half the interval remains.
        if time_now + self.config.key_refresh_interval / 2 > self.valid_until_ts:
            self.update_response_body(time_now)
        return respond_with_json_bytes(
            request, 200, self.response_body,
        )
Example #8
    def render_GET(self, request):
        # no auth here on purpose, to allow anyone to view, even across home
        # servers.

        # TODO: A little crude here, we could do this better.
        filename = request.path.split('/')[-1]
        # be paranoid
        filename = re.sub(r"[^0-9A-Za-z._-]", "", filename)

        file_path = self.directory + "/" + filename

        logger.debug("Searching for %s", file_path)

        if os.path.isfile(file_path):
            # filename has the content type
            base64_content_type = filename.split(".")[1]
            content_type = base64.urlsafe_b64decode(base64_content_type)
            logger.info("Sending file %s", file_path)
            f = open(file_path, 'rb')
            request.setHeader('Content-Type', content_type)

            # cache for at least a day.
            # XXX: we might want to turn this off for data we don't want to
            # recommend caching as it's sensitive or private - or at least
            # select private. don't bother setting Expires as all our matrix
            # clients are smart enough to be happy with Cache-Control (right?)
            request.setHeader(
                "Cache-Control", "public,max-age=86400,s-maxage=86400"
            )

            d = FileSender().beginFileTransfer(f, request)

            # after the file has been sent, clean up and finish the request
            def cbFinished(ignored):
                f.close()
                finish_request(request)
            d.addCallback(cbFinished)
        else:
            respond_with_json_bytes(
                request,
                404,
                json.dumps(cs_error("Not found", code=Codes.NOT_FOUND)),
                send_cors=True)

        return server.NOT_DONE_YET
Example #9
    def _async_render_GET(self, request):
        try:
            server_keys, certificate = yield fetch_server_key(
                self.server_name,
                self.key_server.ssl_context_factory
            )

            resp_server_name = server_keys[u"server_name"]
            verify_key_b64 = server_keys[u"signature_verify_key"]
            tls_certificate_b64 = server_keys[u"tls_certificate"]
            verify_key = VerifyKey(decode_base64(verify_key_b64))

            if resp_server_name != self.server_name:
                raise ValueError("Wrong server name '%s' != '%s'" %
                                 (resp_server_name, self.server_name))

            x509_certificate_bytes = crypto.dump_certificate(
                crypto.FILETYPE_ASN1,
                certificate
            )

            if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
                raise ValueError("TLS certificate doesn't match")

            verify_signed_json(server_keys, self.server_name, verify_key)

            signed_json = sign_json(
                server_keys,
                self.key_server.server_name,
                self.key_server.signing_key
            )

            json_bytes = encode_canonical_json(signed_json)
            respond_with_json_bytes(request, 200, json_bytes)

        except Exception as e:
            json_bytes = encode_canonical_json({
                u"error": {u"code": 502, u"message": e.message}
            })
            respond_with_json_bytes(request, 502, json_bytes)
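
The example leans on the signedjson library for sign_json and verify_signed_json. A self-contained round trip, with a freshly generated key and an arbitrary version string, looks roughly like this:

from signedjson.key import generate_signing_key, get_verify_key
from signedjson.sign import sign_json, verify_signed_json

signing_key = generate_signing_key("v1")  # arbitrary key version string
verify_key = get_verify_key(signing_key)

signed = sign_json({"server_name": "example.com"}, "example.com", signing_key)
# raises SignatureVerifyException if the signature does not check out
verify_signed_json(signed, "example.com", verify_key)
print("signature verified")
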
Example #10
    def _async_render(self, request):
        try:
            # TODO: The checks here are a bit late. The content will have
            # already been uploaded to a tmp file at this point
            content_length = request.getHeader("Content-Length")
            if content_length is None:
                raise SynapseError(
                    msg="Request must specify a Content-Length", code=400
                )
            if int(content_length) > self.max_upload_size:
                raise SynapseError(
                    msg="Upload request body is too large",
                    code=413,
                )

            fname = yield self.map_request_to_name(request)

            # TODO I have a suspicious feeling this is just going to block
            with open(fname, "wb") as f:
                f.write(request.content.read())

            # FIXME (erikj): These should use constants.
            file_name = os.path.basename(fname)
            # FIXME: we can't assume what the repo's public mounted path is
            # ...plus self-signed SSL won't work to remote clients anyway
            # ...and we can't assume that it's SSL anyway, as we might want to
            # serve it via the non-SSL listener...
            url = "%s/_matrix/content/%s" % (
                self.external_addr, file_name
            )

            respond_with_json_bytes(request, 200,
                                    json.dumps({"content_token": url}),
                                    send_cors=True)

        except CodeMessageException as e:
            logger.exception(e)
            respond_with_json_bytes(request, e.code,
                                    json.dumps(cs_exception(e)))
        except Exception as e:
            logger.error("Failed to store file: %s", e)
            respond_with_json_bytes(
                request,
                500,
                json.dumps({"error": "Internal server error"}),
                send_cors=True)
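
The TODO above notes that the synchronous f.write() blocks the reactor thread. One conventional Twisted remedy is to push the write into the thread pool with deferToThread; this is only a sketch of that shape, not how the code was eventually fixed:

from twisted.internet.threads import deferToThread


def _write_file(fname, content):
    # runs in a threadpool thread, so blocking I/O is acceptable here
    with open(fname, "wb") as f:
        f.write(content)


# inside the render method, instead of writing inline:
#     yield deferToThread(_write_file, fname, request.content.read())
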
Example #12
    def _async_render_GET(self, request):

        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = request.args.get("url")[0]
        if "ts" in request.args:
            ts = int(request.args.get("ts")[0])
        else:
            ts = self.clock.time_msec()

        url_tuple = urlparse.urlsplit(url)
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug(
                    "Matching attrib '%s' with value '%s' against pattern '%s'",
                    attrib, value, pattern,
                )

                if value is None:
                    match = False
                    continue

                if pattern.startswith('^'):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib), pattern):
                        match = False
                        continue
            if match:
                logger.warning(
                    "URL %s blocked by url_blacklist entry %s", url, entry
                )
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry",
                    Codes.UNKNOWN
                )

        # first check the memory cache - good to handle all the clients on this
        # HS thundering away to preview the same URL at the same time.
        og = self.cache.get(url)
        if og:
            respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)
            return

        # then check the URL cache in the DB (which will also provide us with
        # historical previews, if we have any)
        cache_result = yield self.store.get_url_cache(url, ts)
        if (
            cache_result and
            cache_result["download_ts"] + cache_result["expires"] > ts and
            cache_result["response_code"] / 100 == 2
        ):
            respond_with_json_bytes(
                request, 200, cache_result["og"].encode('utf-8'),
                send_cors=True
            )
            return

        # Ensure only one download for a given URL is active at a time
        download = self.downloads.get(url)
        if download is None:
            download = self._download_url(url, requester.user)
            download = ObservableDeferred(
                download,
                consumeErrors=True
            )
            self.downloads[url] = download

            @download.addBoth
            def callback(media_info):
                del self.downloads[url]
                return media_info
        media_info = yield download.observe()

        # FIXME: we should probably update our cache now anyway, so that
        # even if the OG calculation raises, we don't keep hammering on the
        # remote server.  For now, leave it uncached to aid debugging OG
        # calculation problems

        logger.debug("got media_info of '%s'" % media_info)

        if self._is_media(media_info['media_type']):
            dims = yield self.media_repo._generate_local_thumbnails(
                media_info['filesystem_id'], media_info
            )

            og = {
                "og:description": media_info['download_name'],
                "og:image": "mxc://%s/%s" % (
                    self.server_name, media_info['filesystem_id']
                ),
                "og:image:type": media_info['media_type'],
                "matrix:image:size": media_info['media_length'],
            }

            if dims:
                og["og:image:width"] = dims['width']
                og["og:image:height"] = dims['height']
            else:
                logger.warning("Couldn't get dims for %s", url)

            # define our OG response for this media
        elif self._is_html(media_info['media_type']):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            from lxml import etree

            # read the raw bytes; lxml handles decoding via the parser below
            with open(media_info['filename'], 'rb') as f:
                body = f.read()

            # clobber the encoding from the content-type, or default to utf-8
            # XXX: this overrides any <meta/> or XML charset headers in the body
            # which may pose problems, but so far seems to work okay.
            match = re.match(r'.*; *charset=(.*?)(;|$)', media_info['media_type'], re.I)
            encoding = match.group(1) if match else "utf-8"

            try:
                parser = etree.HTMLParser(recover=True, encoding=encoding)
                tree = etree.fromstring(body, parser)
                og = yield self._calc_og(tree, media_info, requester)
            except UnicodeDecodeError:
                # blindly try decoding the body as utf-8, which seems to fix
                # the charset mismatches on https://google.com
                parser = etree.HTMLParser(recover=True, encoding=encoding)
                tree = etree.fromstring(body.decode('utf-8', 'ignore'), parser)
                og = yield self._calc_og(tree, media_info, requester)

        else:
            logger.warning("Failed to find any OG data in %s", url)
            og = {}

        logger.debug("Calculated OG for %s as %s" % (url, og))

        # store OG in ephemeral in-memory cache
        self.cache[url] = og

        # store OG in history-aware DB cache
        yield self.store.store_url_cache(
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"],
            json.dumps(og),
            media_info["filesystem_id"],
            media_info["created_ts"],
        )

        respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)
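
The charset sniffing above hinges on one regex over the Content-Type. A quick standalone check of what it extracts for a few representative values:

import re

CHARSET_RE = re.compile(r'.*; *charset=(.*?)(;|$)', re.I)

for media_type in (
    "text/html; charset=ISO-8859-1",
    "text/html; charset=utf-8; someparam=x",
    "text/html",
):
    match = CHARSET_RE.match(media_type)
    print(media_type, "->", match.group(1) if match else "utf-8")
# text/html; charset=ISO-8859-1 -> ISO-8859-1
# text/html; charset=utf-8; someparam=x -> utf-8
# text/html -> utf-8
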
Example #13
    def render_GET(self, request):
        return respond_with_json_bytes(
            request, 200, self.response_body,
            version_string=self.version_string
        )

    def _async_render_GET(self, request):

        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = request.args.get("url")[0]
        if "ts" in request.args:
            ts = int(request.args.get("ts")[0])
        else:
            ts = self.clock.time_msec()

        url_tuple = urlparse.urlsplit(url)
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug(("Matching attrib '%s' with value '%s' against"
                              " pattern '%s'") % (attrib, value, pattern))

                if value is None:
                    match = False
                    continue

                if pattern.startswith('^'):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib),
                                           pattern):
                        match = False
                        continue
            if match:
                logger.warn("URL %s blocked by url_blacklist entry %s", url,
                            entry)
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry",
                    Codes.UNKNOWN)

        # first check the memory cache - good to handle all the clients on this
        # HS thundering away to preview the same URL at the same time.
        og = self.cache.get(url)
        if og:
            respond_with_json_bytes(request,
                                    200,
                                    json.dumps(og),
                                    send_cors=True)
            return

        # then check the URL cache in the DB (which will also provide us with
        # historical previews, if we have any)
        cache_result = yield self.store.get_url_cache(url, ts)
        if (cache_result
                and cache_result["download_ts"] + cache_result["expires"] > ts
                and cache_result["response_code"] / 100 == 2):
            respond_with_json_bytes(request,
                                    200,
                                    cache_result["og"].encode('utf-8'),
                                    send_cors=True)
            return

        # Ensure only one download for a given URL is active at a time
        download = self.downloads.get(url)
        if download is None:
            download = self._download_url(url, requester.user)
            download = ObservableDeferred(download, consumeErrors=True)
            self.downloads[url] = download

            @download.addBoth
            def callback(media_info):
                del self.downloads[url]
                return media_info

        media_info = yield download.observe()

        # FIXME: we should probably update our cache now anyway, so that
        # even if the OG calculation raises, we don't keep hammering on the
        # remote server.  For now, leave it uncached to aid debugging OG
        # calculation problems

        logger.debug("got media_info of '%s'" % media_info)

        if _is_media(media_info['media_type']):
            dims = yield self.media_repo._generate_local_thumbnails(
                media_info['filesystem_id'],
                media_info,
                url_cache=True,
            )

            og = {
                "og:description": media_info['download_name'],
                "og:image": "mxc://%s/%s" % (
                    self.server_name, media_info['filesystem_id']
                ),
                "og:image:type": media_info['media_type'],
                "matrix:image:size": media_info['media_length'],
            }

            if dims:
                og["og:image:width"] = dims['width']
                og["og:image:height"] = dims['height']
            else:
                logger.warning("Couldn't get dims for %s", url)

            # define our OG response for this media
        elif _is_html(media_info['media_type']):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            # read the raw bytes; decode_and_calc_og handles decoding below
            with open(media_info['filename'], 'rb') as f:
                body = f.read()

            # clobber the encoding from the content-type, or default to utf-8
            # XXX: this overrides any <meta/> or XML charset headers in the body
            # which may pose problems, but so far seems to work okay.
            match = re.match(r'.*; *charset=(.*?)(;|$)',
                             media_info['media_type'], re.I)
            encoding = match.group(1) if match else "utf-8"

            og = decode_and_calc_og(body, media_info['uri'], encoding)

            # pre-cache the image for posterity
            # FIXME: it might be cleaner to use the same flow as the main /preview_url
            # request itself and benefit from the same caching etc.  But for now we
            # just rely on the caching on the master request to speed things up.
            if 'og:image' in og and og['og:image']:
                image_info = yield self._download_url(
                    _rebase_url(og['og:image'], media_info['uri']),
                    requester.user)

                if _is_media(image_info['media_type']):
                    # TODO: make sure we don't choke on white-on-transparent images
                    dims = yield self.media_repo._generate_local_thumbnails(
                        image_info['filesystem_id'],
                        image_info,
                        url_cache=True,
                    )
                    if dims:
                        og["og:image:width"] = dims['width']
                        og["og:image:height"] = dims['height']
                    else:
                        logger.warn("Couldn't get dims for %s" %
                                    og["og:image"])

                    og["og:image"] = "mxc://%s/%s" % (
                        self.server_name, image_info['filesystem_id'])
                    og["og:image:type"] = image_info['media_type']
                    og["matrix:image:size"] = image_info['media_length']
                else:
                    del og["og:image"]
        else:
            logger.warning("Failed to find any OG data in %s", url)
            og = {}

        logger.debug("Calculated OG for %s as %s" % (url, og))

        # store OG in ephemeral in-memory cache
        self.cache[url] = og

        # store OG in history-aware DB cache
        yield self.store.store_url_cache(
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"],
            json.dumps(og),
            media_info["filesystem_id"],
            media_info["created_ts"],
        )

        respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)
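
_rebase_url (not shown here) resolves a possibly-relative og:image URL against the page it was found on. Assuming standard RFC 3986 resolution, urllib.parse.urljoin is a reasonable stand-in:

from urllib.parse import urljoin

print(urljoin("https://example.com/article/1", "/img/cover.png"))
# https://example.com/img/cover.png
print(urljoin("https://example.com/article/1", "https://cdn.example.net/a.png"))
# https://cdn.example.net/a.png  (absolute URLs pass through unchanged)
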
Example #15
    def render_OPTIONS(self, request):
        respond_with_json_bytes(request, 200, {}, send_cors=True)
        return server.NOT_DONE_YET
Example #16
    def query_keys(self, request, query, query_remote_on_cache_miss=False):
        logger.info("Handling query for keys %r", query)
        store_queries = []
        for server_name, key_ids in query.items():
            if not key_ids:
                key_ids = (None,)
            for key_id in key_ids:
                store_queries.append((server_name, key_id, None))

        cached = yield self.store.get_server_keys_json(store_queries)

        json_results = set()

        time_now_ms = self.clock.time_msec()

        cache_misses = dict()
        for (server_name, key_id, from_server), results in cached.items():
            results = [
                (result["ts_added_ms"], result) for result in results
            ]

            if not results and key_id is not None:
                cache_misses.setdefault(server_name, set()).add(key_id)
                continue

            if key_id is not None:
                ts_added_ms, most_recent_result = max(results)
                ts_valid_until_ms = most_recent_result["ts_valid_until_ms"]
                req_key = query.get(server_name, {}).get(key_id, {})
                req_valid_until = req_key.get("minimum_valid_until_ts")
                miss = False
                if req_valid_until is not None:
                    if ts_valid_until_ms < req_valid_until:
                        logger.debug(
                            "Cached response for %r/%r is older than requested"
                            ": valid_until (%r) < minimum_valid_until (%r)",
                            server_name, key_id,
                            ts_valid_until_ms, req_valid_until
                        )
                        miss = True
                    else:
                        logger.debug(
                            "Cached response for %r/%r is newer than requested"
                            ": valid_until (%r) >= minimum_valid_until (%r)",
                            server_name, key_id,
                            ts_valid_until_ms, req_valid_until
                        )
                elif (ts_added_ms + ts_valid_until_ms) / 2 < time_now_ms:
                    logger.debug(
                        "Cached response for %r/%r is too old"
                        ": (added (%r) + valid_until (%r)) / 2 < now (%r)",
                        server_name, key_id,
                        ts_added_ms, ts_valid_until_ms, time_now_ms
                    )
                    # We are more than half way through the lifetime of the
                    # response, so we should fetch a fresh copy.
                    miss = True
                else:
                    logger.debug(
                        "Cached response for %r/%r is still valid"
                        ": (added (%r) + valid_until (%r)) / 2 < now (%r)",
                        server_name, key_id,
                        ts_added_ms, ts_valid_until_ms, time_now_ms
                    )

                if miss:
                    cache_misses.setdefault(server_name, set()).add(key_id)
                json_results.add(bytes(most_recent_result["key_json"]))
            else:
                for ts_added, result in results:
                    json_results.add(bytes(result["key_json"]))

        if cache_misses and query_remote_on_cache_miss:
            for server_name, key_ids in cache_misses.items():
                try:
                    yield self.keyring.get_server_verify_key_v2_direct(
                        server_name, key_ids
                    )
                except Exception:
                    logger.exception("Failed to get key for %r", server_name)
            yield self.query_keys(
                request, query, query_remote_on_cache_miss=False
            )
        else:
            result_io = BytesIO()
            result_io.write(b"{\"server_keys\":")
            sep = b"["
            for json_bytes in json_results:
                result_io.write(sep)
                result_io.write(json_bytes)
                sep = b","
            if sep == b"[":
                result_io.write(sep)
            result_io.write(b"]}")

            respond_with_json_bytes(
                request, 200, result_io.getvalue(),
                version_string=self.version_string
            )
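
The freshness rule buried in the middle of query_keys refetches a key once more than half of its lifetime has elapsed. Isolated as a pure function (the name is ours, not Synapse's) it is easier to see and test:

def is_stale(ts_added_ms, ts_valid_until_ms, time_now_ms):
    """True once we are past the midpoint of the response's lifetime."""
    return (ts_added_ms + ts_valid_until_ms) / 2 < time_now_ms


# added at t=0ms, valid until t=1000ms: refetch from t=500ms onwards
assert not is_stale(0, 1000, 499)
assert is_stale(0, 1000, 501)
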
Example #17
    def query_keys(self, request, query, query_remote_on_cache_miss=False):
        logger.info("Handling query for keys %r", query)

        store_queries = []
        for server_name, key_ids in query.items():
            if (
                self.federation_domain_whitelist is not None
                and server_name not in self.federation_domain_whitelist
            ):
                logger.debug("Federation denied with %s", server_name)
                continue

            if not key_ids:
                key_ids = (None,)
            for key_id in key_ids:
                store_queries.append((server_name, key_id, None))

        cached = yield self.store.get_server_keys_json(store_queries)

        json_results = set()

        time_now_ms = self.clock.time_msec()

        cache_misses = dict()
        for (server_name, key_id, from_server), results in cached.items():
            results = [(result["ts_added_ms"], result) for result in results]

            if not results and key_id is not None:
                cache_misses.setdefault(server_name, set()).add(key_id)
                continue

            if key_id is not None:
                ts_added_ms, most_recent_result = max(results)
                ts_valid_until_ms = most_recent_result["ts_valid_until_ms"]
                req_key = query.get(server_name, {}).get(key_id, {})
                req_valid_until = req_key.get("minimum_valid_until_ts")
                miss = False
                if req_valid_until is not None:
                    if ts_valid_until_ms < req_valid_until:
                        logger.debug(
                            "Cached response for %r/%r is older than requested"
                            ": valid_until (%r) < minimum_valid_until (%r)",
                            server_name,
                            key_id,
                            ts_valid_until_ms,
                            req_valid_until,
                        )
                        miss = True
                    else:
                        logger.debug(
                            "Cached response for %r/%r is newer than requested"
                            ": valid_until (%r) >= minimum_valid_until (%r)",
                            server_name,
                            key_id,
                            ts_valid_until_ms,
                            req_valid_until,
                        )
                elif (ts_added_ms + ts_valid_until_ms) / 2 < time_now_ms:
                    logger.debug(
                        "Cached response for %r/%r is too old"
                        ": (added (%r) + valid_until (%r)) / 2 < now (%r)",
                        server_name,
                        key_id,
                        ts_added_ms,
                        ts_valid_until_ms,
                        time_now_ms,
                    )
                    # We are more than half way through the lifetime of the
                    # response, so we should fetch a fresh copy.
                    miss = True
                else:
                    logger.debug(
                        "Cached response for %r/%r is still valid"
                        ": (added (%r) + valid_until (%r)) / 2 < now (%r)",
                        server_name,
                        key_id,
                        ts_added_ms,
                        ts_valid_until_ms,
                        time_now_ms,
                    )

                if miss:
                    cache_misses.setdefault(server_name, set()).add(key_id)
                json_results.add(bytes(most_recent_result["key_json"]))
            else:
                for ts_added, result in results:
                    json_results.add(bytes(result["key_json"]))

        if cache_misses and query_remote_on_cache_miss:
            yield self.fetcher.get_keys(cache_misses)
            yield self.query_keys(request, query, query_remote_on_cache_miss=False)
        else:
            signed_keys = []
            for key_json in json_results:
                key_json = json.loads(key_json)
                for signing_key in self.config.key_server_signing_keys:
                    key_json = sign_json(key_json, self.config.server_name, signing_key)

                signed_keys.append(key_json)

            results = {"server_keys": signed_keys}

            respond_with_json_bytes(request, 200, encode_canonical_json(results))
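
encode_canonical_json here comes from the canonicaljson library: it serialises to compact UTF-8 bytes with sorted keys, so the bytes being signed are reproducible across implementations. For example:

from canonicaljson import encode_canonical_json

print(encode_canonical_json({"b": 2, "a": 1}))
# b'{"a":1,"b":2}'
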
Example #18
    def render_GET(self, request):
        return respond_with_json_bytes(request, 200, self.response_body)
Example #19
    def render_GET(self, request):
        return respond_with_json_bytes(request, 200, self.response_body)

    def _async_render_GET(self, request):

        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = parse_string(request, "url")
        if b"ts" in request.args:
            ts = parse_integer(request, "ts")
        else:
            ts = self.clock.time_msec()

        # XXX: we could move this into _do_preview if we wanted.
        url_tuple = urlparse.urlsplit(url)
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug(
                    "Matching attrib '%s' with value '%s' against pattern '%s'",
                    attrib, value, pattern,
                )

                if value is None:
                    match = False
                    continue

                if pattern.startswith('^'):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib), pattern):
                        match = False
                        continue
            if match:
                logger.warning(
                    "URL %s blocked by url_blacklist entry %s", url, entry
                )
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry",
                    Codes.UNKNOWN
                )

        # the in-memory cache:
        # * ensures that only one request is active at a time
        # * takes load off the DB for the thundering herds
        # * also caches any failures (unlike the DB) so we don't keep
        #    requesting the same endpoint

        observable = self._cache.get(url)

        if not observable:
            download = run_in_background(
                self._do_preview,
                url, requester.user, ts,
            )
            observable = ObservableDeferred(
                download,
                consumeErrors=True
            )
            self._cache[url] = observable
        else:
            logger.info("Returning cached response")

        og = yield make_deferred_yieldable(observable.observe())
        respond_with_json_bytes(request, 200, og, send_cors=True)
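
The _cache/ObservableDeferred pattern above lets every concurrent request for the same URL share a single in-flight download, and (deliberately) caches failures too. An asyncio analogue of the deduplication part, as a sketch rather than Synapse's implementation:

import asyncio

_inflight = {}


async def fetch_once(url, do_fetch):
    """Share one in-flight do_fetch(url) among concurrent callers."""
    task = _inflight.get(url)
    if task is None:
        # First caller starts the download; later callers await the same
        # task, so only one request per URL is ever active.  A failed task
        # stays cached, mirroring the failure-caching noted above.
        task = asyncio.ensure_future(do_fetch(url))
        _inflight[url] = task
    return await task
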
Example #21
    def render_OPTIONS(self, request):
        respond_with_json_bytes(request, 200, {}, send_cors=True)
        return server.NOT_DONE_YET