class PreviewUrlResource(Resource):
    isLeaf = True

    def __init__(self, hs, media_repo):
        Resource.__init__(self)

        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.version_string = hs.version_string
        self.filepaths = media_repo.filepaths
        self.max_spider_size = hs.config.max_spider_size
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.client = SpiderHttpClient(hs)
        self.media_repo = media_repo

        self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist

        # simple memory cache mapping urls to OG metadata
        self.cache = ExpiringCache(
            cache_name="url_previews",
            clock=self.clock,
            # don't spider URLs more often than once an hour
            expiry_ms=60 * 60 * 1000,
        )
        self.cache.start()

        self.downloads = {}

    def render_GET(self, request):
        self._async_render_GET(request)
        return NOT_DONE_YET

    @request_handler()
    @defer.inlineCallbacks
    def _async_render_GET(self, request):

        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = request.args.get("url")[0]
        if "ts" in request.args:
            ts = int(request.args.get("ts")[0])
        else:
            ts = self.clock.time_msec()

        url_tuple = urlparse.urlsplit(url)
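        # Each blacklist entry maps urlsplit() attribute names (e.g.
        # 'scheme', 'netloc', 'path') to a pattern.  Patterns starting with
        # '^' are matched as regexes; anything else is a shell-style glob.
        # All attributes in an entry must match for the URL to be blocked,
        # e.g. a hypothetical entry: {"scheme": "^https?$", "netloc": "*.example.com"}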
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug(("Matching attrib '%s' with value '%s' against"
                              " pattern '%s'") % (attrib, value, pattern))

                if value is None:
                    match = False
                    continue

                if pattern.startswith('^'):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib),
                                           pattern):
                        match = False
                        continue
            if match:
                logger.warn("URL %s blocked by url_blacklist entry %s", url,
                            entry)
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry",
                    Codes.UNKNOWN)

        # first check the memory cache - good to handle all the clients on this
        # HS thundering away to preview the same URL at the same time.
        og = self.cache.get(url)
        if og:
            respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)
            return

        # then check the URL cache in the DB (which will also provide us with
        # historical previews, if we have any)
        cache_result = yield self.store.get_url_cache(url, ts)
        if (cache_result
                and cache_result["download_ts"] + cache_result["expires"] > ts
                and cache_result["response_code"] / 100 == 2):
            respond_with_json_bytes(
                request, 200, cache_result["og"].encode('utf-8'),
                send_cors=True
            )
            return

        # Ensure only one download for a given URL is active at a time
        download = self.downloads.get(url)
        if download is None:
            download = self._download_url(url, requester.user)
            download = ObservableDeferred(download, consumeErrors=True)
            self.downloads[url] = download

            @download.addBoth
            def callback(media_info):
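                # drop this URL from the pending-downloads map on success
                # *or* failure, passing the result through to observers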
                del self.downloads[url]
                return media_info

        media_info = yield download.observe()

        # FIXME: we should probably update our cache now anyway, so that
        # even if the OG calculation raises, we don't keep hammering on the
        # remote server.  For now, leave it uncached to aid debugging OG
        # calculation problems

        logger.debug("got media_info of '%s'" % media_info)

        if _is_media(media_info['media_type']):
            dims = yield self.media_repo._generate_local_thumbnails(
                media_info['filesystem_id'],
                media_info,
                url_cache=True,
            )

            og = {
                "og:description": media_info['download_name'],
                "og:image": "mxc://%s/%s" % (
                    self.server_name, media_info['filesystem_id']
                ),
                "og:image:type": media_info['media_type'],
                "matrix:image:size": media_info['media_length'],
            }

            if dims:
                og["og:image:width"] = dims['width']
                og["og:image:height"] = dims['height']
            else:
                logger.warn("Couldn't get dims for %s" % url)

            # define our OG response for this media
        elif _is_html(media_info['media_type']):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            with open(media_info['filename']) as f:
                body = f.read()

            # clobber the encoding from the content-type, or default to utf-8
            # XXX: this overrides any <meta/> or XML charset headers in the body
            # which may pose problems, but so far seems to work okay.
            match = re.match(r'.*; *charset=(.*?)(;|$)',
                             media_info['media_type'], re.I)
            encoding = match.group(1) if match else "utf-8"

            og = decode_and_calc_og(body, media_info['uri'], encoding)

            # pre-cache the image for posterity
            # FIXME: it might be cleaner to use the same flow as the main /preview_url
            # request itself and benefit from the same caching etc.  But for now we
            # just rely on the caching on the master request to speed things up.
            if 'og:image' in og and og['og:image']:
                image_info = yield self._download_url(
                    _rebase_url(og['og:image'], media_info['uri']),
                    requester.user)

                if _is_media(image_info['media_type']):
                    # TODO: make sure we don't choke on white-on-transparent images
                    dims = yield self.media_repo._generate_local_thumbnails(
                        image_info['filesystem_id'],
                        image_info,
                        url_cache=True,
                    )
                    if dims:
                        og["og:image:width"] = dims['width']
                        og["og:image:height"] = dims['height']
                    else:
                        logger.warn("Couldn't get dims for %s" %
                                    og["og:image"])

                    og["og:image"] = "mxc://%s/%s" % (
                        self.server_name, image_info['filesystem_id'])
                    og["og:image:type"] = image_info['media_type']
                    og["matrix:image:size"] = image_info['media_length']
                else:
                    del og["og:image"]
        else:
            logger.warn("Failed to find any OG data in %s", url)
            og = {}

        logger.debug("Calculated OG for %s as %s" % (url, og))

        # store OG in ephemeral in-memory cache
        self.cache[url] = og

        # store OG in history-aware DB cache
        yield self.store.store_url_cache(
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"],
            json.dumps(og),
            media_info["filesystem_id"],
            media_info["created_ts"],
        )

        respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)

    @defer.inlineCallbacks
    def _download_url(self, url, user):
        # TODO: we should probably honour robots.txt... except in practice
        # we're most likely being explicitly triggered by a human rather than a
        # bot, so are we really a robot?

        # XXX: horrible duplication with base_resource's _download_remote_file()
        file_id = random_string(24)

        fname = self.filepaths.url_cache_filepath(file_id)
        self.media_repo._makedirs(fname)

        try:
            with open(fname, "wb") as f:
                logger.debug("Trying to get url '%s'" % url)
                length, headers, uri, code = yield self.client.get_file(
                    url,
                    output_stream=f,
                    max_size=self.max_spider_size,
                )
                # FIXME: pass through 404s and other error messages nicely

            media_type = headers["Content-Type"][0]
            time_now_ms = self.clock.time_msec()

            content_disposition = headers.get("Content-Disposition", None)
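            # e.g. a hypothetical header this block handles (RFC 5987 form):
            #   Content-Disposition: attachment; filename*=utf-8''r%C3%A9sum%C3%A9.pdf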
            if content_disposition:
                _, params = cgi.parse_header(content_disposition[0], )
                download_name = None

                # First check if there is a valid UTF-8 filename
                download_name_utf8 = params.get("filename*", None)
                if download_name_utf8:
                    if download_name_utf8.lower().startswith("utf-8''"):
                        download_name = download_name_utf8[7:]

                # If there isn't check for an ascii name.
                if not download_name:
                    download_name_ascii = params.get("filename", None)
                    if download_name_ascii and is_ascii(download_name_ascii):
                        download_name = download_name_ascii

                if download_name:
                    download_name = urlparse.unquote(download_name)
                    try:
                        download_name = download_name.decode("utf-8")
                    except UnicodeDecodeError:
                        download_name = None
            else:
                download_name = None

            yield self.store.store_local_media(
                media_id=file_id,
                media_type=media_type,
                time_now_ms=self.clock.time_msec(),
                upload_name=download_name,
                media_length=length,
                user_id=user,
                url_cache=url,
            )

        except Exception as e:
            os.remove(fname)
            raise SynapseError(500, ("Failed to download content: %s" % e),
                               Codes.UNKNOWN)

        defer.returnValue({
            "media_type": media_type,
            "media_length": length,
            "download_name": download_name,
            "created_ts": time_now_ms,
            "filesystem_id": file_id,
            "filename": fname,
            "uri": uri,
            "response_code": code,
            # FIXME: we should calculate a proper expiration based on the
            # Cache-Control and Expire headers.  But for now, assume 1 hour.
            "expires": 60 * 60 * 1000,
            "etag": headers["ETag"][0] if "ETag" in headers else None,
        })
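
# A minimal client-side sketch of how this resource might be exercised,
# assuming it is mounted at the standard Matrix media endpoint
# /_matrix/media/r0/preview_url; the homeserver URL and token below are
# hypothetical.
import json
import urllib

def fetch_preview(homeserver, page_url, access_token, ts=None):
    params = {"url": page_url, "access_token": access_token}
    if ts is not None:
        params["ts"] = ts  # optional: preview the page as of this ms timestamp
    query = urllib.urlencode(params)
    endpoint = "%s/_matrix/media/r0/preview_url?%s" % (homeserver, query)
    # returns the OG metadata dict, e.g. og["og:title"], og["og:image"]
    return json.loads(urllib.urlopen(endpoint).read())

# fetch_preview("https://matrix.example.com", "https://matrix.org/blog/",
#               "HYPOTHETICAL_TOKEN")
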
Example #2
class PreviewUrlResource(Resource):
    isLeaf = True

    def __init__(self, hs, media_repo):
        Resource.__init__(self)

        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.version_string = hs.version_string
        self.filepaths = media_repo.filepaths
        self.max_spider_size = hs.config.max_spider_size
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.client = SpiderHttpClient(hs)
        self.media_repo = media_repo

        self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist

        # simple memory cache mapping urls to OG metadata
        self.cache = ExpiringCache(
            cache_name="url_previews",
            clock=self.clock,
            # don't spider URLs more often than once an hour
            expiry_ms=60 * 60 * 1000,
        )
        self.cache.start()

        self.downloads = {}

    def render_GET(self, request):
        self._async_render_GET(request)
        return NOT_DONE_YET

    @request_handler()
    @defer.inlineCallbacks
    def _async_render_GET(self, request):

        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = request.args.get("url")[0]
        if "ts" in request.args:
            ts = int(request.args.get("ts")[0])
        else:
            ts = self.clock.time_msec()

        url_tuple = urlparse.urlsplit(url)
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug((
                    "Matching attrib '%s' with value '%s' against"
                    " pattern '%s'"
                ) % (attrib, value, pattern))

                if value is None:
                    match = False
                    continue

                if pattern.startswith('^'):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib), pattern):
                        match = False
                        continue
            if match:
                logger.warn(
                    "URL %s blocked by url_blacklist entry %s", url, entry
                )
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry",
                    Codes.UNKNOWN
                )

        # first check the memory cache - good to handle all the clients on this
        # HS thundering away to preview the same URL at the same time.
        og = self.cache.get(url)
        if og:
            respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)
            return

        # then check the URL cache in the DB (which will also provide us with
        # historical previews, if we have any)
        cache_result = yield self.store.get_url_cache(url, ts)
        if (
            cache_result and
            cache_result["download_ts"] + cache_result["expires"] > ts and
            cache_result["response_code"] / 100 == 2
        ):
            respond_with_json_bytes(
                request, 200, cache_result["og"].encode('utf-8'),
                send_cors=True
            )
            return

        # Ensure only one download for a given URL is active at a time
        download = self.downloads.get(url)
        if download is None:
            download = self._download_url(url, requester.user)
            download = ObservableDeferred(
                download,
                consumeErrors=True
            )
            self.downloads[url] = download

            @download.addBoth
            def callback(media_info):
                del self.downloads[url]
                return media_info
        media_info = yield download.observe()

        # FIXME: we should probably update our cache now anyway, so that
        # even if the OG calculation raises, we don't keep hammering on the
        # remote server.  For now, leave it uncached to aid debugging OG
        # calculation problems

        logger.debug("got media_info of '%s'" % media_info)

        if self._is_media(media_info['media_type']):
            dims = yield self.media_repo._generate_local_thumbnails(
                media_info['filesystem_id'], media_info
            )

            og = {
                "og:description": media_info['download_name'],
                "og:image": "mxc://%s/%s" % (
                    self.server_name, media_info['filesystem_id']
                ),
                "og:image:type": media_info['media_type'],
                "matrix:image:size": media_info['media_length'],
            }

            if dims:
                og["og:image:width"] = dims['width']
                og["og:image:height"] = dims['height']
            else:
                logger.warn("Couldn't get dims for %s" % url)

            # define our OG response for this media
        elif self._is_html(media_info['media_type']):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            from lxml import etree

            with open(media_info['filename']) as f:
                body = f.read()

            # clobber the encoding from the content-type, or default to utf-8
            # XXX: this overrides any <meta/> or XML charset headers in the body
            # which may pose problems, but so far seems to work okay.
            match = re.match(r'.*; *charset=(.*?)(;|$)', media_info['media_type'], re.I)
            encoding = match.group(1) if match else "utf-8"

            try:
                parser = etree.HTMLParser(recover=True, encoding=encoding)
                tree = etree.fromstring(body, parser)
                og = yield self._calc_og(tree, media_info, requester)
            except UnicodeDecodeError:
                # blindly try decoding the body as utf-8, which seems to fix
                # the charset mismatches on https://google.com
                parser = etree.HTMLParser(recover=True, encoding=encoding)
                tree = etree.fromstring(body.decode('utf-8', 'ignore'), parser)
                og = yield self._calc_og(tree, media_info, requester)

        else:
            logger.warn("Failed to find any OG data in %s", url)
            og = {}

        logger.debug("Calculated OG for %s as %s" % (url, og))

        # store OG in ephemeral in-memory cache
        self.cache[url] = og

        # store OG in history-aware DB cache
        yield self.store.store_url_cache(
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"],
            json.dumps(og),
            media_info["filesystem_id"],
            media_info["created_ts"],
        )

        respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)

    @defer.inlineCallbacks
    def _calc_og(self, tree, media_info, requester):
        # suck our tree into lxml and define our OG response.

        # if we see any image URLs in the OG response, then spider them
        # (although the client could choose to do this by asking for previews of those
        # URLs to avoid DoSing the server)

        # "og:type"         : "video",
        # "og:url"          : "https://www.youtube.com/watch?v=LXDBoHyjmtw",
        # "og:site_name"    : "YouTube",
        # "og:video:type"   : "application/x-shockwave-flash",
        # "og:description"  : "Fun stuff happening here",
        # "og:title"        : "RemoteJam - Matrix team hack for Disrupt Europe Hackathon",
        # "og:image"        : "https://i.ytimg.com/vi/LXDBoHyjmtw/maxresdefault.jpg",
        # "og:video:url"    : "http://www.youtube.com/v/LXDBoHyjmtw?version=3&autohide=1",
        # "og:video:width"  : "1280"
        # "og:video:height" : "720",
        # "og:video:secure_url": "https://www.youtube.com/v/LXDBoHyjmtw?version=3",

        og = {}
        for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
            og[tag.attrib['property']] = tag.attrib['content']

        # TODO: grab article: meta tags too, e.g.:

        # "article:publisher" : "https://www.facebook.com/thethudonline" />
        # "article:author" content="https://www.facebook.com/thethudonline" />
        # "article:tag" content="baby" />
        # "article:section" content="Breaking News" />
        # "article:published_time" content="2016-03-31T19:58:24+00:00" />
        # "article:modified_time" content="2016-04-01T18:31:53+00:00" />

        if 'og:title' not in og:
            # do some basic spidering of the HTML
            title = tree.xpath("(//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1]")
            og['og:title'] = title[0].text.strip() if title else None

        if 'og:image' not in og:
            # TODO: extract a favicon failing all else
            meta_image = tree.xpath(
                "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image']/@content"
            )
            if meta_image:
                og['og:image'] = self._rebase_url(meta_image[0], media_info['uri'])
            else:
                # TODO: consider inlined CSS styles as well as width & height attribs
                images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
                images = sorted(images, key=lambda i: (
                    -1 * int(i.attrib['width']) * int(i.attrib['height'])
                ))
                if not images:
                    images = tree.xpath("//img[@src]")
                if images:
                    og['og:image'] = images[0].attrib['src']

        # pre-cache the image for posterity
        # FIXME: it might be cleaner to use the same flow as the main /preview_url request
        # itself and benefit from the same caching etc.  But for now we just rely on the
        # caching on the master request to speed things up.
        if 'og:image' in og and og['og:image']:
            image_info = yield self._download_url(
                self._rebase_url(og['og:image'], media_info['uri']), requester.user
            )

            if self._is_media(image_info['media_type']):
                # TODO: make sure we don't choke on white-on-transparent images
                dims = yield self.media_repo._generate_local_thumbnails(
                    image_info['filesystem_id'], image_info
                )
                if dims:
                    og["og:image:width"] = dims['width']
                    og["og:image:height"] = dims['height']
                else:
                    logger.warn("Couldn't get dims for %s" % og["og:image"])

                og["og:image"] = "mxc://%s/%s" % (
                    self.server_name, image_info['filesystem_id']
                )
                og["og:image:type"] = image_info['media_type']
                og["matrix:image:size"] = image_info['media_length']
            else:
                del og["og:image"]

        if 'og:description' not in og:
            meta_description = tree.xpath(
                "//*/meta"
                "[translate(@name, 'DESCRIPTION', 'description')='description']"
                "/@content")
            if meta_description:
                og['og:description'] = meta_description[0]
            else:
                # grab any text nodes which are inside the <body/> tag...
                # unless they are within an HTML5 semantic markup tag...
                # <header/>, <nav/>, <aside/>, <footer/>
                # ...or if they are within a <script/> or <style/> tag.
                # This is a very very very coarse approximation to a plain text
                # render of the page.
                text_nodes = tree.xpath("//text()[not(ancestor::header | ancestor::nav | "
                                        "ancestor::aside | ancestor::footer | "
                                        "ancestor::script | ancestor::style)]" +
                                        "[ancestor::body]")
                text = ''
                for text_node in text_nodes:
                    if len(text) < 500:
                        text += text_node + ' '
                    else:
                        break
                text = re.sub(r'[\t ]+', ' ', text)
                text = re.sub(r'[\t \r\n]*[\r\n]+', '\n', text)
                text = text.strip()[:500]
                og['og:description'] = text if text else None

        # TODO: delete the url downloads to stop diskfilling,
        # as we only ever cared about its OG
        defer.returnValue(og)

    def _rebase_url(self, url, base):
        base = list(urlparse.urlparse(base))
        url = list(urlparse.urlparse(url))
        if not url[0]:  # fix up scheme
            url[0] = base[0] or "http"
        if not url[1]:  # fix up hostname
            url[1] = base[1]
            if not url[2].startswith('/'):
                url[2] = re.sub(r'/[^/]+$', '/', base[2]) + url[2]
        return urlparse.urlunparse(url)
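    # Illustrative (hypothetical) examples of the helper above, rebasing
    # relative image URLs against the page they came from:
    #   _rebase_url("images/logo.png", "http://example.com/blog/post.html")
    #       -> "http://example.com/blog/images/logo.png"
    #   _rebase_url("//cdn.example.com/a.png", "https://example.com/")
    #       -> "https://cdn.example.com/a.png"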

    @defer.inlineCallbacks
    def _download_url(self, url, user):
        # TODO: we should probably honour robots.txt... except in practice
        # we're most likely being explicitly triggered by a human rather than a
        # bot, so are we really a robot?

        # XXX: horrible duplication with base_resource's _download_remote_file()
        file_id = random_string(24)

        fname = self.filepaths.local_media_filepath(file_id)
        self.media_repo._makedirs(fname)

        try:
            with open(fname, "wb") as f:
                logger.debug("Trying to get url '%s'" % url)
                length, headers, uri, code = yield self.client.get_file(
                    url, output_stream=f, max_size=self.max_spider_size,
                )
                # FIXME: pass through 404s and other error messages nicely

            media_type = headers["Content-Type"][0]
            time_now_ms = self.clock.time_msec()

            content_disposition = headers.get("Content-Disposition", None)
            if content_disposition:
                _, params = cgi.parse_header(content_disposition[0],)
                download_name = None

                # First check if there is a valid UTF-8 filename
                download_name_utf8 = params.get("filename*", None)
                if download_name_utf8:
                    if download_name_utf8.lower().startswith("utf-8''"):
                        download_name = download_name_utf8[7:]

                # If there isn't check for an ascii name.
                if not download_name:
                    download_name_ascii = params.get("filename", None)
                    if download_name_ascii and is_ascii(download_name_ascii):
                        download_name = download_name_ascii

                if download_name:
                    download_name = urlparse.unquote(download_name)
                    try:
                        download_name = download_name.decode("utf-8")
                    except UnicodeDecodeError:
                        download_name = None
            else:
                download_name = None

            yield self.store.store_local_media(
                media_id=file_id,
                media_type=media_type,
                time_now_ms=self.clock.time_msec(),
                upload_name=download_name,
                media_length=length,
                user_id=user,
            )

        except Exception as e:
            os.remove(fname)
            raise SynapseError(
                500, ("Failed to download content: %s" % e),
                Codes.UNKNOWN
            )

        defer.returnValue({
            "media_type": media_type,
            "media_length": length,
            "download_name": download_name,
            "created_ts": time_now_ms,
            "filesystem_id": file_id,
            "filename": fname,
            "uri": uri,
            "response_code": code,
            # FIXME: we should calculate a proper expiration based on the
            # Cache-Control and Expire headers.  But for now, assume 1 hour.
            "expires": 60 * 60 * 1000,
            "etag": headers["ETag"][0] if "ETag" in headers else None,
        })

    def _is_media(self, content_type):
        if content_type.lower().startswith("image/"):
            return True

    def _is_html(self, content_type):
        content_type = content_type.lower()
        if (
            content_type.startswith("text/html") or
            content_type.startswith("application/xhtml")
        ):
            return True
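Example #3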
class PreviewUrlResource(Resource):
    isLeaf = True

    def __init__(self, hs, media_repo):
        Resource.__init__(self)

        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.version_string = hs.version_string
        self.filepaths = media_repo.filepaths
        self.max_spider_size = hs.config.max_spider_size
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.client = SpiderHttpClient(hs)
        self.media_repo = media_repo

        self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist

        # simple memory cache mapping urls to OG metadata
        self.cache = ExpiringCache(
            cache_name="url_previews",
            clock=self.clock,
            # don't spider URLs more often than once an hour
            expiry_ms=60 * 60 * 1000,
        )
        self.cache.start()

        self.downloads = {}

    def render_GET(self, request):
        self._async_render_GET(request)
        return NOT_DONE_YET

    @request_handler()
    @defer.inlineCallbacks
    def _async_render_GET(self, request):

        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = request.args.get("url")[0]
        if "ts" in request.args:
            ts = int(request.args.get("ts")[0])
        else:
            ts = self.clock.time_msec()

        url_tuple = urlparse.urlsplit(url)
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug((
                    "Matching attrib '%s' with value '%s' against"
                    " pattern '%s'"
                ) % (attrib, value, pattern))

                if value is None:
                    match = False
                    continue

                if pattern.startswith('^'):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib), pattern):
                        match = False
                        continue
            if match:
                logger.warn(
                    "URL %s blocked by url_blacklist entry %s", url, entry
                )
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry",
                    Codes.UNKNOWN
                )

        # first check the memory cache - good to handle all the clients on this
        # HS thundering away to preview the same URL at the same time.
        og = self.cache.get(url)
        if og:
            respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)
            return

        # then check the URL cache in the DB (which will also provide us with
        # historical previews, if we have any)
        cache_result = yield self.store.get_url_cache(url, ts)
        if (
            cache_result and
            cache_result["download_ts"] + cache_result["expires"] > ts and
            cache_result["response_code"] / 100 == 2
        ):
            respond_with_json_bytes(
                request, 200, cache_result["og"].encode('utf-8'),
                send_cors=True
            )
            return

        # Ensure only one download for a given URL is active at a time
        download = self.downloads.get(url)
        if download is None:
            download = self._download_url(url, requester.user)
            download = ObservableDeferred(
                download,
                consumeErrors=True
            )
            self.downloads[url] = download

            @download.addBoth
            def callback(media_info):
                del self.downloads[url]
                return media_info
        media_info = yield download.observe()

        # FIXME: we should probably update our cache now anyway, so that
        # even if the OG calculation raises, we don't keep hammering on the
        # remote server.  For now, leave it uncached to aid debugging OG
        # calculation problems

        logger.debug("got media_info of '%s'" % media_info)

        if self._is_media(media_info['media_type']):
            dims = yield self.media_repo._generate_local_thumbnails(
                media_info['filesystem_id'], media_info
            )

            og = {
                "og:description": media_info['download_name'],
                "og:image": "mxc://%s/%s" % (
                    self.server_name, media_info['filesystem_id']
                ),
                "og:image:type": media_info['media_type'],
                "matrix:image:size": media_info['media_length'],
            }

            if dims:
                og["og:image:width"] = dims['width']
                og["og:image:height"] = dims['height']
            else:
                logger.warn("Couldn't get dims for %s" % url)

            # define our OG response for this media
        elif self._is_html(media_info['media_type']):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            from lxml import etree

            with open(media_info['filename']) as f:
                body = f.read()

            # clobber the encoding from the content-type, or default to utf-8
            # XXX: this overrides any <meta/> or XML charset headers in the body
            # which may pose problems, but so far seems to work okay.
            match = re.match(r'.*; *charset=(.*?)(;|$)', media_info['media_type'], re.I)
            encoding = match.group(1) if match else "utf-8"

            try:
                parser = etree.HTMLParser(recover=True, encoding=encoding)
                tree = etree.fromstring(body, parser)
                og = yield self._calc_og(tree, media_info, requester)
            except UnicodeDecodeError:
                # blindly try decoding the body as utf-8, which seems to fix
                # the charset mismatches on https://google.com
                parser = etree.HTMLParser(recover=True, encoding=encoding)
                tree = etree.fromstring(body.decode('utf-8', 'ignore'), parser)
                og = yield self._calc_og(tree, media_info, requester)

        else:
            logger.warn("Failed to find any OG data in %s", url)
            og = {}

        logger.debug("Calculated OG for %s as %s" % (url, og))

        # store OG in ephemeral in-memory cache
        self.cache[url] = og

        # store OG in history-aware DB cache
        yield self.store.store_url_cache(
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"],
            json.dumps(og),
            media_info["filesystem_id"],
            media_info["created_ts"],
        )

        respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)

    @defer.inlineCallbacks
    def _calc_og(self, tree, media_info, requester):
        # suck our tree into lxml and define our OG response.

        # if we see any image URLs in the OG response, then spider them
        # (although the client could choose to do this by asking for previews of those
        # URLs to avoid DoSing the server)

        # "og:type"         : "video",
        # "og:url"          : "https://www.youtube.com/watch?v=LXDBoHyjmtw",
        # "og:site_name"    : "YouTube",
        # "og:video:type"   : "application/x-shockwave-flash",
        # "og:description"  : "Fun stuff happening here",
        # "og:title"        : "RemoteJam - Matrix team hack for Disrupt Europe Hackathon",
        # "og:image"        : "https://i.ytimg.com/vi/LXDBoHyjmtw/maxresdefault.jpg",
        # "og:video:url"    : "http://www.youtube.com/v/LXDBoHyjmtw?version=3&autohide=1",
        # "og:video:width"  : "1280"
        # "og:video:height" : "720",
        # "og:video:secure_url": "https://www.youtube.com/v/LXDBoHyjmtw?version=3",

        og = {}
        for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
            if 'content' in tag.attrib:
                og[tag.attrib['property']] = tag.attrib['content']

        # TODO: grab article: meta tags too, e.g.:

        # "article:publisher" : "https://www.facebook.com/thethudonline" />
        # "article:author" content="https://www.facebook.com/thethudonline" />
        # "article:tag" content="baby" />
        # "article:section" content="Breaking News" />
        # "article:published_time" content="2016-03-31T19:58:24+00:00" />
        # "article:modified_time" content="2016-04-01T18:31:53+00:00" />

        if 'og:title' not in og:
            # do some basic spidering of the HTML
            title = tree.xpath("(//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1]")
            og['og:title'] = title[0].text.strip() if title else None

        if 'og:image' not in og:
            # TODO: extract a favicon failing all else
            meta_image = tree.xpath(
                "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image']/@content"
            )
            if meta_image:
                og['og:image'] = self._rebase_url(meta_image[0], media_info['uri'])
            else:
                # TODO: consider inlined CSS styles as well as width & height attribs
                images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
                images = sorted(images, key=lambda i: (
                    -1 * float(i.attrib['width']) * float(i.attrib['height'])
                ))
                if not images:
                    images = tree.xpath("//img[@src]")
                if images:
                    og['og:image'] = images[0].attrib['src']

        # pre-cache the image for posterity
        # FIXME: it might be cleaner to use the same flow as the main /preview_url
        # request itself and benefit from the same caching etc.  But for now we
        # just rely on the caching on the master request to speed things up.
        if 'og:image' in og and og['og:image']:
            image_info = yield self._download_url(
                self._rebase_url(og['og:image'], media_info['uri']), requester.user
            )

            if self._is_media(image_info['media_type']):
                # TODO: make sure we don't choke on white-on-transparent images
                dims = yield self.media_repo._generate_local_thumbnails(
                    image_info['filesystem_id'], image_info
                )
                if dims:
                    og["og:image:width"] = dims['width']
                    og["og:image:height"] = dims['height']
                else:
                    logger.warn("Couldn't get dims for %s" % og["og:image"])

                og["og:image"] = "mxc://%s/%s" % (
                    self.server_name, image_info['filesystem_id']
                )
                og["og:image:type"] = image_info['media_type']
                og["matrix:image:size"] = image_info['media_length']
            else:
                del og["og:image"]

        if 'og:description' not in og:
            meta_description = tree.xpath(
                "//*/meta"
                "[translate(@name, 'DESCRIPTION', 'description')='description']"
                "/@content")
            if meta_description:
                og['og:description'] = meta_description[0]
            else:
                # grab any text nodes which are inside the <body/> tag...
                # unless they are within an HTML5 semantic markup tag...
                # <header/>, <nav/>, <aside/>, <footer/>
                # ...or if they are within a <script/> or <style/> tag.
                # This is a very very very coarse approximation to a plain text
                # render of the page.
                text_nodes = tree.xpath("//text()[not(ancestor::header | ancestor::nav | "
                                        "ancestor::aside | ancestor::footer | "
                                        "ancestor::script | ancestor::style)]" +
                                        "[ancestor::body]")
                text = ''
                for text_node in text_nodes:
                    if len(text) < 500:
                        text += text_node + ' '
                    else:
                        break
                text = re.sub(r'[\t ]+', ' ', text)
                text = re.sub(r'[\t \r\n]*[\r\n]+', '\n', text)
                text = text.strip()[:500]
                og['og:description'] = text if text else None

        # TODO: delete the url downloads to stop diskfilling,
        # as we only ever cared about its OG
        defer.returnValue(og)

    def _rebase_url(self, url, base):
        base = list(urlparse.urlparse(base))
        url = list(urlparse.urlparse(url))
        if not url[0]:  # fix up scheme
            url[0] = base[0] or "http"
        if not url[1]:  # fix up hostname
            url[1] = base[1]
            if not url[2].startswith('/'):
                url[2] = re.sub(r'/[^/]+$', '/', base[2]) + url[2]
        return urlparse.urlunparse(url)

    @defer.inlineCallbacks
    def _download_url(self, url, user):
        # TODO: we should probably honour robots.txt... except in practice
        # we're most likely being explicitly triggered by a human rather than a
        # bot, so are we really a robot?

        # XXX: horrible duplication with base_resource's _download_remote_file()
        file_id = random_string(24)

        fname = self.filepaths.local_media_filepath(file_id)
        self.media_repo._makedirs(fname)

        try:
            with open(fname, "wb") as f:
                logger.debug("Trying to get url '%s'" % url)
                length, headers, uri, code = yield self.client.get_file(
                    url, output_stream=f, max_size=self.max_spider_size,
                )
                # FIXME: pass through 404s and other error messages nicely

            media_type = headers["Content-Type"][0]
            time_now_ms = self.clock.time_msec()

            content_disposition = headers.get("Content-Disposition", None)
            if content_disposition:
                _, params = cgi.parse_header(content_disposition[0],)
                download_name = None

                # First check if there is a valid UTF-8 filename
                download_name_utf8 = params.get("filename*", None)
                if download_name_utf8:
                    if download_name_utf8.lower().startswith("utf-8''"):
                        download_name = download_name_utf8[7:]

                # If there isn't check for an ascii name.
                if not download_name:
                    download_name_ascii = params.get("filename", None)
                    if download_name_ascii and is_ascii(download_name_ascii):
                        download_name = download_name_ascii

                if download_name:
                    download_name = urlparse.unquote(download_name)
                    try:
                        download_name = download_name.decode("utf-8")
                    except UnicodeDecodeError:
                        download_name = None
            else:
                download_name = None

            yield self.store.store_local_media(
                media_id=file_id,
                media_type=media_type,
                time_now_ms=self.clock.time_msec(),
                upload_name=download_name,
                media_length=length,
                user_id=user,
            )

        except Exception as e:
            os.remove(fname)
            raise SynapseError(
                500, ("Failed to download content: %s" % e),
                Codes.UNKNOWN
            )

        defer.returnValue({
            "media_type": media_type,
            "media_length": length,
            "download_name": download_name,
            "created_ts": time_now_ms,
            "filesystem_id": file_id,
            "filename": fname,
            "uri": uri,
            "response_code": code,
            # FIXME: we should calculate a proper expiration based on the
            # Cache-Control and Expire headers.  But for now, assume 1 hour.
            "expires": 60 * 60 * 1000,
            "etag": headers["ETag"][0] if "ETag" in headers else None,
        })

    def _is_media(self, content_type):
        if content_type.lower().startswith("image/"):
            return True

    def _is_html(self, content_type):
        content_type = content_type.lower()
        if (
            content_type.startswith("text/html") or
            content_type.startswith("application/xhtml")
        ):
            return True
Example #4
class PreviewUrlResource(Resource):
    isLeaf = True

    def __init__(self, hs, media_repo):
        Resource.__init__(self)

        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.version_string = hs.version_string
        self.filepaths = media_repo.filepaths
        self.max_spider_size = hs.config.max_spider_size
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.client = SpiderHttpClient(hs)
        self.media_repo = media_repo
        self.primary_base_path = media_repo.primary_base_path

        self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist

        # memory cache mapping urls to an ObservableDeferred returning
        # JSON-encoded OG metadata
        self._cache = ExpiringCache(
            cache_name="url_previews",
            clock=self.clock,
            # don't spider URLs more often than once an hour
            expiry_ms=60 * 60 * 1000,
        )
        self._cache.start()

        # every 10s, prune expired url cache content, media and thumbnails
        # from disk (see _expire_url_cache_data below)
        self._cleaner_loop = self.clock.looping_call(
            self._expire_url_cache_data, 10 * 1000
        )

    def render_OPTIONS(self, request):
        return respond_with_json(request, 200, {}, send_cors=True)

    def render_GET(self, request):
        self._async_render_GET(request)
        return NOT_DONE_YET

    @request_handler()
    @defer.inlineCallbacks
    def _async_render_GET(self, request):

        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = request.args.get("url")[0]
        if "ts" in request.args:
            ts = int(request.args.get("ts")[0])
        else:
            ts = self.clock.time_msec()

        # XXX: we could move this into _do_preview if we wanted.
        url_tuple = urlparse.urlsplit(url)
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug((
                    "Matching attrib '%s' with value '%s' against"
                    " pattern '%s'"
                ) % (attrib, value, pattern))

                if value is None:
                    match = False
                    continue

                if pattern.startswith('^'):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib), pattern):
                        match = False
                        continue
            if match:
                logger.warn(
                    "URL %s blocked by url_blacklist entry %s", url, entry
                )
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry",
                    Codes.UNKNOWN
                )

        # the in-memory cache:
        # * ensures that only one request is active at a time
        # * takes load off the DB for the thundering herds
        # * also caches any failures (unlike the DB) so we don't keep
        #    requesting the same endpoint

        observable = self._cache.get(url)

        if not observable:
            download = preserve_fn(self._do_preview)(
                url, requester.user, ts,
            )
            observable = ObservableDeferred(
                download,
                consumeErrors=True
            )
            self._cache[url] = observable
        else:
            logger.info("Returning cached response")

        og = yield make_deferred_yieldable(observable.observe())
        respond_with_json_bytes(request, 200, og, send_cors=True)

    @defer.inlineCallbacks
    def _do_preview(self, url, user, ts):
        """Check the db, and download the URL and build a preview

        Args:
            url (str):
            user (str):
            ts (int):

        Returns:
            Deferred[str]: json-encoded og data
        """
        # check the URL cache in the DB (which will also provide us with
        # historical previews, if we have any)
        cache_result = yield self.store.get_url_cache(url, ts)
        if (
            cache_result and
            cache_result["expires_ts"] > ts and
            cache_result["response_code"] / 100 == 2
        ):
            defer.returnValue(cache_result["og"])
            return

        media_info = yield self._download_url(url, user)

        logger.debug("got media_info of '%s'" % media_info)

        if _is_media(media_info['media_type']):
            dims = yield self.media_repo._generate_thumbnails(
                None, media_info['filesystem_id'], media_info, url_cache=True,
            )

            og = {
                "og:description": media_info['download_name'],
                "og:image": "mxc://%s/%s" % (
                    self.server_name, media_info['filesystem_id']
                ),
                "og:image:type": media_info['media_type'],
                "matrix:image:size": media_info['media_length'],
            }

            if dims:
                og["og:image:width"] = dims['width']
                og["og:image:height"] = dims['height']
            else:
                logger.warn("Couldn't get dims for %s" % url)

            # define our OG response for this media
        elif _is_html(media_info['media_type']):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            with open(media_info['filename']) as f:
                body = f.read()

            # clobber the encoding from the content-type, or default to utf-8
            # XXX: this overrides any <meta/> or XML charset headers in the body
            # which may pose problems, but so far seems to work okay.
            match = re.match(r'.*; *charset=(.*?)(;|$)', media_info['media_type'], re.I)
            encoding = match.group(1) if match else "utf-8"

            og = decode_and_calc_og(body, media_info['uri'], encoding)

            # pre-cache the image for posterity
            # FIXME: it might be cleaner to use the same flow as the main /preview_url
            # request itself and benefit from the same caching etc.  But for now we
            # just rely on the caching on the master request to speed things up.
            if 'og:image' in og and og['og:image']:
                image_info = yield self._download_url(
                    _rebase_url(og['og:image'], media_info['uri']), user
                )

                if _is_media(image_info['media_type']):
                    # TODO: make sure we don't choke on white-on-transparent images
                    dims = yield self.media_repo._generate_thumbnails(
                        None, image_info['filesystem_id'], image_info, url_cache=True,
                    )
                    if dims:
                        og["og:image:width"] = dims['width']
                        og["og:image:height"] = dims['height']
                    else:
                        logger.warn("Couldn't get dims for %s" % og["og:image"])

                    og["og:image"] = "mxc://%s/%s" % (
                        self.server_name, image_info['filesystem_id']
                    )
                    og["og:image:type"] = image_info['media_type']
                    og["matrix:image:size"] = image_info['media_length']
                else:
                    del og["og:image"]
        else:
            logger.warn("Failed to find any OG data in %s", url)
            og = {}

        logger.debug("Calculated OG for %s as %s" % (url, og))

        jsonog = json.dumps(og)

        # store OG in history-aware DB cache
        yield self.store.store_url_cache(
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"] + media_info["created_ts"],
            jsonog,
            media_info["filesystem_id"],
            media_info["created_ts"],
        )

        defer.returnValue(jsonog)

    @defer.inlineCallbacks
    def _download_url(self, url, user):
        # TODO: we should probably honour robots.txt... except in practice
        # we're most likely being explicitly triggered by a human rather than a
        # bot, so are we really a robot?

        file_id = datetime.date.today().isoformat() + '_' + random_string(16)

        fpath = self.filepaths.url_cache_filepath_rel(file_id)
        fname = os.path.join(self.primary_base_path, fpath)
        self.media_repo._makedirs(fname)

        try:
            with open(fname, "wb") as f:
                logger.debug("Trying to get url '%s'" % url)
                length, headers, uri, code = yield self.client.get_file(
                    url, output_stream=f, max_size=self.max_spider_size,
                )
                # FIXME: pass through 404s and other error messages nicely

            yield self.media_repo.copy_to_backup(fpath)

            media_type = headers["Content-Type"][0]
            time_now_ms = self.clock.time_msec()

            content_disposition = headers.get("Content-Disposition", None)
            if content_disposition:
                _, params = cgi.parse_header(content_disposition[0],)
                download_name = None

                # First check if there is a valid UTF-8 filename
                download_name_utf8 = params.get("filename*", None)
                if download_name_utf8:
                    if download_name_utf8.lower().startswith("utf-8''"):
                        download_name = download_name_utf8[7:]

                # If there isn't check for an ascii name.
                if not download_name:
                    download_name_ascii = params.get("filename", None)
                    if download_name_ascii and is_ascii(download_name_ascii):
                        download_name = download_name_ascii

                if download_name:
                    download_name = urlparse.unquote(download_name)
                    try:
                        download_name = download_name.decode("utf-8")
                    except UnicodeDecodeError:
                        download_name = None
            else:
                download_name = None

            yield self.store.store_local_media(
                media_id=file_id,
                media_type=media_type,
                time_now_ms=self.clock.time_msec(),
                upload_name=download_name,
                media_length=length,
                user_id=user,
                url_cache=url,
            )

        except Exception as e:
            os.remove(fname)
            raise SynapseError(
                500, ("Failed to download content: %s" % e),
                Codes.UNKNOWN
            )

        defer.returnValue({
            "media_type": media_type,
            "media_length": length,
            "download_name": download_name,
            "created_ts": time_now_ms,
            "filesystem_id": file_id,
            "filename": fname,
            "uri": uri,
            "response_code": code,
            # FIXME: we should calculate a proper expiration based on the
            # Cache-Control and Expire headers.  But for now, assume 1 hour.
            "expires": 60 * 60 * 1000,
            "etag": headers["ETag"][0] if "ETag" in headers else None,
        })

    @defer.inlineCallbacks
    def _expire_url_cache_data(self):
        """Clean up expired url cache content, media and thumbnails.
        """
        # TODO: Delete from backup media store

        now = self.clock.time_msec()

        logger.info("Running url preview cache expiry")

        if not (yield self.store.has_completed_background_updates()):
            logger.info("Still running DB updates; skipping expiry")
            return

        # First we delete expired url cache entries
        media_ids = yield self.store.get_expired_url_cache(now)

        removed_media = []
        for media_id in media_ids:
            fname = self.filepaths.url_cache_filepath(media_id)
            try:
                os.remove(fname)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            removed_media.append(media_id)

            try:
                dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

        yield self.store.delete_url_cache(removed_media)

        if removed_media:
            logger.info("Deleted %d entries from url cache", len(removed_media))

        # Now we delete old images associated with the url cache.
        # These may be cached for a bit on the client (i.e., they
        # may have a room open with a preview url thing open).
        # So we wait a couple of days before deleting, just in case.
        expire_before = now - 2 * 24 * 60 * 60 * 1000
        media_ids = yield self.store.get_url_cache_media_before(expire_before)

        removed_media = []
        for media_id in media_ids:
            fname = self.filepaths.url_cache_filepath(media_id)
            try:
                os.remove(fname)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            try:
                dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

            thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id)
            try:
                shutil.rmtree(thumbnail_dir)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            removed_media.append(media_id)

            try:
                dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

        yield self.store.delete_url_cache_media(removed_media)

        logger.info("Deleted %d media from url cache", len(removed_media))
Example #5

class PreviewUrlResource(Resource):
    isLeaf = True

    def __init__(self, hs, media_repo, media_storage):
        Resource.__init__(self)

        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.filepaths = media_repo.filepaths
        self.max_spider_size = hs.config.max_spider_size
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.client = SpiderHttpClient(hs)
        self.media_repo = media_repo
        self.primary_base_path = media_repo.primary_base_path
        self.media_storage = media_storage

        self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist

        # memory cache mapping urls to an ObservableDeferred returning
        # JSON-encoded OG metadata
        self._cache = ExpiringCache(
            cache_name="url_previews",
            clock=self.clock,
            # don't spider URLs more often than once an hour
            expiry_ms=60 * 60 * 1000,
        )

        self._cleaner_loop = self.clock.looping_call(
            self._start_expire_url_cache_data, 10 * 1000,
        )

    def render_OPTIONS(self, request):
        return respond_with_json(request, 200, {}, send_cors=True)

    def render_GET(self, request):
        self._async_render_GET(request)
        return NOT_DONE_YET

    @wrap_json_request_handler
    @defer.inlineCallbacks
    def _async_render_GET(self, request):

        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = parse_string(request, "url")
        if b"ts" in request.args:
            ts = parse_integer(request, "ts")
        else:
            ts = self.clock.time_msec()

        # XXX: we could move this into _do_preview if we wanted.
        url_tuple = urlparse.urlsplit(url)
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug((
                    "Matching attrib '%s' with value '%s' against"
                    " pattern '%s'"
                ) % (attrib, value, pattern))

                if value is None:
                    match = False
                    continue

                if pattern.startswith('^'):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib), pattern):
                        match = False
                        continue
            if match:
                logger.warn(
                    "URL %s blocked by url_blacklist entry %s", url, entry
                )
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry",
                    Codes.UNKNOWN
                )

        # the in-memory cache:
        # * ensures that only one request is active at a time
        # * takes load off the DB for the thundering herds
        # * also caches any failures (unlike the DB) so we don't keep
        #    requesting the same endpoint

        observable = self._cache.get(url)

        if not observable:
            download = run_in_background(
                self._do_preview,
                url, requester.user, ts,
            )
            observable = ObservableDeferred(
                download,
                consumeErrors=True
            )
            self._cache[url] = observable
        else:
            logger.info("Returning cached response")

        og = yield make_deferred_yieldable(observable.observe())
        respond_with_json_bytes(request, 200, og, send_cors=True)

    @defer.inlineCallbacks
    def _do_preview(self, url, user, ts):
        """Check the db, and download the URL and build a preview

        Args:
            url (str):
            user (str):
            ts (int):

        Returns:
            Deferred[str]: json-encoded og data
        """
        # check the URL cache in the DB (which will also provide us with
        # historical previews, if we have any)
        cache_result = yield self.store.get_url_cache(url, ts)
        if (
            cache_result and
            cache_result["expires_ts"] > ts and
            cache_result["response_code"] / 100 == 2
        ):
            # It may be stored as text in the database, not as bytes (such as
            # PostgreSQL). If so, encode it back before handing it on.
            og = cache_result["og"]
            if isinstance(og, six.text_type):
                og = og.encode('utf8')
            defer.returnValue(og)
            return

        media_info = yield self._download_url(url, user)

        logger.debug("got media_info of '%s'" % media_info)

        if _is_media(media_info['media_type']):
            file_id = media_info['filesystem_id']
            dims = yield self.media_repo._generate_thumbnails(
                None, file_id, file_id, media_info["media_type"],
                url_cache=True,
            )

            og = {
                "og:description": media_info['download_name'],
                "og:image": "mxc://%s/%s" % (
                    self.server_name, media_info['filesystem_id']
                ),
                "og:image:type": media_info['media_type'],
                "matrix:image:size": media_info['media_length'],
            }

            if dims:
                og["og:image:width"] = dims['width']
                og["og:image:height"] = dims['height']
            else:
                logger.warn("Couldn't get dims for %s" % url)

            # define our OG response for this media
        elif _is_html(media_info['media_type']):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            with open(media_info['filename'], 'rb') as file:
                body = file.read()

            # clobber the encoding from the content-type, or default to utf-8
            # XXX: this overrides any <meta/> or XML charset headers in the body
            # which may pose problems, but so far seems to work okay.
            match = re.match(
                r'.*; *charset="?(.*?)"?(;|$)',
                media_info['media_type'],
                re.I
            )
            encoding = match.group(1) if match else "utf-8"

            og = decode_and_calc_og(body, media_info['uri'], encoding)

            # pre-cache the image for posterity
            # FIXME: it might be cleaner to use the same flow as the main /preview_url
            # request itself and benefit from the same caching etc.  But for now we
            # just rely on the caching on the master request to speed things up.
            if 'og:image' in og and og['og:image']:
                image_info = yield self._download_url(
                    _rebase_url(og['og:image'], media_info['uri']), user
                )

                if _is_media(image_info['media_type']):
                    # TODO: make sure we don't choke on white-on-transparent images
                    file_id = image_info['filesystem_id']
                    dims = yield self.media_repo._generate_thumbnails(
                        None, file_id, file_id, image_info["media_type"],
                        url_cache=True,
                    )
                    if dims:
                        og["og:image:width"] = dims['width']
                        og["og:image:height"] = dims['height']
                    else:
                        logger.warn("Couldn't get dims for %s" % og["og:image"])

                    og["og:image"] = "mxc://%s/%s" % (
                        self.server_name, image_info['filesystem_id']
                    )
                    og["og:image:type"] = image_info['media_type']
                    og["matrix:image:size"] = image_info['media_length']
                else:
                    del og["og:image"]
        else:
            logger.warn("Failed to find any OG data in %s", url)
            og = {}

        logger.debug("Calculated OG for %s as %s" % (url, og))

        jsonog = json.dumps(og).encode('utf8')

        # store OG in history-aware DB cache
        yield self.store.store_url_cache(
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"] + media_info["created_ts"],
            jsonog,
            media_info["filesystem_id"],
            media_info["created_ts"],
        )

        defer.returnValue(jsonog)

    @defer.inlineCallbacks
    def _download_url(self, url, user):
        # TODO: we should probably honour robots.txt... except in practice
        # we're most likely being explicitly triggered by a human rather than a
        # bot, so are we really a robot?

        file_id = datetime.date.today().isoformat() + '_' + random_string(16)

        file_info = FileInfo(
            server_name=None,
            file_id=file_id,
            url_cache=True,
        )

        with self.media_storage.store_into_file(file_info) as (f, fname, finish):
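            # store_into_file hands back an open file object to stream the
            # download into, the local path it is written to, and a finish()
            # callback to run once the download has completed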
            try:
                logger.debug("Trying to get url '%s'" % url)
                length, headers, uri, code = yield self.client.get_file(
                    url, output_stream=f, max_size=self.max_spider_size,
                )
            except Exception as e:
                # FIXME: pass through 404s and other error messages nicely
                logger.warn("Error downloading %s: %r", url, e)
                raise SynapseError(
                    500, "Failed to download content: %s" % (
                        traceback.format_exception_only(sys.exc_info()[0], e),
                    ),
                    Codes.UNKNOWN,
                )
            yield finish()

        try:
            if b"Content-Type" in headers:
                media_type = headers[b"Content-Type"][0].decode('ascii')
            else:
                media_type = "application/octet-stream"
            time_now_ms = self.clock.time_msec()

            content_disposition = headers.get(b"Content-Disposition", None)
            if content_disposition:
                _, params = cgi.parse_header(content_disposition[0],)
                download_name = None

                # First check if there is a valid UTF-8 filename
                download_name_utf8 = params.get("filename*", None)
                if download_name_utf8:
                    if download_name_utf8.lower().startswith("utf-8''"):
                        download_name = download_name_utf8[7:]

                # If there isn't, check for an ASCII name.
                if not download_name:
                    download_name_ascii = params.get("filename", None)
                    if download_name_ascii and is_ascii(download_name_ascii):
                        download_name = download_name_ascii

                if download_name:
                    download_name = urlparse.unquote(download_name)
                    try:
                        download_name = download_name.decode("utf-8")
                    except UnicodeDecodeError:
                        download_name = None
            else:
                download_name = None

            yield self.store.store_local_media(
                media_id=file_id,
                media_type=media_type,
                time_now_ms=self.clock.time_msec(),
                upload_name=download_name,
                media_length=length,
                user_id=user,
                url_cache=url,
            )

        except Exception as e:
            logger.error("Error handling downloaded %s: %r", url, e)
            # TODO: we really ought to delete the downloaded file in this
            # case, since we won't have recorded it in the db, and will
            # therefore not expire it.
            raise

        defer.returnValue({
            "media_type": media_type,
            "media_length": length,
            "download_name": download_name,
            "created_ts": time_now_ms,
            "filesystem_id": file_id,
            "filename": fname,
            "uri": uri,
            "response_code": code,
            # FIXME: we should calculate a proper expiration based on the
            # Cache-Control and Expire headers.  But for now, assume 1 hour.
            "expires": 60 * 60 * 1000,
            "etag": headers["ETag"][0] if "ETag" in headers else None,
        })

    def _start_expire_url_cache_data(self):
        return run_as_background_process(
            "expire_url_cache_data", self._expire_url_cache_data,
        )

    @defer.inlineCallbacks
    def _expire_url_cache_data(self):
        """Clean up expired url cache content, media and thumbnails.
        """
        # TODO: Delete from backup media store

        now = self.clock.time_msec()

        logger.info("Running url preview cache expiry")

        if not (yield self.store.has_completed_background_updates()):
            logger.info("Still running DB updates; skipping expiry")
            return

        # First we delete expired url cache entries
        media_ids = yield self.store.get_expired_url_cache(now)

        removed_media = []
        for media_id in media_ids:
            fname = self.filepaths.url_cache_filepath(media_id)
            try:
                os.remove(fname)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            removed_media.append(media_id)

            try:
                dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

        yield self.store.delete_url_cache(removed_media)

        if removed_media:
            logger.info("Deleted %d entries from url cache", len(removed_media))

        # Now we delete old images associated with the url cache.
        # These may be cached for a bit on the client (i.e., they
        # may have a room open with a preview url thing open).
        # So we wait a couple of days before deleting, just in case.
        expire_before = now - 2 * 24 * 60 * 60 * 1000
        media_ids = yield self.store.get_url_cache_media_before(expire_before)

        removed_media = []
        for media_id in media_ids:
            fname = self.filepaths.url_cache_filepath(media_id)
            try:
                os.remove(fname)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            try:
                dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

            thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id)
            try:
                shutil.rmtree(thumbnail_dir)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            removed_media.append(media_id)

            try:
                dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

        yield self.store.delete_url_cache_media(removed_media)

        logger.info("Deleted %d media from url cache", len(removed_media))
Example #6
class PreviewUrlResource(Resource):
    isLeaf = True

    def __init__(self, hs, media_repo):
        Resource.__init__(self)

        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.version_string = hs.version_string
        self.filepaths = media_repo.filepaths
        self.max_spider_size = hs.config.max_spider_size
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.client = SpiderHttpClient(hs)
        self.media_repo = media_repo

        self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist

        # simple memory cache mapping urls to OG metadata
        self.cache = ExpiringCache(
            cache_name="url_previews",
            clock=self.clock,
            # don't spider URLs more often than once an hour
            expiry_ms=60 * 60 * 1000,
        )
        self.cache.start()

        self.downloads = {}

    def render_GET(self, request):
        self._async_render_GET(request)
        return NOT_DONE_YET

    @request_handler()
    @defer.inlineCallbacks
    def _async_render_GET(self, request):

        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = request.args.get("url")[0]
        if "ts" in request.args:
            ts = int(request.args.get("ts")[0])
        else:
            ts = self.clock.time_msec()

        url_tuple = urlparse.urlsplit(url)
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug((
                    "Matching attrib '%s' with value '%s' against"
                    " pattern '%s'"
                ) % (attrib, value, pattern))

                if value is None:
                    match = False
                    continue

                if pattern.startswith('^'):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib), pattern):
                        match = False
                        continue
            if match:
                logger.warn(
                    "URL %s blocked by url_blacklist entry %s", url, entry
                )
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry",
                    Codes.UNKNOWN
                )

        # first check the memory cache - good to handle all the clients on this
        # HS thundering away to preview the same URL at the same time.
        og = self.cache.get(url)
        if og:
            respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)
            return

        # then check the URL cache in the DB (which will also provide us with
        # historical previews, if we have any)
        cache_result = yield self.store.get_url_cache(url, ts)
        if (
            cache_result and
            cache_result["download_ts"] + cache_result["expires"] > ts and
            cache_result["response_code"] / 100 == 2
        ):
            respond_with_json_bytes(
                request, 200, cache_result["og"].encode('utf-8'),
                send_cors=True
            )
            return

        # Ensure only one download for a given URL is active at a time
        download = self.downloads.get(url)
        if download is None:
            download = self._download_url(url, requester.user)
            download = ObservableDeferred(
                download,
                consumeErrors=True
            )
            self.downloads[url] = download
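            # addBoth fires on success or failure alike: either way the
            # callback below removes the URL from the in-flight map and
            # passes the result (or Failure) through unchanged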

            @download.addBoth
            def callback(media_info):
                del self.downloads[url]
                return media_info
        media_info = yield download.observe()

        # FIXME: we should probably update our cache now anyway, so that
        # even if the OG calculation raises, we don't keep hammering on the
        # remote server.  For now, leave it uncached to aid debugging OG
        # calculation problems

        logger.debug("got media_info of '%s'" % media_info)

        if _is_media(media_info['media_type']):
            dims = yield self.media_repo._generate_local_thumbnails(
                media_info['filesystem_id'], media_info
            )

            og = {
                "og:description": media_info['download_name'],
                "og:image": "mxc://%s/%s" % (
                    self.server_name, media_info['filesystem_id']
                ),
                "og:image:type": media_info['media_type'],
                "matrix:image:size": media_info['media_length'],
            }

            if dims:
                og["og:image:width"] = dims['width']
                og["og:image:height"] = dims['height']
            else:
                logger.warn("Couldn't get dims for %s" % url)

            # define our OG response for this media
        elif _is_html(media_info['media_type']):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            with open(media_info['filename']) as file:
                body = file.read()

            # clobber the encoding from the content-type, or default to utf-8
            # XXX: this overrides any <meta/> or XML charset headers in the body
            # which may pose problems, but so far seems to work okay.
            match = re.match(r'.*; *charset=(.*?)(;|$)', media_info['media_type'], re.I)
            encoding = match.group(1) if match else "utf-8"

            og = decode_and_calc_og(body, media_info['uri'], encoding)

            # pre-cache the image for posterity
            # FIXME: it might be cleaner to use the same flow as the main /preview_url
            # request itself and benefit from the same caching etc.  But for now we
            # just rely on the caching on the master request to speed things up.
            if 'og:image' in og and og['og:image']:
                image_info = yield self._download_url(
                    _rebase_url(og['og:image'], media_info['uri']), requester.user
                )

                if _is_media(image_info['media_type']):
                    # TODO: make sure we don't choke on white-on-transparent images
                    dims = yield self.media_repo._generate_local_thumbnails(
                        image_info['filesystem_id'], image_info
                    )
                    if dims:
                        og["og:image:width"] = dims['width']
                        og["og:image:height"] = dims['height']
                    else:
                        logger.warn("Couldn't get dims for %s" % og["og:image"])

                    og["og:image"] = "mxc://%s/%s" % (
                        self.server_name, image_info['filesystem_id']
                    )
                    og["og:image:type"] = image_info['media_type']
                    og["matrix:image:size"] = image_info['media_length']
                else:
                    del og["og:image"]
        else:
            logger.warn("Failed to find any OG data in %s", url)
            og = {}

        logger.debug("Calculated OG for %s as %s" % (url, og))

        # store OG in ephemeral in-memory cache
        self.cache[url] = og

        # store OG in history-aware DB cache
        yield self.store.store_url_cache(
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"],
            json.dumps(og),
            media_info["filesystem_id"],
            media_info["created_ts"],
        )

        respond_with_json_bytes(request, 200, json.dumps(og), send_cors=True)

    @defer.inlineCallbacks
    def _download_url(self, url, user):
        # TODO: we should probably honour robots.txt... except in practice
        # we're most likely being explicitly triggered by a human rather than a
        # bot, so are we really a robot?

        # XXX: horrible duplication with base_resource's _download_remote_file()
        file_id = random_string(24)

        fname = self.filepaths.local_media_filepath(file_id)
        self.media_repo._makedirs(fname)

        try:
            with open(fname, "wb") as f:
                logger.debug("Trying to get url '%s'" % url)
                length, headers, uri, code = yield self.client.get_file(
                    url, output_stream=f, max_size=self.max_spider_size,
                )
                # FIXME: pass through 404s and other error messages nicely

            media_type = headers["Content-Type"][0]
            time_now_ms = self.clock.time_msec()

            content_disposition = headers.get("Content-Disposition", None)
            if content_disposition:
                _, params = cgi.parse_header(content_disposition[0],)
                download_name = None

                # First check if there is a valid UTF-8 filename
                download_name_utf8 = params.get("filename*", None)
                if download_name_utf8:
                    if download_name_utf8.lower().startswith("utf-8''"):
                        download_name = download_name_utf8[7:]

                # If there isn't, check for an ascii name.
                if not download_name:
                    download_name_ascii = params.get("filename", None)
                    if download_name_ascii and is_ascii(download_name_ascii):
                        download_name = download_name_ascii

                if download_name:
                    download_name = urlparse.unquote(download_name)
                    try:
                        download_name = download_name.decode("utf-8")
                    except UnicodeDecodeError:
                        download_name = None
            else:
                download_name = None

            yield self.store.store_local_media(
                media_id=file_id,
                media_type=media_type,
                time_now_ms=self.clock.time_msec(),
                upload_name=download_name,
                media_length=length,
                user_id=user,
            )

        except Exception as e:
            os.remove(fname)
            raise SynapseError(
                500, ("Failed to download content: %s" % e),
                Codes.UNKNOWN
            )

        defer.returnValue({
            "media_type": media_type,
            "media_length": length,
            "download_name": download_name,
            "created_ts": time_now_ms,
            "filesystem_id": file_id,
            "filename": fname,
            "uri": uri,
            "response_code": code,
            # FIXME: we should calculate a proper expiration based on the
            # Cache-Control and Expire headers.  But for now, assume 1 hour.
            "expires": 60 * 60 * 1000,
            "etag": headers["ETag"][0] if "ETag" in headers else None,
        })
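
The self.downloads map in this example is what stops two clients previewing the same URL from triggering two concurrent downloads. A minimal standalone sketch of that fan-out pattern built from plain Twisted Deferreds (ObservableDeferred is Synapse's utility for exactly this; the class and method names below are made up for illustration):

from twisted.internet import defer
from twisted.python import failure

class DownloadDeduplicator(object):
    """Share one in-flight download per URL among all concurrent callers."""

    def __init__(self):
        self._inflight = {}  # url -> list of Deferreds awaiting the result

    def fetch(self, url, downloader):
        d = defer.Deferred()
        if url in self._inflight:
            # a download for this URL is already running: just wait on it
            self._inflight[url].append(d)
            return d

        waiters = [d]
        self._inflight[url] = waiters

        def fan_out(result):
            # runs on success or failure alike (cf. consumeErrors=True)
            del self._inflight[url]
            for w in waiters:
                if isinstance(result, failure.Failure):
                    w.errback(result)
                else:
                    w.callback(result)

        downloader(url).addBoth(fan_out)
        return d
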
Example #7
class PreviewUrlResource(Resource):
    isLeaf = True

    def __init__(self, hs, media_repo, media_storage):
        Resource.__init__(self)

        self.auth = hs.get_auth()
        self.clock = hs.get_clock()
        self.filepaths = media_repo.filepaths
        self.max_spider_size = hs.config.max_spider_size
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.client = SpiderHttpClient(hs)
        self.media_repo = media_repo
        self.primary_base_path = media_repo.primary_base_path
        self.media_storage = media_storage

        self.url_preview_url_blacklist = hs.config.url_preview_url_blacklist

        # memory cache mapping urls to an ObservableDeferred returning
        # JSON-encoded OG metadata
        self._cache = ExpiringCache(
            cache_name="url_previews",
            clock=self.clock,
            # don't spider URLs more often than once an hour
            expiry_ms=60 * 60 * 1000,
        )

        self._cleaner_loop = self.clock.looping_call(
            self._start_expire_url_cache_data,
            10 * 1000,
        )

    def render_OPTIONS(self, request):
        return respond_with_json(request, 200, {}, send_cors=True)

    def render_GET(self, request):
        self._async_render_GET(request)
        return NOT_DONE_YET

    @wrap_json_request_handler
    @defer.inlineCallbacks
    def _async_render_GET(self, request):

        # XXX: if get_user_by_req fails, what should we do in an async render?
        requester = yield self.auth.get_user_by_req(request)
        url = parse_string(request, "url")
        if b"ts" in request.args:
            ts = parse_integer(request, "ts")
        else:
            ts = self.clock.time_msec()

        # XXX: we could move this into _do_preview if we wanted.
        url_tuple = urlparse.urlsplit(url)
        for entry in self.url_preview_url_blacklist:
            match = True
            for attrib in entry:
                pattern = entry[attrib]
                value = getattr(url_tuple, attrib)
                logger.debug(("Matching attrib '%s' with value '%s' against"
                              " pattern '%s'") % (attrib, value, pattern))

                if value is None:
                    match = False
                    continue

                if pattern.startswith('^'):
                    if not re.match(pattern, getattr(url_tuple, attrib)):
                        match = False
                        continue
                else:
                    if not fnmatch.fnmatch(getattr(url_tuple, attrib),
                                           pattern):
                        match = False
                        continue
            if match:
                logger.warn("URL %s blocked by url_blacklist entry %s", url,
                            entry)
                raise SynapseError(
                    403, "URL blocked by url pattern blacklist entry",
                    Codes.UNKNOWN)

        # the in-memory cache:
        # * ensures that only one request is active at a time
        # * takes load off the DB for the thundering herds
        # * also caches any failures (unlike the DB) so we don't keep
        #    requesting the same endpoint

        observable = self._cache.get(url)

        if not observable:
            download = run_in_background(
                self._do_preview,
                url,
                requester.user,
                ts,
            )
            observable = ObservableDeferred(download, consumeErrors=True)
            self._cache[url] = observable
        else:
            logger.info("Returning cached response")

        og = yield make_deferred_yieldable(observable.observe())
        respond_with_json_bytes(request, 200, og, send_cors=True)

    @defer.inlineCallbacks
    def _do_preview(self, url, user, ts):
        """Check the db, and download the URL and build a preview

        Args:
            url (str):
            user (str):
            ts (int):

        Returns:
            Deferred[str]: json-encoded og data
        """
        # check the URL cache in the DB (which will also provide us with
        # historical previews, if we have any)
        cache_result = yield self.store.get_url_cache(url, ts)
        if (cache_result and cache_result["expires_ts"] > ts
                and cache_result["response_code"] / 100 == 2):
            # It may be stored as text in the database, not as bytes (such as
            # PostgreSQL). If so, encode it back before handing it on.
            og = cache_result["og"]
            if isinstance(og, six.text_type):
                og = og.encode('utf8')
            defer.returnValue(og)
            return

        media_info = yield self._download_url(url, user)

        logger.debug("got media_info of '%s'" % media_info)

        if _is_media(media_info['media_type']):
            file_id = media_info['filesystem_id']
            dims = yield self.media_repo._generate_thumbnails(
                None,
                file_id,
                file_id,
                media_info["media_type"],
                url_cache=True,
            )

            og = {
                "og:description": media_info['download_name'],
                "og:image": "mxc://%s/%s" % (
                    self.server_name, media_info['filesystem_id']
                ),
                "og:image:type": media_info['media_type'],
                "matrix:image:size": media_info['media_length'],
            }

            if dims:
                og["og:image:width"] = dims['width']
                og["og:image:height"] = dims['height']
            else:
                logger.warn("Couldn't get dims for %s" % url)

            # define our OG response for this media
        elif _is_html(media_info['media_type']):
            # TODO: somehow stop a big HTML tree from exploding synapse's RAM

            with open(media_info['filename'], 'rb') as file:
                body = file.read()

            encoding = None

            # Let's try and figure out if it has an encoding set in a meta tag.
            # Limit it to the first 1kb, since it ought to be in the meta tags
            # at the top.
            match = _charset_match.search(body[:1000])

            # If we find a match, it should take precedence over the
            # Content-Type header, so set it here.
            if match:
                encoding = match.group(1).decode('ascii')

            # If we don't find a match, we'll look at the HTTP Content-Type, and
            # if that doesn't exist, we'll fall back to UTF-8.
            if not encoding:
                match = _content_type_match.match(media_info['media_type'])
                encoding = match.group(1) if match else "utf-8"

            og = decode_and_calc_og(body, media_info['uri'], encoding)

            # pre-cache the image for posterity
            # FIXME: it might be cleaner to use the same flow as the main /preview_url
            # request itself and benefit from the same caching etc.  But for now we
            # just rely on the caching on the master request to speed things up.
            if 'og:image' in og and og['og:image']:
                image_info = yield self._download_url(
                    _rebase_url(og['og:image'], media_info['uri']), user)

                if _is_media(image_info['media_type']):
                    # TODO: make sure we don't choke on white-on-transparent images
                    file_id = image_info['filesystem_id']
                    dims = yield self.media_repo._generate_thumbnails(
                        None,
                        file_id,
                        file_id,
                        image_info["media_type"],
                        url_cache=True,
                    )
                    if dims:
                        og["og:image:width"] = dims['width']
                        og["og:image:height"] = dims['height']
                    else:
                        logger.warn("Couldn't get dims for %s" %
                                    og["og:image"])

                    og["og:image"] = "mxc://%s/%s" % (
                        self.server_name, image_info['filesystem_id'])
                    og["og:image:type"] = image_info['media_type']
                    og["matrix:image:size"] = image_info['media_length']
                else:
                    del og["og:image"]
        else:
            logger.warn("Failed to find any OG data in %s", url)
            og = {}

        logger.debug("Calculated OG for %s as %s" % (url, og))

        jsonog = json.dumps(og).encode('utf8')

        # store OG in history-aware DB cache
        yield self.store.store_url_cache(
            url,
            media_info["response_code"],
            media_info["etag"],
            media_info["expires"] + media_info["created_ts"],
            jsonog,
            media_info["filesystem_id"],
            media_info["created_ts"],
        )

        defer.returnValue(jsonog)

    @defer.inlineCallbacks
    def _download_url(self, url, user):
        # TODO: we should probably honour robots.txt... except in practice
        # we're most likely being explicitly triggered by a human rather than a
        # bot, so are we really a robot?

        file_id = datetime.date.today().isoformat() + '_' + random_string(16)

        file_info = FileInfo(
            server_name=None,
            file_id=file_id,
            url_cache=True,
        )

        with self.media_storage.store_into_file(file_info) as (f, fname,
                                                               finish):
            try:
                logger.debug("Trying to get url '%s'" % url)
                length, headers, uri, code = yield self.client.get_file(
                    url,
                    output_stream=f,
                    max_size=self.max_spider_size,
                )
            except Exception as e:
                # FIXME: pass through 404s and other error messages nicely
                logger.warn("Error downloading %s: %r", url, e)
                raise SynapseError(
                    500,
                    "Failed to download content: %s" %
                    (traceback.format_exception_only(sys.exc_info()[0], e), ),
                    Codes.UNKNOWN,
                )
            yield finish()

        try:
            if b"Content-Type" in headers:
                media_type = headers[b"Content-Type"][0].decode('ascii')
            else:
                media_type = "application/octet-stream"
            time_now_ms = self.clock.time_msec()

            download_name = get_filename_from_headers(headers)

            yield self.store.store_local_media(
                media_id=file_id,
                media_type=media_type,
                time_now_ms=self.clock.time_msec(),
                upload_name=download_name,
                media_length=length,
                user_id=user,
                url_cache=url,
            )

        except Exception as e:
            logger.error("Error handling downloaded %s: %r", url, e)
            # TODO: we really ought to delete the downloaded file in this
            # case, since we won't have recorded it in the db, and will
            # therefore not expire it.
            raise

        defer.returnValue({
            "media_type": media_type,
            "media_length": length,
            "download_name": download_name,
            "created_ts": time_now_ms,
            "filesystem_id": file_id,
            "filename": fname,
            "uri": uri,
            "response_code": code,
            # FIXME: we should calculate a proper expiration based on the
            # Cache-Control and Expire headers.  But for now, assume 1 hour.
            "expires": 60 * 60 * 1000,
            "etag": headers[b"ETag"][0] if b"ETag" in headers else None,
        })

    def _start_expire_url_cache_data(self):
        return run_as_background_process(
            "expire_url_cache_data",
            self._expire_url_cache_data,
        )

    @defer.inlineCallbacks
    def _expire_url_cache_data(self):
        """Clean up expired url cache content, media and thumbnails.
        """
        # TODO: Delete from backup media store

        now = self.clock.time_msec()

        logger.info("Running url preview cache expiry")

        if not (yield self.store.has_completed_background_updates()):
            logger.info("Still running DB updates; skipping expiry")
            return

        # First we delete expired url cache entries
        media_ids = yield self.store.get_expired_url_cache(now)

        removed_media = []
        for media_id in media_ids:
            fname = self.filepaths.url_cache_filepath(media_id)
            try:
                os.remove(fname)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            removed_media.append(media_id)

            try:
                dirs = self.filepaths.url_cache_filepath_dirs_to_delete(
                    media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

        yield self.store.delete_url_cache(removed_media)

        if removed_media:
            logger.info("Deleted %d entries from url cache",
                        len(removed_media))

        # Now we delete old images associated with the url cache.
        # These may be cached for a bit on the client (i.e., they
        # may have a room open with a preview url thing open).
        # So we wait a couple of days before deleting, just in case.
        expire_before = now - 2 * 24 * 60 * 60 * 1000
        media_ids = yield self.store.get_url_cache_media_before(expire_before)

        removed_media = []
        for media_id in media_ids:
            fname = self.filepaths.url_cache_filepath(media_id)
            try:
                os.remove(fname)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            try:
                dirs = self.filepaths.url_cache_filepath_dirs_to_delete(
                    media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

            thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(
                media_id)
            try:
                shutil.rmtree(thumbnail_dir)
            except OSError as e:
                # If the path doesn't exist, meh
                if e.errno != errno.ENOENT:
                    logger.warn("Failed to remove media: %r: %s", media_id, e)
                    continue

            removed_media.append(media_id)

            try:
                dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(
                    media_id)
                for dir in dirs:
                    os.rmdir(dir)
            except Exception:
                pass

        yield self.store.delete_url_cache_media(removed_media)

        logger.info("Deleted %d media from url cache", len(removed_media))