Example #1
def notFound(filename):
    web.expires(60)
    web.header('Content-Type', 'text/html')
    s =  """<p>You were looking for "%s".</p>""" % filename
    s += """<p>(on host %s)</p>""" % web.ctx.host
    s += """<p>(that is, URL="%s")</p>""" % web.url()
    s += """<p>alas.</p>"""
    web.notfound()
    return s
Example #2
    def GET(self, category, key, value, size):
        i = web.input(default="true")
        key = key.lower()

        def notfound():
            if config.default_image and i.default.lower() != "false" and not i.default.startswith('http://'):
                return read_file(config.default_image)
            elif i.default.startswith('http://'):
                raise web.seeother(i.default)
            else:
                raise web.notfound("")

        def redirect(id):
            size_part = size and ("-" + size) or ""
            url = "/%s/id/%s%s.jpg" % (category, id, size_part)

            query = web.ctx.env.get('QUERY_STRING')
            if query:
                url += '?' + query
            raise web.found(url)

        if key == 'isbn':
            value = value.replace("-", "").strip()  # strip hyphens from ISBN
            # Disabling ratelimit as iptables is taking care of botnets.
            #value = self.ratelimit_query(category, key, value)
            value = self.query(category, key, value)
        elif key != 'id':
            value = self.query(category, key, value)

        d = value and db.details(value)
        if not d:
            return notfound()

        # set cache-for-ever headers only when requested with ID
        if key == 'id':
            etag = "%s-%s" % (d.id, size.lower())
            if not web.modified(trim_microsecond(d.created), etag=etag):
                raise web.notmodified()

            web.header('Cache-Control', 'public')
            web.expires(100 * 365 * 24 * 3600)  # this image is not going to expire in next 100 years.
        else:
            web.header('Cache-Control', 'public')
            web.expires(10 * 60)  # Allow the client to cache the image for 10 mins to avoid further requests

        web.header('Content-Type', 'image/jpeg')
        try:
            return read_image(d, size)
        except IOError:
            raise web.notfound()
Example #3
    def done(self):

        self.footer << div(
            p(
                """DISCLAIMER: This is an experimental technology demonstration, being run as a hobby by Sandro Hawke ([email protected]).    It is not supported or endorsed by his employer at this time."""
            ),
            id="disclaimer",
        )

        # self.footer << p("Page generated in %f seconds." % (time.time()-self.t0))
        self.titlebar = div([], id="title")
        self.header << self.titlebar
        self.titlebar << h1("LD", em("Reg"))
        self.titlebar << h2("Linked Data Registration ", Raw("&mdash;"), " Now you can Query the Semantic Web")

        snul = ul()
        self.nav1 << snul
        for area in (Home(), Demo(), Stats(), Blog(), Wiki(), About()):
            if area.__class__ == self.area.__class__ or area.__class__ == getattr(self.area, "parent", "").__class__:
                s = " active"
            else:
                s = ""
            snul << li(a(area.label, href=get_url(area), class_="selectable" + s))

        # fill in nav2 in the cases where it's shown...
        if self.area.__class__ == Home().__class__ or getattr(self.area, "parent", "").__class__ == Home().__class__:
            ul2 = ul()
            self.nav2 << div(ul2, class_="snav")
            for area in (Find(), Query(), Register(), Track()):
                if area.__class__ == self.area.__class__:
                    s = " active"
                else:
                    s = ""
                ul2 << li(
                    a(area.label, style="background:" + area.color + ";", href=get_url(area), class_="selectable" + s)
                )

        web.expires(60)
        web.header("Content-Type", "text/html; charset=utf-8")
        if self.title is None:
            self.title = "LDReg: " + self.area.label
        self.doc.head << title(self.title)
        self.doc.head << link(rel="stylesheet", type="text/css", href="/tables.css")
        self.doc.head << link(rel="stylesheet", type="text/css", href="/black.css")

        self.doc << self.header
        self.header << self.nav1

        d3 = div(self.col1, self.col2, self.col3, class_="colleft")
        self.doc << div(div(d3, class_="colmid"), class_="colmask threecol")

        self.doc << self.footer
        return self.doc
Example #4
 def GET(self, device_name):
     args, _ = templeton.handlers.get_request_parms()
     if args.get('cache'):
         # get state from a cache of all devices' state; this is used
         # for monitoring devices, so we don't pound the DB
         state = self.cache_get()[device_name]
         ttl = self.cache_expires - time.time()
         web.expires(datetime.timedelta(seconds=ttl))
         web.header('Cache-Control', 'public, max-age=%d' % int(ttl+1))
     else:
         # get the fresh state
         state = self.db.devices.get_machine_state(device_name)
     return { 'state' : state }
Example #5
        def GET(self):
            i = web.input(v=None)
            v = doc.md5()
            if v != i.v:
                raise web.seeother(web.changequery(v=v))

            if web.modified(etag=v):
                oneyear = 365 * 24 * 3600
                web.header("Content-Type", content_type)
                web.header("Cache-Control", "Public, max-age=%d" % oneyear)
                web.lastmodified(doc.last_modified)
                web.expires(oneyear)
                return delegate.RawText(doc.get_text())
Example #6
def cache_processor(handler):
    """cache and gzip processor. 
    Inspired by django.middleware.gzip.GZipMiddleware
    """
    ae = web.ctx.env.get('HTTP_ACCEPT_ENCODING', '')
    if config.cache and web.ctx.method in ["GET", "GET_json"] and re_accepts_gzip.search(ae):
        # cache keys are full request paths (path + query string)
        if web.ctx.fullpath not in cache:
            data = handler()
            cache[web.ctx.fullpath] = compress(web.safestr(data))
        web.expires(3600) # one hour
        web.header('Content-Encoding', 'gzip')
        return cache[web.ctx.fullpath]
    else:
        return handler()
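
A processor like this one is attached to a web.py application with add_processor. Below is a minimal sketch, assuming an illustrative URL mapping and handler; web.application, add_processor and app.run are standard web.py calls, while config, cache and the helper functions come from the example above:

import web

urls = ('/(.*)', 'index')  # illustrative URL mapping

class index:
    def GET(self, path):
        return 'hello'

app = web.application(urls, globals())

# Route every request through the cache/gzip processor defined above.
app.add_processor(cache_processor)

if __name__ == '__main__':
    app.run()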
Example #7
    def GET(self, category, key, value, size):
        i = web.input(default="true")
        key = key.lower()
        
        def notfound():
            if config.default_image and i.default.lower() != "false" and not i.default.startswith('http://'):
                return read_file(config.default_image)
            elif i.default.startswith('http://'):
                raise web.seeother(i.default)
            else:
                raise web.notfound("")
                
        def redirect(id):
            size_part = size and ("-" + size) or ""
            url = "/%s/id/%s%s.jpg" % (category, id, size_part)
            
            query = web.ctx.env.get('QUERY_STRING')
            if query:
                url += '?' + query
            raise web.found(url)
        
        if key == 'isbn':
            value = value.replace("-", "").strip() # strip hyphens from ISBN
            # Disabling ratelimit as iptables is taking care of botnets.
            #value = self.ratelimit_query(category, key, value)
            value = self.query(category, key, value)
        elif key != 'id':
            value = self.query(category, key, value)
        
        d = value and self.get_details(value, size.lower())
        if not d:
            return notfound()
            
        # set cache-for-ever headers only when requested with ID
        if key == 'id':
            etag = "%s-%s" % (d.id, size.lower())
            if not web.modified(trim_microsecond(d.created), etag=etag):
                raise web.notmodified()

            web.header('Cache-Control', 'public')
            web.expires(100 * 365 * 24 * 3600) # this image is not going to expire in next 100 years.
        else:
            web.header('Cache-Control', 'public')
            web.expires(10*60) # Allow the client to cache the image for 10 mins to avoid further requests
        
        web.header('Content-Type', 'image/jpeg')
        try:
            return read_image(d, size)
        except IOError:
            raise web.notfound()
Example #8
def expires(delta):
    """
    Specifies how long the response may be cached by clients. The input must be
    a duration, i.e. a datetime.timedelta object (the duration type in
    servicegen); for any other value the function returns and logs an error.

    The function sets the two HTTP headers "Expires" and "Cache-Control":
    Expires is understood by HTTP/1.0 implementations, Cache-Control by 1.1.
    The effect is that the client knows it may (but does not have to) cache the
    response for the given duration.
    """
    delta_secs = delta.days * 86400 + delta.seconds
    web.expires(delta)
    web.header('Cache-Control', 'max-age=' + str(delta_secs))
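
For context, a minimal sketch of how a handler might call this wrapper: the cached_page class and its response body are hypothetical, expires is the wrapper defined above, and web.header and datetime.timedelta are the real APIs involved.

import datetime

import web

class cached_page:
    def GET(self):
        # Hypothetical handler: allow clients to cache this response for ten minutes.
        expires(datetime.timedelta(minutes=10))
        web.header('Content-Type', 'text/plain')
        return 'cacheable for ten minutes'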
Example #9
def serveStatic(filename):
    if ( filename.startswith("/") or
         filename.find("//") > -1 or
         filename.find("..") > -1 ):
        return notFound(filename)

    (filename, contentType) = getFileAndType(filename)
    
    web.header('Content-Type', contentType)
    web.expires(60)

    # Read in binary mode so images and other non-text assets are served intact.
    with open(filename, 'rb') as stream:
        data = stream.read()
    return data
Example #10
    def GET(self, category, key, value, size):
        i = web.input(default="true")
        key = key.lower()
        
        def notfound():
            if config.default_image and i.default.lower() != "false" and not i.default.startswith('http://'):
                return read_file(config.default_image)
            elif i.default.startswith('http://'):
                raise web.seeother(i.default)
            else:
                raise web.notfound("")
                
        def redirect(id):
            size_part = size and ("-" + size) or ""
            url = "/%s/id/%s%s.jpg" % (category, id, size_part)
            
            query = web.ctx.env.get('QUERY_STRING')
            if query:
                url += '?' + query
            raise web.found(url)
        
        if key != 'id':
            value = _query(category, key, value)
            if value is None:
                return notfound()
            else:
                return redirect(value)
        else:
            d = db.details(value)
            if not d:
                return notfound()
                
            etag = "%s-%s" % (d.id, size.lower())
            if not web.modified(trim_microsecond(d.created), etag=etag):
                raise web.notmodified()

            web.header('Cache-Control', 'public')
            web.expires(100 * 365 * 24 * 3600) # this image is not going to expire in next 100 years.
            web.header('Content-Type', 'image/jpeg')
            try:
                return read_image(d, size)
            except IOError:
                raise web.notfound()
Example #11
 def GET(self, category, key, value, size):
     i = web.input(default="true")
     key = key.lower()
     
     d = _query(category, key, value)
     if d:
         if key == 'id':
             etag = "%s-%s" % (d.id, size.lower())
             if not web.modified(trim_microsecond(d.created), etag=etag):
                 raise web.notmodified()
             web.header('Cache-Control', 'public')
             web.expires(100 * 365 * 24 * 3600) # this image is not going to expire in next 100 years.
             
         web.header('Content-Type', 'image/jpeg')
         return read_image(d, size)
     elif config.default_image and i.default.lower() != "false" and not i.default.startswith('http://'):
         return read_file(config.default_image)
     elif i.default.startswith('http://'):
         raise web.seeother(i.default)
     else:
         raise web.notfound("")
Example #12
 def GET(self, category, key, value, size):
     i = web.input(default="true")
     key = key.lower()
     
     d = _query(category, key, value)
     if d:
         if key == 'id':
             web.lastmodified(d.created)
             web.header('Cache-Control', 'public')
             web.expires(100 * 365 * 24 * 3600) # this image is not going to expire in next 100 years.
             
         web.header('Content-Type', 'image/jpeg')
         filename = _cache.get_image(d.id, size)
         if filename:
             return serve_file(filename)
     elif config.default_image and i.default.lower() != "false" and not i.default.startswith('http://'):
         return serve_file(config.default_image)
     elif i.default.startswith('http://'):
         raise web.seeother(i.default)
     else:
         raise web.notfound("")
Example #13
    def GET(self, category, key, value, size):
        i = web.input(default="true")
        key = key.lower()

        def is_valid_url(url):
            return url.startswith("http://") or url.startswith("https://")

        def notfound():
            if key in ["id", "olid"] and config.get("upstream_base_url"):
                # this is only used in development
                base = web.rstrips(config.upstream_base_url, "/")
                raise web.redirect(base + web.ctx.fullpath)
            elif config.default_image and i.default.lower() != "false" and not is_valid_url(i.default):
                return read_file(config.default_image)
            elif is_valid_url(i.default):
                raise web.seeother(i.default)
            else:
                raise web.notfound("")

        def redirect(id):
            size_part = size and ("-" + size) or ""
            url = "/%s/id/%s%s.jpg" % (category, id, size_part)

            query = web.ctx.env.get('QUERY_STRING')
            if query:
                url += '?' + query
            raise web.found(url)

        if key == 'isbn':
            value = value.replace("-", "").strip() # strip hyphens from ISBN
            # Disabling ratelimit as iptables is taking care of botnets.
            #value = self.ratelimit_query(category, key, value)
            value = self.query(category, key, value)

            # Redirect isbn requests to archive.org.
            # This will heavily reduce the load on coverstore server.
            # The max_coveritem_index config parameter specifies the latest
            # olcovers items uploaded to archive.org.
            if value and self.is_cover_in_cluster(value):
                url = zipview_url_from_id(int(value), size)
                raise web.found(url)
        elif key == 'ia':
            url = self.get_ia_cover_url(value, size)
            if url:
                raise web.found(url)
            else:
                value = None # notfound or redirect to default. handled later.
        elif key != 'id':
            value = self.query(category, key, value)

        if value and safeint(value) in config.blocked_covers:
            raise web.notfound()

        # redirect to archive.org cluster for large size and original images whenever possible
        if value and (size == "L" or size == "") and self.is_cover_in_cluster(value):
            url = zipview_url_from_id(int(value), size)
            raise web.found(url)

        d = value and self.get_details(value, size.lower())
        if not d:
            return notfound()

        # set cache-for-ever headers only when requested with ID
        if key == 'id':
            etag = "%s-%s" % (d.id, size.lower())
            if not web.modified(trim_microsecond(d.created), etag=etag):
                raise web.notmodified()

            web.header('Cache-Control', 'public')
            web.expires(100 * 365 * 24 * 3600) # this image is not going to expire in next 100 years.
        else:
            web.header('Cache-Control', 'public')
            web.expires(10*60) # Allow the client to cache the image for 10 mins to avoid further requests

        web.header('Content-Type', 'image/jpeg')
        try:
            return read_image(d, size)
        except IOError:
            raise web.notfound()
Example #14
 def GET(self, device_name):
     state = self.cache_get()[device_name]
     ttl = self.cache_expires - time.time()
     web.expires(datetime.timedelta(seconds=ttl))
     web.header('Cache-Control', 'public, max-age=%d' % int(ttl+1))
     return { 'state' : state }