def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    def notfound():
        if (
            config.default_image
            and i.default.lower() != "false"
            and not i.default.startswith('http://')
        ):
            return read_file(config.default_image)
        elif i.default.startswith('http://'):
            raise web.seeother(i.default)
        else:
            raise web.notfound("")

    def redirect(id):  # note: not called in this version of the handler
        size_part = ("-" + size) if size else ""
        url = "/%s/id/%s%s.jpg" % (category, id, size_part)
        query = web.ctx.env.get('QUERY_STRING')
        if query:
            url += '?' + query
        raise web.found(url)

    if key == 'isbn':
        value = value.replace("-", "").strip()  # strip hyphens from ISBN
        # Disabling ratelimit as iptables is taking care of botnets.
        # value = self.ratelimit_query(category, key, value)
        value = self.query(category, key, value)
    elif key != 'id':
        value = self.query(category, key, value)

    d = value and db.details(value)
    if not d:
        return notfound()

    # set cache-forever headers only when requested with ID
    if key == 'id':
        etag = "%s-%s" % (d.id, size.lower())
        if not web.modified(trim_microsecond(d.created), etag=etag):
            raise web.notmodified()

        web.header('Cache-Control', 'public')
        # this image is not going to expire in the next 100 years
        web.expires(100 * 365 * 24 * 3600)
    else:
        web.header('Cache-Control', 'public')
        # allow the client to cache the image for 10 minutes to avoid further requests
        web.expires(10 * 60)

    web.header('Content-Type', 'image/jpeg')
    try:
        return read_image(d, size)
    except IOError:
        raise web.notfound()
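# The conditional-GET check above calls trim_microsecond(), which is not part
# of this snippet. A minimal sketch of what it plausibly does, assuming it
# exists only to drop sub-second precision so the stored timestamp compares
# cleanly against the one-second resolution of HTTP Last-Modified dates:

def trim_microsecond(date):
    # web.modified() compares whole seconds; microseconds would make the
    # stored datetime never match the client-sent If-Modified-Since value.
    return date.replace(microsecond=0)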
def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    def notfound():
        if (
            config.default_image
            and i.default.lower() != "false"
            and not i.default.startswith('http://')
        ):
            return read_file(config.default_image)
        elif i.default.startswith('http://'):
            raise web.seeother(i.default)
        else:
            raise web.notfound("")

    def redirect(id):  # note: not called in this version of the handler
        size_part = ("-" + size) if size else ""
        url = "/%s/id/%s%s.jpg" % (category, id, size_part)
        query = web.ctx.env.get('QUERY_STRING')
        if query:
            url += '?' + query
        raise web.found(url)

    if key == 'isbn':
        value = value.replace("-", "").strip()  # strip hyphens from ISBN
        # Disabling ratelimit as iptables is taking care of botnets.
        # value = self.ratelimit_query(category, key, value)
        value = self.query(category, key, value)
    elif key != 'id':
        value = self.query(category, key, value)

    d = value and self.get_details(value, size.lower())
    if not d:
        return notfound()

    # set cache-forever headers only when requested with ID
    if key == 'id':
        etag = "%s-%s" % (d.id, size.lower())
        if not web.modified(trim_microsecond(d.created), etag=etag):
            raise web.notmodified()

        web.header('Cache-Control', 'public')
        # this image is not going to expire in the next 100 years
        web.expires(100 * 365 * 24 * 3600)
    else:
        web.header('Cache-Control', 'public')
        # allow the client to cache the image for 10 minutes to avoid further requests
        web.expires(10 * 60)

    web.header('Content-Type', 'image/jpeg')
    try:
        return read_image(d, size)
    except IOError:
        raise web.notfound()
def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    def notfound():
        if (
            config.default_image
            and i.default.lower() != "false"
            and not i.default.startswith('http://')
        ):
            return read_file(config.default_image)
        elif i.default.startswith('http://'):
            raise web.seeother(i.default)
        else:
            raise web.notfound("")

    def redirect(id):
        size_part = ("-" + size) if size else ""
        url = "/%s/id/%s%s.jpg" % (category, id, size_part)
        query = web.ctx.env.get('QUERY_STRING')
        if query:
            url += '?' + query
        raise web.found(url)

    if key != 'id':
        value = _query(category, key, value)
        if value is None:
            return notfound()
        else:
            return redirect(value)
    else:
        d = db.details(value)
        if not d:
            return notfound()

        etag = "%s-%s" % (d.id, size.lower())
        if not web.modified(trim_microsecond(d.created), etag=etag):
            raise web.notmodified()

        web.header('Cache-Control', 'public')
        # this image is not going to expire in the next 100 years
        web.expires(100 * 365 * 24 * 3600)
        web.header('Content-Type', 'image/jpeg')
        try:
            return read_image(d, size)
        except IOError:
            raise web.notfound()
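# read_image() is shared by all of these revisions but is not shown in the
# snippet. A plausible sketch, assuming the details record carries a filename
# per pre-generated size (filename, filename_s, filename_m, filename_l) and
# that read_file() returns raw bytes -- the column names are an assumption,
# not taken from this code:

def read_image(d, size):
    if size.lower() in ("s", "m", "l"):
        # serve the pre-generated thumbnail for small/medium/large requests
        filename = d["filename_" + size.lower()]
    else:
        # an empty size means the original upload
        filename = d["filename"]
    if not filename:
        # surfaces to the caller as the IOError handled in GET()
        raise IOError("no image file recorded for cover %s" % d["id"])
    return read_file(filename)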
def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    d = _query(category, key, value)
    if d:
        if key == 'id':
            etag = "%s-%s" % (d.id, size.lower())
            if not web.modified(trim_microsecond(d.created), etag=etag):
                raise web.notmodified()

            web.header('Cache-Control', 'public')
            # this image is not going to expire in the next 100 years
            web.expires(100 * 365 * 24 * 3600)

        web.header('Content-Type', 'image/jpeg')
        return read_image(d, size)
    elif (
        config.default_image
        and i.default.lower() != "false"
        and not i.default.startswith('http://')
    ):
        return read_file(config.default_image)
    elif i.default.startswith('http://'):
        raise web.seeother(i.default)
    else:
        raise web.notfound("")
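# _query() resolves a (category, key, value) triple. Note its contract differs
# between revisions: in the version above it returns a full details record,
# while in the earlier version it returns a bare cover id that gets passed to
# redirect(). A hypothetical sketch matching the version above, assuming
# db.details() looks up by cover id and db.query() searches by field such as
# ISBN or OLID (the db.query signature is assumed, not confirmed here):

def _query(category, key, value):
    if key == 'id':
        # direct lookup by cover id
        return db.details(value)
    # search by isbn/olid/etc. and take the first match, if any
    ids = db.query(category, key, value)
    return db.details(ids[0]) if ids else None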
def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    def is_valid_url(url):
        return url.startswith("http://") or url.startswith("https://")

    def notfound():
        if key in ["id", "olid"] and config.get("upstream_base_url"):
            # this is only used in development
            base = web.rstrips(config.upstream_base_url, "/")
            raise web.redirect(base + web.ctx.fullpath)
        elif (
            config.default_image
            and i.default.lower() != "false"
            and not is_valid_url(i.default)
        ):
            return read_file(config.default_image)
        elif is_valid_url(i.default):
            raise web.seeother(i.default)
        else:
            raise web.notfound("")

    def redirect(id):  # note: not called in this version of the handler
        size_part = ("-" + size) if size else ""
        url = "/%s/id/%s%s.jpg" % (category, id, size_part)
        query = web.ctx.env.get('QUERY_STRING')
        if query:
            url += '?' + query
        raise web.found(url)

    if key == 'isbn':
        value = value.replace("-", "").strip()  # strip hyphens from ISBN
        # Disabling ratelimit as iptables is taking care of botnets.
        # value = self.ratelimit_query(category, key, value)
        value = self.query(category, key, value)

        # Redirect ISBN requests to archive.org.
        # This will heavily reduce the load on the coverstore server.
        # The max_coveritem_index config parameter specifies the latest
        # olcovers item uploaded to archive.org.
        if value and self.is_cover_in_cluster(value):
            url = zipview_url_from_id(int(value), size)
            raise web.found(url)
    elif key == 'ia':
        url = self.get_ia_cover_url(value, size)
        if url:
            raise web.found(url)
        else:
            value = None  # notfound or redirect to default; handled later
    elif key != 'id':
        value = self.query(category, key, value)

    if value and safeint(value) in config.blocked_covers:
        raise web.notfound()

    # redirect to the archive.org cluster for large-size and original images whenever possible
    if value and (size == "L" or size == "") and self.is_cover_in_cluster(value):
        url = zipview_url_from_id(int(value), size)
        raise web.found(url)

    d = value and self.get_details(value, size.lower())
    if not d:
        return notfound()

    # set cache-forever headers only when requested with ID
    if key == 'id':
        etag = "%s-%s" % (d.id, size.lower())
        if not web.modified(trim_microsecond(d.created), etag=etag):
            raise web.notmodified()

        web.header('Cache-Control', 'public')
        # this image is not going to expire in the next 100 years
        web.expires(100 * 365 * 24 * 3600)
    else:
        web.header('Cache-Control', 'public')
        # allow the client to cache the image for 10 minutes to avoid further requests
        web.expires(10 * 60)

    web.header('Content-Type', 'image/jpeg')
    try:
        return read_image(d, size)
    except IOError:
        raise web.notfound()
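# zipview_url_from_id() and is_cover_in_cluster() (a method on the handler
# above) are defined elsewhere. A hedged sketch of the redirect target,
# assuming covers are bundled 10,000 per zip into archive.org "olcovers"
# items and that max_coveritem_index marks the last item uploaded -- the item
# layout and the threshold arithmetic are assumptions, not confirmed by this
# snippet:

def is_cover_in_cluster(cover_id):
    # only covers already uploaded to archive.org can be served from there
    return int(cover_id) < (config.max_coveritem_index or 0) * 10000

def zipview_url_from_id(cover_id, size):
    # e.g. cover 8234567, size "M" -> .../olcovers823/olcovers823-M.zip/8234567-M.jpg
    suffix = ("-" + size.upper()) if size else ""
    item = "olcovers%d" % (cover_id // 10000)
    return "https://archive.org/download/%s/%s%s.zip/%d%s.jpg" % (
        item, item, suffix, cover_id, suffix
    )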