# Assumes: import web; from datetime import datetime, timedelta
def cache_valid(cache_time):
    last_time_str = web.ctx.env.get('HTTP_IF_MODIFIED_SINCE', '')
    last_time = web.net.parsehttpdate(last_time_str)
    now = datetime.now()
    if last_time and last_time + timedelta(seconds=cache_time) > now:
        web.notmodified()  # instantiating NotModified sets the 304 status on web.ctx
        return True
    else:
        web.lastmodified(now)
        return False
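# A minimal usage sketch of a helper like cache_valid above; the Dashboard
# class, the 300-second window, and `render` are assumptions for illustration,
# not part of the original snippet.
class Dashboard:
    def GET(self):
        if cache_valid(300):       # client copy is younger than 5 minutes
            return ''              # 304 status was already set by web.notmodified()
        return render.dashboard()  # hypothetical template call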
# Inner function of a caching decorator: `f` and `seconds` come from the
# enclosing scope (see the sketch after the next snippet).
def wrapped_f(*args):
    last_time_str = web.ctx.env.get('HTTP_IF_MODIFIED_SINCE', '')
    last_time = web.net.parsehttpdate(last_time_str)
    now = datetime.datetime.now()
    if last_time and last_time + datetime.timedelta(seconds=seconds) > now:
        web.notmodified()
    else:
        web.lastmodified(now)
        web.header('Cache-Control', 'max-age=' + str(seconds))
        yield f(*args)  # yield the body only when the client copy is stale
# Same decorator body as above, with the freshness check split across lines.
def wrapped_f(*args):
    last_time_str = web.ctx.env.get('HTTP_IF_MODIFIED_SINCE', '')
    last_time = web.net.parsehttpdate(last_time_str)
    now = datetime.datetime.now()
    if last_time and \
            last_time + datetime.timedelta(seconds=seconds) > now:
        web.notmodified()
    else:
        web.lastmodified(now)
        web.header('Cache-Control', 'max-age=' + str(seconds))
        yield f(*args)
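# Both wrapped_f bodies above read `f` and `seconds` from an enclosing scope.
# A minimal sketch of the decorator they presumably sit inside; the name
# `cache` is an assumption, not taken from the original code.
import datetime
import web

def cache(seconds):
    def decorator(f):
        def wrapped_f(*args):
            last_time_str = web.ctx.env.get('HTTP_IF_MODIFIED_SINCE', '')
            last_time = web.net.parsehttpdate(last_time_str)
            now = datetime.datetime.now()
            if last_time and last_time + datetime.timedelta(seconds=seconds) > now:
                web.notmodified()  # sets the 304 status on web.ctx
            else:
                web.lastmodified(now)
                web.header('Cache-Control', 'max-age=' + str(seconds))
                yield f(*args)  # web.py accepts generator handlers
        return wrapped_f
    return decorator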
def GET(self):
    '''
    render = web.template.render("templates")
    return render.server()
    '''
    currenttime = int(time.time())
    if currenttime % self.waittime == 0:
        '''web.header("Content-type","image/x-png")'''
        return serverImage.createImage()
    else:
        web.notmodified()
def cache_valid(path):
    return False  # caching disabled: everything below is unreachable
    last_time_str = web.ctx.env.get('HTTP_IF_MODIFIED_SINCE', '')
    last_time = web.net.parsehttpdate(last_time_str)
    if last_time:
        # getmtime() returns a float timestamp; convert before comparing datetimes
        mtime = datetime.datetime.fromtimestamp(os.path.getmtime(path))
        if last_time < mtime:
            web.notmodified()
            return True
    web.lastmodified(datetime.datetime.now())
    return False
def cache_valid(self):
    #return False
    last_time_str = web.ctx.env.get('HTTP_IF_MODIFIED_SINCE', '')
    last_time = web.net.parsehttpdate(last_time_str)
    if last_time:
        nowtick = int(time.time())
        tick = time.mktime(last_time.timetuple())
        nexttick = tick + (300 - tick % 300)  # next 5-minute boundary after last_time
        #print "weiwei" + datetime.datetime.fromtimestamp(nexttick).strftime("%m-%d %H %M %S")
        if nexttick > nowtick:
            web.notmodified()
            return True
    web.lastmodified(datetime.datetime.now())
    return False
def GET(self, theme, filename):
    mime_type, encoding = mimetypes.guess_type(filename)
    if not mime_type:
        mime_type = 'application/octet-stream'
    memcache_key = 'theme:%s:%s' % (theme, filename)
    body = memcache.get(memcache_key)
    if not body:
        theme_file_query = ThemeFile.all().filter('theme_name =', theme)
        theme_file_query.filter('filename =', filename)
        theme_file_query.filter('filetype =', 'file')
        f = theme_file_query.get()
        if not f:
            raise web.notfound()
        body = str(f.filecontent)
        memcache.set(memcache_key, body)
    etag = str(binascii.crc32(body))
    self.SetCacheHeader(etag)
    match = web.ctx.env.get('HTTP_IF_NONE_MATCH')
    if match and match == etag:
        raise web.notmodified()
    web.header('Content-Type', mime_type)
    return body
def GET(self, path):
    _debug_toolbar_path = os.path.dirname(__file__)
    abspath = os.path.join(_debug_toolbar_path, path)
    stat_result = os.stat(abspath)
    modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
    # Note: this sends str(datetime) rather than an RFC 1123 HTTP date; the
    # strptime format below only works because clients echo the value back verbatim.
    web.header("Last-Modified", modified)
    mime_type, encoding = mimetypes.guess_type(abspath)
    if mime_type:
        web.header("Content-Type", mime_type)
    cache_time = 86400 * 365 * 10
    web.header("Expires", datetime.datetime.now() +
               datetime.timedelta(seconds=cache_time))
    web.header("Cache-Control", "max-age=%s" % cache_time)
    ims_value = web.ctx.env.get("HTTP_IF_MODIFIED_SINCE")
    if ims_value is not None:
        since = datetime.datetime.strptime(ims_value, '%Y-%m-%d %H:%M:%S')
        if since >= modified:
            raise web.notmodified()
    with open(abspath, "rb") as f:
        data = f.read()
    hasher = hashlib.sha1()
    hasher.update(data)
    web.header("Etag", '"%s"' % hasher.hexdigest())
    return data
def GET(self, path):
    import mimetypes
    import stat
    import hashlib
    abspath = os.path.join(settings.STATIC_DIR, path)
    stat_result = os.stat(abspath)
    modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
    web.header("Last-Modified", modified.strftime('%a, %d %b %Y %H:%M:%S GMT'))
    mime_type, encoding = mimetypes.guess_type(abspath)
    if mime_type:
        web.header("Content-Type", mime_type)
    # cache for 10 years
    cache_time = 86400 * 365 * 10
    web.header("Expires",
               datetime.datetime.now() + datetime.timedelta(seconds=cache_time))
    web.header("Cache-Control", "max-age=%s" % cache_time)
    ims_value = web.ctx.env.get("HTTP_IF_MODIFIED_SINCE")
    if ims_value is not None:
        # IE sends a non-standard If-Modified-Since value, which breaks caching;
        # strip the extra part.
        # IE:       Sat, 02 Feb 2013 14:44:34 GMT; length=4285
        # Standard: Sat, 02 Feb 2013 14:44:34 GMT
        stupid = ims_value.find(';')
        if stupid != -1:
            ims_value = ims_value[:stupid]
        since = datetime.datetime.strptime(ims_value, '%a, %d %b %Y %H:%M:%S %Z')
        if since >= modified:
            # in debug mode, force reloading of all non-third-party js files
            if not (settings.DEBUG and abspath.endswith('.js')
                    and '3rd' not in abspath):
                raise web.notmodified()
    with open(abspath, "rb") as f:
        data = f.read()
    hasher = hashlib.sha1()
    hasher.update(data)
    web.header("Etag", '"%s"' % hasher.hexdigest())
    # inline @import'ed js files (third-party libraries are left alone)
    if abspath.endswith('.js') and '3rd' not in abspath:
        libs = re.findall(r'(.*?@import "([^"]+\.js)".*?)', data)
        for line, lib in libs:
            lib = os.path.join(settings.STATIC_DIR, lib)
            data = data.replace(line, open(lib).read())
        # mangle: shorten local variable names
        # mangle_toplevel: mangle the whole file (top-level/function names too)
        #data = slimit.minify(data, mangle=True)
        if not settings.DEBUG:
            import slimit
            data = slimit.minify(data, mangle=True, mangle_toplevel=True)
    return data
def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    def notfound():
        if config.default_image and i.default.lower() != "false" \
                and not i.default.startswith('http://'):
            return read_file(config.default_image)
        elif i.default.startswith('http://'):
            raise web.seeother(i.default)
        else:
            raise web.notfound("")

    def redirect(id):
        size_part = size and ("-" + size) or ""
        url = "/%s/id/%s%s.jpg" % (category, id, size_part)
        query = web.ctx.env.get('QUERY_STRING')
        if query:
            url += '?' + query
        raise web.found(url)

    if key == 'isbn':
        value = value.replace("-", "").strip()  # strip hyphens from ISBN
        # Disabling ratelimit as iptables is taking care of botnets.
        #value = self.ratelimit_query(category, key, value)
        value = self.query(category, key, value)
    elif key != 'id':
        value = self.query(category, key, value)

    d = value and db.details(value)
    if not d:
        return notfound()

    # set cache-for-ever headers only when requested with ID
    if key == 'id':
        etag = "%s-%s" % (d.id, size.lower())
        if not web.modified(trim_microsecond(d.created), etag=etag):
            raise web.notmodified()
        web.header('Cache-Control', 'public')
        web.expires(100 * 365 * 24 * 3600)  # this image is not going to expire in next 100 years
    else:
        web.header('Cache-Control', 'public')
        web.expires(10 * 60)  # allow the client to cache the image for 10 mins to avoid further requests

    web.header('Content-Type', 'image/jpeg')
    try:
        return read_image(d, size)
    except IOError:
        raise web.notfound()
def redirect(self, url, status=303):
    if status == 302:
        return web.found(url)
    elif status == 303:
        return web.seeother(url)
    elif status == 304:
        return web.notmodified()  # 304 carries no Location; notmodified() takes no url
    elif status == 307:  # web.py's tempredirect is 307 Temporary Redirect
        return web.tempredirect(url)
    else:
        return web.seeother(url)
def wrapper(*args, **kwds):
    rsp_data = func(*args, **kwds)
    etag = '"%s"' % md5(rsp_data.encode('utf-8')).hexdigest()
    # format: see <http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26>
    n = set(x.strip().lstrip('W/')
            for x in web.ctx.env.get('HTTP_IF_NONE_MATCH', '').split(','))
    if etag in n:
        raise web.notmodified()
    else:
        web.header('ETag', etag)
        web.header('Cache-Control', 'no-cache')
        return rsp_data
def GET(self, path, filename):
    filepath = dirname + 'files/' + path + '/' + filename
    try:
        etag = str(os.path.getmtime(filepath))
        last_modified = datetime.datetime.fromtimestamp(os.path.getmtime(filepath))
        if web.modified(last_modified, etag):
            web.header('Content-type', mimetypes.guess_type(filename)[0])
            with open(filepath, 'rb') as f:
                return f.read()
        else:
            return web.notmodified()
    except OSError:  # FileNotFoundError is a subclass of OSError
        return web.notfound()
def wrapper(*args, **kwds):
    rsp_data = func(*args, **kwds)
    if type(rsp_data) is unicode:
        etag = '"%s"' % md5(rsp_data.encode('utf-8', 'ignore')).hexdigest()
    else:
        etag = '"%s"' % md5(rsp_data).hexdigest()
    # format: see <http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26>
    n = set(x.strip().lstrip('W/')
            for x in web.ctx.env.get('HTTP_IF_NONE_MATCH', '').split(','))
    if etag in n:
        raise web.notmodified()
    else:
        web.header('ETag', etag)
        web.header('Cache-Control', 'no-cache')
        return rsp_data
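# As with the time-based variant earlier, the two wrapper bodies above read
# `func` from an enclosing scope. A minimal sketch of the ETag decorator they
# assume; the name `etagged` is hypothetical.
import web
from hashlib import md5

def etagged(func):
    def wrapper(*args, **kwds):
        rsp_data = func(*args, **kwds)
        etag = '"%s"' % md5(rsp_data.encode('utf-8')).hexdigest()
        # Compare against every tag in If-None-Match, dropping weak "W/" prefixes.
        n = set(x.strip().lstrip('W/')
                for x in web.ctx.env.get('HTTP_IF_NONE_MATCH', '').split(','))
        if etag in n:
            raise web.notmodified()
        web.header('ETag', etag)
        web.header('Cache-Control', 'no-cache')  # revalidate on every request
        return rsp_data
    return wrapper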
def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    def notfound():
        if config.default_image and i.default.lower() != "false" \
                and not i.default.startswith('http://'):
            return read_file(config.default_image)
        elif i.default.startswith('http://'):
            raise web.seeother(i.default)
        else:
            raise web.notfound("")

    def redirect(id):
        size_part = size and ("-" + size) or ""
        url = "/%s/id/%s%s.jpg" % (category, id, size_part)
        query = web.ctx.env.get('QUERY_STRING')
        if query:
            url += '?' + query
        raise web.found(url)

    if key == 'isbn':
        value = value.replace("-", "").strip()  # strip hyphens from ISBN
        # Disabling ratelimit as iptables is taking care of botnets.
        #value = self.ratelimit_query(category, key, value)
        value = self.query(category, key, value)
    elif key != 'id':
        value = self.query(category, key, value)

    d = value and self.get_details(value, size.lower())
    if not d:
        return notfound()

    # set cache-for-ever headers only when requested with ID
    if key == 'id':
        etag = "%s-%s" % (d.id, size.lower())
        if not web.modified(trim_microsecond(d.created), etag=etag):
            raise web.notmodified()
        web.header('Cache-Control', 'public')
        web.expires(100 * 365 * 24 * 3600)  # this image is not going to expire in next 100 years
    else:
        web.header('Cache-Control', 'public')
        web.expires(10 * 60)  # allow the client to cache the image for 10 mins to avoid further requests

    web.header('Content-Type', 'image/jpeg')
    try:
        return read_image(d, size)
    except IOError:
        raise web.notfound()
def GET(self):
    """
    Port of evercookie php simple cache code.
    """
    try:
        web.header("Content-Type", "image/png")
        web.header("Last-Modified", "Wed, 30 Jun 2010 21:36:48 GMT")
        web.header("Expires", "Tue, 31 Dec 2030 23:30:45 GMT")
        web.header("Cache-Control", "private, max-age=630720000")
        return web.cookies().evercookie_cache
    except AttributeError:
        # no cookie: force the client to fall back to its cached copy
        web.header("Content-Type", "image/png")
        raise web.notmodified()
def GET(self, name):
    import binascii
    theme = Theme.get_by_key_name(name)
    screenshot = str(theme.screenshot)
    etag = str(binascii.crc32(screenshot))
    match = web.ctx.env.get('HTTP_IF_NONE_MATCH')
    if match and match == etag:
        raise web.notmodified()
    web.header('ETag', etag)
    web.header('Content-Type', 'image/png')
    return screenshot
def GET(self, path):
    import mimetypes
    import stat
    import hashlib
    import os
    import settings
    import datetime
    abspath = os.path.join(settings.STATIC_DIR, path)
    stat_result = os.stat(abspath)
    modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
    web.header("Last-Modified", modified.strftime('%a, %d %b %Y %H:%M:%S GMT'))
    mime_type, encoding = mimetypes.guess_type(abspath)
    if mime_type:
        web.header("Content-Type", mime_type)
    # cache for 1 year
    cache_time = 86400 * 365 * 1
    web.header("Expires",
               datetime.datetime.now() + datetime.timedelta(seconds=cache_time))
    web.header("Cache-Control", "max-age=%s" % cache_time)
    ims_value = web.ctx.env.get("HTTP_IF_MODIFIED_SINCE")
    if ims_value is not None:
        # IE sends a non-standard If-Modified-Since value, which breaks caching;
        # strip the extra part.
        # IE:       Sat, 02 Feb 2013 14:44:34 GMT; length=4285
        # Standard: Sat, 02 Feb 2013 14:44:34 GMT
        stupid = ims_value.find(';')
        if stupid != -1:
            ims_value = ims_value[:stupid]
        since = datetime.datetime.strptime(ims_value, '%a, %d %b %Y %H:%M:%S %Z')
        if since >= modified:
            # in debug mode, force reloading of all non-third-party js files
            if not (settings.DEBUG and abspath.endswith('.js')
                    and '3rd' not in abspath):
                raise web.notmodified()
    with open(abspath, "rb") as f:
        data = f.read()
    hasher = hashlib.sha1()
    hasher.update(data)
    web.header("Etag", '"%s"' % hasher.hexdigest())
    if settings.GZIP_STATIC_FILE:
        data = gzipData(data)
    return data
def GET(self, key=''):
    etag = get_header('HTTP_IF_NONE_MATCH')
    if key in self.kvdb:
        if etag == key:
            raise web.notmodified()
        else:
            attrs = self.kvdb[key]
            if 'viewcount' not in attrs:
                attrs['viewcount'] = 0
            attrs['viewcount'] += 1
            self.kvdb[key] = attrs
            web.header('View-Count', attrs['viewcount'])
            web.header('Content-Type', attrs['Content-Type'])
            web.header('ETag', key)
            web.header('Content-Disposition', 'filename="%s"' % attrs['filename'])
            return self.kvdbval[key]
    else:
        return 'no thing'
def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    def notfound():
        if config.default_image and i.default.lower() != "false" \
                and not i.default.startswith('http://'):
            return read_file(config.default_image)
        elif i.default.startswith('http://'):
            raise web.seeother(i.default)
        else:
            raise web.notfound("")

    def redirect(id):
        size_part = size and ("-" + size) or ""
        url = "/%s/id/%s%s.jpg" % (category, id, size_part)
        query = web.ctx.env.get('QUERY_STRING')
        if query:
            url += '?' + query
        raise web.found(url)

    if key != 'id':
        value = _query(category, key, value)
        if value is None:
            return notfound()
        else:
            return redirect(value)
    else:
        d = db.details(value)
        if not d:
            return notfound()
        etag = "%s-%s" % (d.id, size.lower())
        if not web.modified(trim_microsecond(d.created), etag=etag):
            raise web.notmodified()
        web.header('Cache-Control', 'public')
        web.expires(100 * 365 * 24 * 3600)  # this image is not going to expire in next 100 years
        web.header('Content-Type', 'image/jpeg')
        try:
            return read_image(d, size)
        except IOError:
            raise web.notfound()
def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()
    d = _query(category, key, value)
    if d:
        if key == 'id':
            etag = "%s-%s" % (d.id, size.lower())
            if not web.modified(trim_microsecond(d.created), etag=etag):
                raise web.notmodified()
            web.header('Cache-Control', 'public')
            web.expires(100 * 365 * 24 * 3600)  # this image is not going to expire in next 100 years
        web.header('Content-Type', 'image/jpeg')
        return read_image(d, size)
    elif config.default_image and i.default.lower() != "false" \
            and not i.default.startswith('http://'):
        return read_file(config.default_image)
    elif i.default.startswith('http://'):
        raise web.seeother(i.default)
    else:
        raise web.notfound("")
def static_file_handler(static_dir, path):
    '''Reload a static file when it has changed; otherwise serve from cache.'''
    import mimetypes
    import stat
    import hashlib
    abspath = os.path.join(static_dir, path)
    stat_result = os.stat(abspath)
    modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
    web.header("Last-Modified", modified.strftime('%a, %d %b %Y %H:%M:%S GMT'))
    mime_type, encoding = mimetypes.guess_type(abspath)
    if mime_type:
        web.header("Content-Type", mime_type)
    # cache for N years
    N = .5
    cache_time = 86400 * 365 * N
    web.header("Expires",
               datetime.datetime.now() + datetime.timedelta(seconds=cache_time))
    web.header("Cache-Control", "max-age=%s" % cache_time)
    ims_value = web.ctx.env.get("HTTP_IF_MODIFIED_SINCE")
    if ims_value is not None:
        # IE sends a non-standard If-Modified-Since value, which breaks caching;
        # keep only the part before the semicolon.
        # IE:       Sat, 02 Feb 2013 14:44:34 GMT; length=4285
        # Standard: Sat, 02 Feb 2013 14:44:34 GMT
        ims_value = ims_value.split(';')[0]
        since = datetime.datetime.strptime(ims_value, '%a, %d %b %Y %H:%M:%S %Z')
        if since >= modified:
            raise web.notmodified()
    with open(abspath, "rb") as f:
        data = f.read()
    hasher = hashlib.sha1()
    hasher.update(data)
    web.header("Etag", '"%s"' % hasher.hexdigest())
    return data
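# A minimal sketch of wiring static_file_handler above into a web.py URL
# table; the /assets prefix, the Assets class, and the 'static' directory
# are assumptions for illustration.
import web

urls = ('/assets/(.*)', 'Assets')

class Assets:
    def GET(self, path):
        # delegate to the conditional-caching handler above
        return static_file_handler('static', path)

if __name__ == '__main__':
    web.application(urls, globals()).run()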
def GET(self):
    """
    Port of evercookie php png code.
    """
    try:
        ec_png = web.cookies().evercookie_png
        rgb = tuple(ord(x) for x in ec_png)
        i = Image.new("RGB", (200, 1))
        px = i.load()
        px[0, 0] = rgb
        sio = StringIO.StringIO()
        i.save(sio, "PNG")
        web.header("Content-Type", "image/png")
        web.header("Last-Modified", "Wed, 30 Jun 2010 21:36:48 GMT")
        web.header("Expires", "Tue, 31 Dec 2030 23:30:45 GMT")
        web.header("Cache-Control", "private, max-age=630720000")
        return sio.getvalue()
    except Exception:
        # no cookie found, force a read from the cache
        raise web.notmodified()
def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    def is_valid_url(url):
        return url.startswith("http://") or url.startswith("https://")

    def notfound():
        if key in ["id", "olid"] and config.get("upstream_base_url"):
            # this is only used in development
            base = web.rstrips(config.upstream_base_url, "/")
            raise web.redirect(base + web.ctx.fullpath)
        elif config.default_image and i.default.lower() != "false" \
                and not is_valid_url(i.default):
            return read_file(config.default_image)
        elif is_valid_url(i.default):
            raise web.seeother(i.default)
        else:
            raise web.notfound("")

    def redirect(id):
        size_part = size and ("-" + size) or ""
        url = "/%s/id/%s%s.jpg" % (category, id, size_part)
        query = web.ctx.env.get('QUERY_STRING')
        if query:
            url += '?' + query
        raise web.found(url)

    if key == 'isbn':
        value = value.replace("-", "").strip()  # strip hyphens from ISBN
        # Disabling ratelimit as iptables is taking care of botnets.
        #value = self.ratelimit_query(category, key, value)
        value = self.query(category, key, value)

        # Redirect isbn requests to archive.org.
        # This will heavily reduce the load on coverstore server.
        # The max_coveritem_index config parameter specifies the latest
        # olcovers items uploaded to archive.org.
        if value and self.is_cover_in_cluster(value):
            url = zipview_url_from_id(int(value), size)
            raise web.found(url)
    elif key == 'ia':
        url = self.get_ia_cover_url(value, size)
        if url:
            raise web.found(url)
        else:
            value = None  # notfound or redirect to default, handled later
    elif key != 'id':
        value = self.query(category, key, value)

    if value and safeint(value) in config.blocked_covers:
        raise web.notfound()

    # redirect to archive.org cluster for large size and original images whenever possible
    if value and (size == "L" or size == "") and self.is_cover_in_cluster(value):
        url = zipview_url_from_id(int(value), size)
        raise web.found(url)

    d = value and self.get_details(value, size.lower())
    if not d:
        return notfound()

    # set cache-for-ever headers only when requested with ID
    if key == 'id':
        etag = "%s-%s" % (d.id, size.lower())
        if not web.modified(trim_microsecond(d.created), etag=etag):
            raise web.notmodified()
        web.header('Cache-Control', 'public')
        web.expires(100 * 365 * 24 * 3600)  # this image is not going to expire in next 100 years
    else:
        web.header('Cache-Control', 'public')
        web.expires(10 * 60)  # allow the client to cache the image for 10 mins to avoid further requests

    web.header('Content-Type', 'image/jpeg')
    try:
        return read_image(d, size)
    except IOError:
        raise web.notfound()