def POST(self, category):
    """Touch a cover: update its last-modified timestamp.

    Form parameters:
        id: numeric cover id (required).
        redirect_url: where to send the client afterwards; falls back
            to the HTTP referrer so plain HTML forms return to the
            calling page.
    """
    i = web.input(id=None, redirect_url=None)
    # web.py exposes request headers via web.ctx.env with CGI-style names.
    # The header is spelled HTTP_REFERER (single R, per the HTTP spec);
    # the previous web.ctx.get('HTTP_REFERRER') always returned None.
    redirect_url = i.redirect_url or web.ctx.env.get('HTTP_REFERER')
    id = i.id and safeint(i.id, None)
    if id:
        db.touch(id)
        if redirect_url:
            raise web.seeother(redirect_url)
        # No redirect target available: report success instead of issuing
        # a broken redirect to None (mirrors the delete handler's shape).
        return 'cover has been touched successfully.'
    else:
        return 'no such id: %s' % id
def GET(self, category):
    """List covers in a category, optionally filtered by olid(s).

    Query parameters: olid (single value or comma-separated list),
    offset/limit (limit capped at 100), cmd=ids (olid -> id mapping),
    details=true (full metadata records), callback (JSONP wrapper).
    """
    i = web.input(olid=None, offset=0, limit=10, callback=None, details="false", cmd=None)
    offset = safeint(i.offset, 0)
    limit = safeint(i.limit, 10)
    details = i.details.lower() == "true"
    # cap the page size to keep queries cheap
    if limit > 100:
        limit = 100
    # a comma-separated olid means "match any of these"
    if i.olid and ',' in i.olid:
        i.olid = i.olid.split(',')
    rows = db.query(category, i.olid, offset=offset, limit=limit)

    if i.cmd == "ids":
        payload = {row.olid: row.id for row in rows}
    elif not details:
        payload = [row.id for row in rows]
    else:
        payload = [
            {
                'id': row.id,
                'olid': row.olid,
                'created': row.created.isoformat(),
                'last_modified': row.last_modified.isoformat(),
                'source_url': row.source_url,
                'width': row.width,
                'height': row.height,
            }
            for row in rows
        ]

    json = simplejson.dumps(payload)
    web.header('Content-Type', 'text/javascript')
    if i.callback:
        return "%s(%s);" % (i.callback, json)
    return json
def POST(self, category):
    """Delete the cover identified by the ``id`` form parameter.

    Redirects to ``redirect_url`` when one is supplied; otherwise
    returns a plain-text confirmation message.
    """
    i = web.input(id=None, redirect_url=None)
    cover_id = i.id and safeint(i.id, None)
    if not cover_id:
        return 'no such id: %s' % cover_id
    db.delete(cover_id)
    if i.redirect_url:
        raise web.seeother(i.redirect_url)
    return 'cover has been deleted successfully.'
def GET(self, category):
    """Query covers for a category and return the result as JSON/JSONP.

    ``cmd=ids`` yields an olid -> id mapping, ``details=true`` yields
    full metadata dicts, otherwise a bare list of cover ids. ``limit``
    is capped at 100; ``olid`` may be a comma-separated list.
    """
    i = web.input(olid=None, offset=0, limit=10, callback=None, details="false", cmd=None)
    offset = safeint(i.offset, 0)
    # never hand out more than 100 records per request
    limit = min(safeint(i.limit, 10), 100)
    details = i.details.lower() == "true"

    if i.olid and "," in i.olid:
        i.olid = i.olid.split(",")
    result = db.query(category, i.olid, offset=offset, limit=limit)

    def as_dict(record):
        # full metadata view of a single cover record
        return {
            "id": record.id,
            "olid": record.olid,
            "created": record.created.isoformat(),
            "last_modified": record.last_modified.isoformat(),
            "source_url": record.source_url,
            "width": record.width,
            "height": record.height,
        }

    if i.cmd == "ids":
        result = {record.olid: record.id for record in result}
    else:
        result = [as_dict(r) for r in result] if details else [r.id for r in result]

    json = simplejson.dumps(result)
    web.header("Content-Type", "text/javascript")
    # JSONP support: wrap the payload in the caller-supplied function
    return "%s(%s);" % (i.callback, json) if i.callback else json
def _query(category, key, value):
    """Resolve a cover record by id, olid, or (for books) a bibliographic key.

    For category 'b', isbn/lccn/oclc/ocaid lookups are translated to the
    matching Open Library field and resolved to olids first, then looked
    up recursively. Returns the first matching record or None.
    """
    if key == 'id':
        rows = db.details(safeint(value))
        return rows and rows[0] or None

    if key == 'olid':
        rows = db.query(category, value, limit=1)
        return rows and rows[0] or None

    if category == 'b' and key in ['isbn', 'lccn', 'oclc', 'ocaid']:
        if key == 'isbn':
            # pick the Open Library field by the digit count of the ISBN
            key = 'isbn_13' if len(value.replace('-', '')) == 13 else 'isbn_10'
        elif key == 'oclc':
            key = 'oclc_numbers'
        olids = ol_things(key, value)
        if olids:
            return _query(category, 'olid', olids)

    return None
def GET(self, category, key, value, size):
    """Serve a cover image looked up by id, olid, isbn, ia id, etc.

    Depending on the key and configuration, this either streams the
    image bytes, redirects to an archive.org zip-view URL, or falls
    back to a configured default image / 404.
    """
    i = web.input(default="true")
    key = key.lower()

    def is_valid_url(url):
        return url.startswith("http://") or url.startswith("https://")

    def notfound():
        # Fallback chain when no cover is found:
        # dev proxy -> configured default image -> caller-supplied URL -> 404.
        if key in ["id", "olid"] and config.get("upstream_base_url"):
            # this is only used in development
            base = web.rstrips(config.upstream_base_url, "/")
            raise web.redirect(base + web.ctx.fullpath)
        elif config.default_image and i.default.lower() != "false" and not is_valid_url(i.default):
            return read_file(config.default_image)
        elif is_valid_url(i.default):
            # caller supplied an absolute URL to use as the default image
            raise web.seeother(i.default)
        else:
            raise web.notfound("")

    def redirect(id):
        # Build the canonical /<category>/id/<id>[-<size>].jpg URL,
        # preserving the original query string.
        size_part = size and ("-" + size) or ""
        url = "/%s/id/%s%s.jpg" % (category, id, size_part)
        query = web.ctx.env.get('QUERY_STRING')
        if query:
            url += '?' + query
        raise web.found(url)

    if key == 'isbn':
        value = value.replace("-", "").strip()  # strip hyphens from ISBN
        # Disabling ratelimit as iptables is taking care of botnets.
        # value = self.ratelimit_query(category, key, value)
        value = self.query(category, key, value)

        # Redirect isbn requests to archive.org.
        # This will heavily reduce the load on coverstore server.
        # The max_coveritem_index config parameter specifies the latest
        # olcovers items uploaded to archive.org.
        if value and self.is_cover_in_cluster(value):
            url = zipview_url_from_id(int(value), size)
            raise web.found(url)
    elif key == 'ia':
        url = self.get_ia_cover_url(value, size)
        if url:
            raise web.found(url)
        else:
            value = None  # notfound or redirect to default. handled later.
    elif key != 'id':
        # any other key (olid, oclc, lccn, ...) is resolved to a cover id
        value = self.query(category, key, value)

    if value and safeint(value) in config.blocked_covers:
        raise web.notfound()

    # redirect to archive.org cluster for large size and original images whenever possible
    if value and (size == "L" or size == "") and self.is_cover_in_cluster(value):
        url = zipview_url_from_id(int(value), size)
        raise web.found(url)

    d = value and self.get_details(value, size.lower())
    if not d:
        return notfound()

    # set cache-for-ever headers only when requested with ID
    if key == 'id':
        etag = "%s-%s" % (d.id, size.lower())
        if not web.modified(trim_microsecond(d.created), etag=etag):
            # client already has the current bytes; answer 304
            raise web.notmodified()
        web.header('Cache-Control', 'public')
        web.expires(100 * 365 * 24 * 3600)  # this image is not going to expire in next 100 years.
    else:
        web.header('Cache-Control', 'public')
        web.expires(10 * 60)  # Allow the client to cache the image for 10 mins to avoid further requests
    web.header('Content-Type', 'image/jpeg')
    try:
        return read_image(d, size)
    except IOError:
        # image file missing/unreadable on disk -> treat as not found
        raise web.notfound()