def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    def notfound():
        if config.default_image and i.default.lower() != "false" and not i.default.startswith('http://'):
            return read_file(config.default_image)
        elif i.default.startswith('http://'):
            raise web.seeother(i.default)
        else:
            raise web.notfound("")

    def redirect(id):
        size_part = size and ("-" + size) or ""
        url = "/%s/id/%s%s.jpg" % (category, id, size_part)
        query = web.ctx.env.get('QUERY_STRING')
        if query:
            url += '?' + query
        raise web.found(url)

    if key == 'isbn':
        value = value.replace("-", "").strip()  # strip hyphens from ISBN
        # Disabling ratelimit as iptables is taking care of botnets.
        # value = self.ratelimit_query(category, key, value)
        value = self.query(category, key, value)
    elif key != 'id':
        value = self.query(category, key, value)

    d = value and db.details(value)
    if not d:
        return notfound()

    # Set cache-forever headers only when requested with ID.
    if key == 'id':
        etag = "%s-%s" % (d.id, size.lower())
        if not web.modified(trim_microsecond(d.created), etag=etag):
            raise web.notmodified()

        web.header('Cache-Control', 'public')
        web.expires(100 * 365 * 24 * 3600)  # this image is not going to expire in the next 100 years
    else:
        web.header('Cache-Control', 'public')
        web.expires(10 * 60)  # allow the client to cache the image for 10 mins to avoid further requests

    web.header('Content-Type', 'image/jpeg')
    try:
        return read_image(d, size)
    except IOError:
        raise web.notfound()
def GET(self, category, key, value, size):
    i = web.input(default="true")
    key = key.lower()

    def notfound():
        if config.default_image and i.default.lower() != "false" and not i.default.startswith('http://'):
            return read_file(config.default_image)
        elif i.default.startswith('http://'):
            raise web.seeother(i.default)
        else:
            raise web.notfound("")

    def redirect(id):
        size_part = size and ("-" + size) or ""
        url = "/%s/id/%s%s.jpg" % (category, id, size_part)
        query = web.ctx.env.get('QUERY_STRING')
        if query:
            url += '?' + query
        raise web.found(url)

    if key != 'id':
        value = _query(category, key, value)
        if value is None:
            return notfound()

    d = db.details(value)
    if not d:
        return notfound()

    # Set cache-forever headers only when requested with ID.
    if key == 'id':
        etag = "%s-%s" % (d.id, size.lower())
        if not web.modified(trim_microsecond(d.created), etag=etag):
            raise web.notmodified()

        web.header('Cache-Control', 'public')
        web.expires(100 * 365 * 24 * 3600)  # this image is not going to expire in the next 100 years
    else:
        web.header('Cache-Control', 'public')
        web.expires(10 * 60)  # allow the client to cache the image for 10 mins to avoid further requests

    web.header('Content-Type', 'image/jpeg')
    try:
        return read_image(d, size)
    except IOError:
        raise web.notfound()
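# For orientation, a minimal sketch of how web.py could route cover requests to a
# class exposing the GET(self, category, key, value, size) handler above.  The URL
# patterns, the class name `cover`, and the stub body are assumptions for
# illustration only; the coverstore's real routing is defined elsewhere.
import web

urls = (
    r'/([^/]+)/(id|olid|isbn|oclc|lccn|ocaid)/([^/]+)-(S|M|L)\.jpg', 'cover',
    r'/([^/]+)/(id|olid|isbn|oclc|lccn|ocaid)/([^/]+)()\.jpg', 'cover',
)

class cover:
    def GET(self, category, key, value, size):
        # Stub standing in for the real handler; the regex groups map to the arguments.
        return "category=%s key=%s value=%s size=%s" % (category, key, value, size)

app = web.application(urls, globals())

if __name__ == '__main__':
    # e.g. GET /b/isbn/0385533225-M.jpg -> cover().GET('b', 'isbn', '0385533225', 'M')
    app.run()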
def get_details(self, coverid, size=""):
    try:
        coverid = int(coverid)
    except ValueError:
        return None

    # Use tar index if available to avoid a db query. We have 0-6M images in tarballs.
    if isinstance(coverid, int) and coverid < 6000000 and size in "sml":
        path = self.get_tar_filename(coverid, size)
        if path:
            if size:
                key = "filename_%s" % size
            else:
                key = "filename"
            return web.storage({"id": coverid, key: path, "created": datetime.datetime(2010, 1, 1)})

    return db.details(coverid)
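# web.storage is a dict subclass with attribute access, which is why callers of
# get_details() can read d.id, d.created or d.filename_s directly.  The values
# below are made up, shaped like the tar-index branch above.
import datetime
import web

d = web.storage({
    "id": 42,
    "filename_s": "covers_0000_00.tar:...",  # offset/length details elided
    "created": datetime.datetime(2010, 1, 1),
})

print(d.id)             # 42, via attribute access
print(d["filename_s"])  # same value via normal dict access
print(d.created.year)   # 2010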
def GET(self, category, key, value):
    if key == 'id':
        web.header('Content-Type', 'application/json')
        d = db.details(value)
        if not d:
            raise web.notfound("")
        if isinstance(d['created'], datetime.datetime):
            d['created'] = d['created'].isoformat()
            d['last_modified'] = d['last_modified'].isoformat()
        return simplejson.dumps(d)
    else:
        # For non-id keys, resolve the cover id and redirect to the canonical /id/ URL.
        value = _query(category, key, value)
        if value is None:
            raise web.notfound("")
        raise web.found("/%s/id/%s.json" % (category, value))
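# The isoformat() conversion above is needed because JSON encoders refuse to
# serialize datetime objects.  A standalone illustration using the stdlib json
# module (simplejson behaves the same way):
import datetime
import json

record = {"id": 42, "created": datetime.datetime(2010, 1, 1, 12, 30)}

try:
    json.dumps(record)
except TypeError as e:
    print("raw datetime fails:", e)

record["created"] = record["created"].isoformat()
print(json.dumps(record))  # {"id": 42, "created": "2010-01-01T12:30:00"}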
def _query(category, key, value):
    if key == 'id':
        result = db.details(safeint(value))
        return result and result[0] or None
    elif key == 'olid':
        result = db.query(category, value, limit=1)
        return result and result[0] or None
    else:
        if category == 'b' and key in ['isbn', 'lccn', 'oclc', 'ocaid']:
            if key == 'isbn':
                if len(value.replace('-', '')) == 13:
                    key = 'isbn_13'
                else:
                    key = 'isbn_10'
            if key == 'oclc':
                key = 'oclc_numbers'
            olids = ol_things(key, value)
            if olids:
                return _query(category, 'olid', olids)
    return None
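# The key rewriting inside _query() (ISBN length selecting isbn_13 vs isbn_10,
# and oclc mapping to oclc_numbers) can be exercised in isolation.  The helper
# name below is hypothetical; it only mirrors that branch, without the
# ol_things() lookup.
def normalize_lookup_key(key, value):
    if key == 'isbn':
        key = 'isbn_13' if len(value.replace('-', '')) == 13 else 'isbn_10'
    if key == 'oclc':
        key = 'oclc_numbers'
    return key

print(normalize_lookup_key('isbn', '978-0-385-53322-5'))  # isbn_13
print(normalize_lookup_key('isbn', '0385533225'))         # isbn_10
print(normalize_lookup_key('oclc', '297222669'))          # oclc_numbers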
def productdetails(request):
    # Parse the JSON request body, look up details for the referenced image
    # and return them as a JSON response.
    payload = json.loads(request.body)
    img = payload['obj']
    productdetail = details(obj=img)
    return JsonResponse({'response': productdetail})
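# A rough sketch of how a client might call the view above.  The endpoint path
# and the "obj" value are assumptions; the view only requires a JSON body with
# an 'obj' key.
import requests

resp = requests.post(
    "http://localhost:8000/productdetails/",  # hypothetical route
    json={"obj": "some-image-id"},
)
print(resp.json()["response"])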