def GET(self):
    """Render the admin loans page: one page of loans plus per-type stats."""
    i = web.input(page=1, pagesize=200)

    # Counts by resource type; bookreader loans are whatever remains.
    total_loans = len(web.ctx.site.store.keys(type="/type/loan", limit=100000))
    pdf_loans = len(web.ctx.site.store.keys(
        type="/type/loan", name="resource_type", value="pdf", limit=100000))
    epub_loans = len(web.ctx.site.store.keys(
        type="/type/loan", name="resource_type", value="epub", limit=100000))

    pagesize = h.safeint(i.pagesize, 200)
    # BUG FIX: use floor division -- "/" produces a float on Python 3,
    # which breaks pagecount (the sibling implementation already uses "//").
    pagecount = 1 + (total_loans - 1) // pagesize
    pageindex = max(h.safeint(i.page, 1), 1)

    begin = (pageindex - 1) * pagesize  # pagecount starts from 1
    end = min(begin + pagesize, total_loans)

    loans = web.ctx.site.store.values(type="/type/loan", offset=begin, limit=pagesize)

    stats = {
        "total_loans": total_loans,
        "pdf_loans": pdf_loans,
        "epub_loans": epub_loans,
        "bookreader_loans": total_loans - pdf_loans - epub_loans,
        "begin": begin + 1,  # We count from 1, not 0.
        "end": end,
    }

    # Preload books so the template doesn't fetch them one at a time.
    web.ctx.site.get_many([loan['book'] for loan in loans])
    return render_template("admin/loans", loans, None,
                           pagecount=pagecount, pageindex=pageindex, stats=stats)
def GET(self):
    """JSON endpoint listing lending-library books with optional filters."""
    i = web.input(offset=0, limit=12, details="false", has_fulltext="false")

    filters = {}
    if i.get("has_fulltext") == "true":
        filters["has_fulltext"] = "true"

    if i.get("published_in"):
        if "-" in i.published_in:
            # "BEGIN-END" year range; used only when both ends are numeric.
            start, stop = i.published_in.split("-", 1)
            if h.safeint(start, None) is not None and h.safeint(stop, None) is not None:
                filters["publish_year"] = [start, stop]
        elif h.safeint(i.published_in, None) is not None:
            filters["publish_year"] = i.published_in

    i.limit = h.safeint(i.limit, 12)
    i.offset = h.safeint(i.offset, 0)

    subject = get_lending_library(
        web.ctx.site,
        offset=i.offset,
        limit=i.limit,
        details=i.details.lower() == "true",
        inlibrary=inlibrary.get_library() is not None,
        **filters
    )
    return simplejson.dumps(subject)
def GET(self, key):
    """Return an author's works as JSON; 404 unless `key` is a /type/author."""
    doc = web.ctx.site.get(key)
    if not doc or doc.type.key != "/type/author":
        raise web.notfound('')
    i = web.input(limit=50, offset=0)
    # Falsy/unparsable values fall back to the defaults via `or`.
    limit = h.safeint(i.limit) or 50
    offset = h.safeint(i.offset) or 0
    payload = self.get_works_data(doc, limit=limit, offset=offset)
    return delegate.RawText(simplejson.dumps(payload),
                            content_type="application/json")
def GET(self, key):
    """Return a work's editions as JSON; 404 unless `key` is a /type/work."""
    doc = web.ctx.site.get(key)
    if not doc or doc.type.key != "/type/work":
        raise web.notfound('')
    params = web.input(limit=50, offset=0)
    # Falsy/unparsable values fall back to the defaults via `or`.
    limit = h.safeint(params.limit) or 50
    offset = h.safeint(params.offset) or 0
    editions = self.get_editions_data(doc, limit=limit, offset=offset)
    return delegate.RawText(simplejson.dumps(editions),
                            content_type="application/json")
def GET(self, key):
    """Dump a list's editions in this handler's serialization format."""
    # Renamed local from `list` -- it shadowed the builtin.
    lst = web.ctx.site.get(key)
    if not lst:
        raise web.notfound()
    i = web.input(limit=20, offset=0)
    limit = h.safeint(i.limit, 20)
    offset = h.safeint(i.offset, 0)
    data = lst.get_editions(limit=limit, offset=offset, _raw=True)
    text = formats.dump(data, self.encoding)
    return delegate.RawText(text, content_type=self.content_type)
def GET(self, path):
    """Render the lists page for the document at `path`."""
    doc = web.ctx.site.get(path)
    if not doc:
        raise web.notfound()
    i = web.input(offset=0, limit=20)
    # Sanitize paging: offset >= 0 and limit capped at 100.
    i.offset = max(h.safeint(i.offset, 0), 0)
    i.limit = min(h.safeint(i.limit, 20), 100)
    lists = doc.get_lists(limit=i.limit, offset=i.offset)
    return self.render(doc, lists, i)
def GET(self, path):
    """Render one page of a list's editions."""
    lst = web.ctx.site.get(path)
    if not lst:
        raise web.notfound()
    i = web.input(limit=50, page=1)
    limit = h.safeint(i.limit, 50)
    # BUG FIX: clamp to page >= 1 (as the loans admin page does) so a bad
    # ?page=0 or negative value can never yield a negative query offset.
    page = max(h.safeint(i.page, 1), 1) - 1
    offset = page * limit
    editions = lst.get_editions(limit=limit, offset=offset)
    lst.preload_authors(editions['editions'])
    lst.load_changesets(editions['editions'])
    return render_template("type/list/editions.html", lst, editions)
def GET(self):
    """Show the admin loans dashboard: paginated loans plus summary stats."""
    params = web.input(page=1, pagesize=200)

    store = web.ctx.site.store
    total = len(store.keys(type="/type/loan", limit=100000))
    pdf_count = len(store.keys(
        type="/type/loan", name="resource_type", value="pdf", limit=100000))
    epub_count = len(store.keys(
        type="/type/loan", name="resource_type", value="epub", limit=100000))

    pagesize = h.safeint(params.pagesize, 200)
    pagecount = 1 + (total - 1) // pagesize
    pageindex = max(h.safeint(params.page, 1), 1)  # pages are 1-based

    begin = (pageindex - 1) * pagesize
    end = min(begin + pagesize, total)

    loans = store.values(type="/type/loan", offset=begin, limit=pagesize)

    stats = {
        "total_loans": total,
        "pdf_loans": pdf_count,
        "epub_loans": epub_count,
        "bookreader_loans": total - pdf_count - epub_count,
        "begin": begin + 1,  # displayed counts start at 1, not 0
        "end": end,
    }

    # Warm the cache so per-loan book lookups in the template are cheap.
    web.ctx.site.get_many([loan['book'] for loan in loans])
    return render_template(
        "admin/loans",
        loans,
        None,
        pagecount=pagecount,
        pageindex=pageindex,
        stats=stats,
    )
def GET(self, path):
    """Return the lists containing the document (or subject) at `path`."""
    if path.startswith("/subjects/"):
        doc = subjects.get_subject(path)
    else:
        doc = web.ctx.site.get(path)
    if not doc:
        raise web.notfound()
    i = web.input(offset=0, limit=50)
    # Sanitize paging: offset >= 0 and limit capped at 100.
    i.offset = max(h.safeint(i.offset, 0), 0)
    i.limit = min(h.safeint(i.limit, 50), 100)
    lists = self.get_lists(doc, limit=i.limit, offset=i.offset)
    return delegate.RawText(self.dumps(lists))
def GET(self, key):
    """Serialize one page of a list's editions as a collection document."""
    lst = web.ctx.site.get(key)
    if not lst:
        raise web.notfound()
    params = web.input(limit=50, offset=0)
    limit = h.safeint(params.limit, 50)
    offset = h.safeint(params.offset, 0)
    editions = lst.get_editions(limit=limit, offset=offset, _raw=True)
    entries = [self.process_edition(e) for e in editions['editions']]
    data = make_collection(
        size=editions['count'],
        entries=entries,
        limit=limit,
        offset=offset)
    data['links']['list'] = key
    text = formats.dump(data, self.encoding)
    return delegate.RawText(text, content_type=self.content_type)
def GET(self, key):
    """Dump one page of a list's editions wrapped in a collection envelope."""
    lst = web.ctx.site.get(key)
    if not lst:
        raise web.notfound()
    i = web.input(limit=50, offset=0)
    page_limit = h.safeint(i.limit, 50)
    page_offset = h.safeint(i.offset, 0)
    editions = lst.get_editions(limit=page_limit, offset=page_offset, _raw=True)
    data = make_collection(
        size=editions['count'],
        entries=[self.process_edition(e) for e in editions['editions']],
        limit=page_limit,
        offset=page_offset,
    )
    # Link the collection back to its source list.
    data['links']['list'] = key
    return delegate.RawText(
        formats.dump(data, self.encoding), content_type=self.content_type)
def GET(self):
    """JSON endpoint for readable books, with filters and optional random sort."""
    i = web.input(offset=0, limit=12, rand=-1, details="false", has_fulltext="false")

    filters = {}
    if i.get("has_fulltext") == "true":
        filters["has_fulltext"] = "true"

    if i.get("published_in"):
        if "-" in i.published_in:
            # Year range "BEGIN-END": used only when both parts are numeric.
            lo, hi = i.published_in.split("-", 1)
            if h.safeint(lo, None) is not None and h.safeint(hi, None) is not None:
                filters["publish_year"] = [lo, hi]
        elif h.safeint(i.published_in, None) is not None:
            filters["publish_year"] = i.published_in

    i.limit = h.safeint(i.limit, 12)
    i.offset = h.safeint(i.offset, 0)
    i.rand = h.safeint(i.rand, -1)
    if i.rand > 0:
        # A positive ?rand= seeds a deterministic random ordering.
        filters['sort'] = 'random_%d desc' % i.rand

    subject = get_readable_books(
        web.ctx.site,
        offset=i.offset,
        limit=i.limit,
        details=i.details.lower() == "true",
        **filters
    )
    return simplejson.dumps(subject)
def GET(self, key):
    """Dump the subjects of a list, with self/list links attached."""
    lst = web.ctx.site.get(key)
    if not lst:
        raise web.notfound()
    limit = h.safeint(web.input(limit=20).limit, 20)
    data = self.get_subjects(lst, limit=limit)
    data['links'] = {"self": key + "/subjects", "list": key}
    return delegate.RawText(
        formats.dump(data, self.encoding), content_type=self.content_type)
def ol_get_many_as_dict(keys):
    """Fetch documents for `keys` and return them as {key: doc-dict}.

    A key may carry a revision suffix ("<key>@<revision>"); those are
    fetched individually at that revision, the rest in one bulk call.
    A missing versioned doc maps to None so callers can see the miss.
    """
    keys_with_revisions = [k for k in keys if '@' in k]
    plain_keys = [k for k in keys if '@' not in k]
    # dict comprehension instead of dict(generator), matching the typed
    # variant of this helper elsewhere in the codebase.
    result = {doc['key']: doc for doc in ol_get_many(plain_keys)}
    for k in keys_with_revisions:
        key, revision = k.split('@', 1)
        revision = h.safeint(revision, None)
        doc = web.ctx.site.get(key, revision)
        result[k] = doc and doc.dict()
    return result
def row(r):
    """Normalize a table-of-contents entry to a dict.

    `r` is either a bare title string or a dict with optional
    level/label/title/pagenum keys; missing keys default to empty values.
    """
    # Was `six.string_types`; the codebase is Python 3 (f-strings elsewhere),
    # so plain `str` is equivalent and drops the six dependency.
    if isinstance(r, str):
        level = 0
        label = ""
        title = r
        pagenum = ""
    else:
        level = h.safeint(r.get('level', '0'), 0)
        label = r.get('label', '')
        title = r.get('title', '')
        pagenum = r.get('pagenum', '')
    return dict(level=level, label=label, title=title, pagenum=pagenum)
def row(r):
    """Normalize a table-of-contents entry to a dict.

    `r` is either a bare title string or a dict with optional
    level/label/title/pagenum keys; missing keys default to empty values.
    """
    # BUG FIX: `basestring` does not exist on Python 3 (this file uses
    # f-strings, so it runs on Python 3) -- use `str` instead.
    if isinstance(r, str):
        level = 0
        label = ""
        title = r
        pagenum = ""
    else:
        level = h.safeint(r.get('level', '0'), 0)
        label = r.get('label', '')
        title = r.get('title', '')
        pagenum = r.get('pagenum', '')
    return dict(level=level, label=label, title=title, pagenum=pagenum)
def POST(self, work_id):
    """Registers new ratings for this work"""
    user = accounts.get_current_user()
    i = web.input(
        edition_id=None,
        rating=None,
        redir=False,
        redir_url=None,
        page=None,
        ajax=False,
    )
    # Redirect target priority: explicit redir_url, then the edition page,
    # falling back to the work page built from work_id.
    key = (i.redir_url if i.redir_url else i.edition_id if i.edition_id else
           ('/works/OL%sW' % work_id))
    edition_id = (int(extract_numeric_id_from_olid(i.edition_id))
                  if i.edition_id else None)
    # Anonymous users must log in first; come back to `key` afterwards.
    if not user:
        raise web.seeother('/account/login?redirect=%s' % key)
    # user.key is of the form "/people/<username>".
    username = user.key.split('/')[2]

    def response(msg, status="success"):
        # Small helper: wrap a message as a one-key JSON payload.
        return delegate.RawText(json.dumps({status: msg}),
                                content_type="application/json")

    if i.rating is None:
        # No rating supplied: treat as a request to clear the user's rating.
        models.Ratings.remove(username, work_id)
        r = response('removed rating')
    else:
        try:
            rating = int(i.rating)
            if rating not in models.Ratings.VALID_STAR_RATINGS:
                raise ValueError
        except ValueError:
            # Non-integer or out-of-range star value.
            return response('invalid rating', status="error")
        models.Ratings.add(username=username, work_id=work_id, rating=rating,
                           edition_id=edition_id)
        r = response('rating added')

    # Non-AJAX form posts redirect back; preserve ?page= only when > 1.
    if i.redir and not i.ajax:
        p = h.safeint(i.page, 1)
        query_params = f'?page={p}' if p > 1 else ''
        if i.page:
            raise web.seeother(f'{key}{query_params}')
        raise web.seeother(key)
    return r
def GET(self, key):
    """Serialize the subjects of the list at `key`, linked back to the list."""
    lst = web.ctx.site.get(key)
    if not lst:
        raise web.notfound()
    params = web.input(limit=20)
    limit = h.safeint(params.limit, 20)
    data = self.get_subjects(lst, limit=limit)
    data['links'] = {
        "self": key + "/subjects",
        "list": key,
    }
    text = formats.dump(data, self.encoding)
    return delegate.RawText(text, content_type=self.content_type)
def ol_get_many_as_dict(keys: Iterable[str]) -> dict:
    """Map each key in `keys` to its document dict.

    Keys of the form "<key>@<revision>" are fetched one at a time at that
    revision; the remainder are fetched in a single bulk request. A missing
    versioned doc maps to None.

    Ex.: ol_get_many_as_dict(['/books/OL2058361M', '/works/OL54120W'])
    """
    versioned = [k for k in keys if '@' in k]
    current = [k for k in keys if '@' not in k]
    docs = {doc['key']: doc for doc in ol_get_many(current)}
    for full_key in versioned:
        key, rev = full_key.split('@', 1)
        doc = web.ctx.site.get(key, h.safeint(rev, None))
        docs[full_key] = doc and doc.dict()
    return docs
def GET(self, rest='loans'):
    """Redirect a legacy loans URL to /people/<username>/books/<rest>."""
    i = web.input(page=1)
    user = accounts.get_current_user()
    username = user.key.split('/')[-1]
    # BUG FIX: give safeint a default -- h.safeint(i.page) returns None for a
    # non-numeric ?page=, and `None > 1` raises TypeError on Python 3.
    query_params = f'?page={i.page}' if h.safeint(i.page, 1) > 1 else ''
    raise web.seeother(f'/people/{username}/books/{rest}{query_params}')