def GET(self, key):
    """Render a subject page as JSON.

    Redirects to the normalized key form, builds solr filters from the
    query string (has_fulltext, published_in year or year-range) and
    returns the subject document.
    """
    # If the key is not in the normalized form, redirect to the normalized form.
    nkey = self.normalize_key(key)
    if nkey != key:
        raise web.redirect(nkey)

    # Does the key require any processing before using it to query solr?
    key = self.process_key(key)

    i = web.input(offset=0, limit=12, details='false', has_fulltext='false', sort='editions')

    filters = {}
    if i.get('has_fulltext') == 'true':
        filters['has_fulltext'] = 'true'

    if i.get('published_in'):
        if '-' in i.published_in:
            begin, end = i.published_in.split('-', 1)
            if safeint(begin, None) is not None and safeint(end, None) is not None:
                filters['publish_year'] = (begin, end)  # range
        else:
            y = safeint(i.published_in, None)
            if y is not None:
                filters['publish_year'] = i.published_in

    i.limit = safeint(i.limit, 12)
    i.offset = safeint(i.offset, 0)

    subject = get_subject(key, offset=i.offset, limit=i.limit, sort=i.sort,
                          details=i.details.lower() == 'true', **filters)

    # BUG FIX: i.has_fulltext is a string that defaults to 'false', which is
    # always truthy, so the override below ran unconditionally. Only apply
    # it when the fulltext filter is actually in effect (every matched work
    # then has an ebook).
    if i.has_fulltext == 'true':
        subject['ebook_count'] = subject['work_count']
    return json.dumps(subject)
def GET(self, key):
    """Return subject data as JSON, redirecting non-lowercase keys first."""
    lower_key = key.lower()
    if lower_key != key:
        raise web.redirect(lower_key)

    i = web.input(offset=0, limit=12, details="false", has_fulltext="false")

    filters = {}
    if i.get("has_fulltext") == "true":
        filters["has_fulltext"] = "true"

    published_in = i.get("published_in")
    if published_in:
        if "-" in published_in:
            start, stop = published_in.split("-", 1)
            # Both endpoints must be integers for a year-range filter.
            if safeint(start, None) is not None and safeint(stop, None) is not None:
                filters["publish_year"] = [start, stop]
        elif safeint(published_in, None) is not None:
            filters["publish_year"] = published_in

    i.limit = safeint(i.limit, 12)
    i.offset = safeint(i.offset, 0)

    subject = get_subject(
        key,
        offset=i.offset,
        limit=i.limit,
        details=i.details.lower() == "true",
        **filters
    )
    return json.dumps(subject)
def GET(self, key):
    """Serve subject info as JSON; non-lowercase keys redirect to lowercase."""
    if key != key.lower():
        raise web.redirect(key.lower())

    i = web.input(offset=0, limit=12, details='false', has_fulltext='false', sort='editions')

    filters = {}
    if i.get('has_fulltext') == 'true':
        filters['has_fulltext'] = 'true'

    if i.get('published_in'):
        value = i.published_in
        if '-' not in value:
            # Single year: keep the raw string once it parses as an int.
            if safeint(value, None) is not None:
                filters['publish_year'] = value
        else:
            first, last = value.split('-', 1)
            ok = safeint(first, None) is not None and safeint(last, None) is not None
            if ok:
                filters['publish_year'] = [first, last]

    i.limit = safeint(i.limit, 12)
    i.offset = safeint(i.offset, 0)

    details_flag = i.details.lower() == 'true'
    subject = get_subject(key, offset=i.offset, limit=i.limit, sort=i.sort,
                          details=details_flag, **filters)
    return json.dumps(subject)
def GET(self, path):
    """Render a diff between two revisions of a page.

    Query params: b (newer revision, defaults to latest) and a (older
    revision, defaults to b-1). Bad revision numbers redirect to the
    latest diff; a missing page redirects to the view page.
    """
    i = web.input(b=None, a=None)
    # default value of b is latest revision and default value of a is b-1

    def get(path, revision):
        # Revision 0 stands for "empty page": synthesize a dummy object so
        # a diff against nothing can still be rendered.
        if revision == 0:
            return web.ctx.site.new(path, {'revision': 0, 'type': {'key': '/type/object'}, 'key': path})
        return web.ctx.site.get(path, revision)

    def is_int(n):
        return n is None or safeint(n, None) is not None

    # if either i.a or i.b is bad, redirect to the latest diff.
    # CONSISTENCY FIX: redirects here are raised, as everywhere else in this
    # file; the original returned the exception object instead of raising it.
    if not is_int(i.b) or not is_int(i.a):
        raise web.redirect(web.changequery(b=None, a=None))

    b = get(path, safeint(i.b, None))
    # if the page is not there go to view page
    if b is None:
        raise web.seeother(web.changequery(query={}))

    a = get(path, safeint(i.a, b.revision - 1))
    return render.diff(a, b)
def GET(self, key):
    """Normalize the key, build solr filters and return the subject as JSON."""
    # Redirect when the key is not already in normalized form.
    normalized = self.normalize_key(key)
    if normalized != key:
        raise web.redirect(normalized)

    # The key may need processing before it is used to query solr.
    key = self.process_key(key)

    i = web.input(offset=0, limit=12, details="false", has_fulltext="false", sort="editions")

    filters = {}
    if i.get("has_fulltext") == "true":
        filters["has_fulltext"] = "true"

    published = i.get("published_in")
    if published:
        if "-" in published:
            lo, hi = published.split("-", 1)
            if safeint(lo, None) is not None and safeint(hi, None) is not None:
                filters["publish_year"] = (lo, hi)  # year range
        elif safeint(published, None) is not None:
            filters["publish_year"] = published

    i.limit = safeint(i.limit, 12)
    i.offset = safeint(i.offset, 0)

    subject = get_subject(
        key,
        offset=i.offset,
        limit=i.limit,
        sort=i.sort,
        details=i.details.lower() == "true",
        **filters
    )
    return json.dumps(subject)
def GET(self):
    """JSON search endpoint; the page size is capped at 1000."""
    i = web.input(q='', offset=0, limit=100)
    offset = safeint(i.offset, 0)
    # Cap the limit at 1000 for performance.
    limit = min(1000, safeint(i.limit, 100))
    response = self.get_results(i.q, offset=offset, limit=limit)['response']
    web.header('Content-Type', 'application/json')
    return delegate.RawText(json.dumps(response))
def GET(self):
    """Author autocomplete backed by the authors solr core."""
    i = web.input(q="", limit=5)
    i.limit = safeint(i.limit, 5)

    solr = get_authors_solr()
    prefix = solr.escape(i.q) + "*"
    q = 'name:(%s) OR alternate_names:(%s)' % (prefix, prefix)

    params = {'q_op': 'AND', 'sort': 'work_count desc'}
    single_core = config.get('single_core_solr')
    if single_core:
        params['fq'] = 'type:author'

    docs = solr.select(q, **params)['docs']
    for doc in docs:
        if not single_core:
            # Multi-core solr stores bare OLIDs; expand them to full keys.
            doc.key = "/authors/" + doc.key
        doc['works'] = [doc.pop('top_work')] if 'top_work' in doc else []
        doc['subjects'] = doc.pop('top_subjects', [])
    return to_json(docs)
def POST(self, key):
    """Save an edition (and its work) from the edit form.

    Flashes "book added" for a brand-new book, "book updated" otherwise,
    then redirects to the edition page. Validation/client errors flash an
    error and re-render the form.
    """
    i = web.input(v=None, _method="GET")
    v = i.v and safeint(i.v, None)
    edition = web.ctx.site.get(key, v)
    if edition is None:
        raise web.notfound()

    work = edition.works[0] if edition.works else None

    # Brand-new book: both records at revision 1 and this is the only edition.
    add = (edition.revision == 1 and work and work.revision == 1 and work.edition_count == 1)

    try:
        helper = SaveBookHelper(work, edition)
        helper.save(web.input())
        if add:
            add_flash_message("info", utils.get_message("flash_book_added"))
        else:
            add_flash_message("info", utils.get_message("flash_book_updated"))
        raise web.seeother(edition.url())
    except (ClientException, ValidationException) as e:
        # BUG FIX: the original did `raise add_flash_message(...)`, raising
        # the function's None return value (a TypeError) instead of flashing
        # the error and re-rendering. Also modernized the Python 2-only
        # `except ..., e` syntax (sibling handlers already use `as e`).
        add_flash_message('error', str(e))
        return self.GET(key)
def POST(self, key):
    """Save a work from the edit form, guarded by spam and captcha checks."""
    i = web.input(v=None, _method="GET")

    if spamcheck.is_spam():
        return render_template("message.html", "Oops",
                               'Something went wrong. Please try again later.')

    recap = get_recaptcha()
    if recap and not recap.validate():
        return render_template(
            "message.html",
            'Recaptcha solution was incorrect',
            'Please <a href="javascript:history.back()">go back</a> and try again.'
        )

    revision = i.v and safeint(i.v, None)
    work = web.ctx.site.get(key, revision)
    if work is None:
        raise web.notfound()

    try:
        SaveBookHelper(work, None).save(web.input())
        add_flash_message("info", utils.get_message("flash_work_updated"))
        raise web.seeother(work.url())
    except (ClientException, ValidationException) as e:
        add_flash_message('error', str(e))
        return self.GET(key)
def GET(self):
    """Work autocomplete: match by OLID key or by title prefix."""
    i = web.input(q="", limit=5)
    i.limit = safeint(i.limit, 5)
    solr = get_solr()

    q = solr.escape(i.q).strip()
    olid = q.upper()
    if is_work_olid(olid):
        # ensure uppercase; key is case sensitive in solr
        solr_q = 'key:"/works/%s"' % olid
    else:
        solr_q = 'title:"%s"^2 OR title:(%s*)' % (q, q)

    params = {
        'q_op': 'AND',
        'sort': 'edition_count desc',
        'rows': i.limit,
        'fq': 'type:work',
        # limit the fields returned for better performance
        'fl': 'key,title,subtitle,cover_i,first_publish_year,author_name,edition_count'
    }
    data = solr.select(solr_q, **params)

    # exclude fake works that actually have an edition key
    docs = [doc for doc in data['docs'] if doc['key'][-1] == 'W']
    for doc in docs:
        # Required by the frontend
        doc['name'] = doc['title']
        full_title = doc['title']
        if 'subtitle' in doc:
            full_title += ": " + doc['subtitle']
        doc['full_title'] = full_title
    return to_json(docs)
def GET(self):
    """Author autocomplete: match by OLID key or by name prefix."""
    i = web.input(q="", limit=5)
    i.limit = safeint(i.limit, 5)
    solr = get_solr()

    q = solr.escape(i.q).strip()
    olid = q.upper()
    if is_author_olid(olid):
        # ensure uppercase; key is case sensitive in solr
        solr_q = 'key:"/authors/%s"' % olid
    else:
        prefix_q = q + "*"
        solr_q = 'name:(%s) OR alternate_names:(%s)' % (prefix_q, prefix_q)

    params = {
        'q_op': 'AND',
        'sort': 'work_count desc',
        'rows': i.limit,
        'fq': 'type:author'
    }
    docs = solr.select(solr_q, **params)['docs']
    for doc in docs:
        doc['works'] = [doc.pop('top_work')] if 'top_work' in doc else []
        doc['subjects'] = doc.pop('top_subjects', [])
    return to_json(docs)
def POST(self, key):
    """Save a work from the edit form, validating recaptcha when enabled."""
    i = web.input(v=None, _method="GET")

    # FIX: config.get('plugins') can be None; guard before the membership
    # test so a missing plugins config does not raise TypeError.
    recap_plugin_active = 'recaptcha' in (config.get('plugins') or [])
    if recap_plugin_active:
        public_key = config.plugin_recaptcha.public_key
        private_key = config.plugin_recaptcha.private_key
        recap = recaptcha.Recaptcha(public_key, private_key)
        if not recap.validate():
            return 'Recaptcha solution was incorrect. Please <a href="javascript:history.back()">go back</a> and try again.'

    v = i.v and safeint(i.v, None)
    work = web.ctx.site.get(key, v)
    if work is None:
        raise web.notfound()

    try:
        helper = SaveBookHelper(work, None)
        helper.save(web.input())
        add_flash_message("info", utils.get_message("flash_work_updated"))
        raise web.seeother(work.url())
    except (ClientException, ValidationException) as e:
        # FIX: modernized Python 2-only `except ..., e` syntax (valid on
        # Python 2.6+ and 3; sibling handlers already use `as e`).
        add_flash_message('error', str(e))
        return self.GET(key)
def handle_encoding(self, query, encoding):
    """Populate `query` from request params and serialize recent changes.

    Supports "json" and "yml" encodings; anything else yields an empty
    text/plain body. ?text=true forces a text/plain content type.
    """
    i = web.input(bot="", limit=100, offset=0, text="false")

    # The bot stuff is handled in the template for the regular path.
    # We need to handle it here for the api.
    bot = i.bot.lower()
    if bot == "true":
        query['bot'] = True
    elif bot == "false":
        query['bot'] = False

    # Clamp limit and offset for performance reasons.
    def clamp(value, low, high):
        return max(low, min(high, value))

    query['limit'] = clamp(safeint(i.limit, 100), 0, 1000)
    query['offset'] = clamp(safeint(i.offset, 0), 0, 10000)

    result = [c.dict() for c in web.ctx.site.recentchanges(query)]

    if encoding == "json":
        response, content_type = simplejson.dumps(result), "application/json"
    elif encoding == "yml":
        response, content_type = self.yaml_dump(result), "text/x-yaml"
    else:
        response, content_type = "", "text/plain"

    if i.text.lower() == "true":
        content_type = 'text/plain'
    web.header('Content-Type', content_type)
    return delegate.RawText(response)
def POST(self, key):
    # Revert a document to an older revision (?v=<revision>).
    # Redirects back to the current query when v is missing/invalid.
    i = web.input("v", _comment=None)
    v = i.v and safeint(i.v, None)
    if v is None:
        raise web.seeother(web.changequery({}))

    # Only members of /usergroup/admin who can write this key may revert.
    user = accounts.get_current_user()
    is_admin = user and user.key in [m.key for m in web.ctx.site.get('/usergroup/admin').members]
    if not (is_admin and web.ctx.site.can_write(key)):
        return render.permission_denied(web.ctx.fullpath, "Permission denied to edit " + key + ".")

    thing = web.ctx.site.get(key, i.v)
    if not thing:
        raise web.notfound()

    def revert(thing):
        # Walk back past deletes and broken redirects to a live revision,
        # saving the recovered revision as it goes.
        if thing.type.key == "/type/delete" and thing.revision > 1:
            prev = web.ctx.site.get(thing.key, thing.revision-1)
            if prev.type.key in ["/type/delete", "/type/redirect"]:
                return revert(prev)
            else:
                prev._save("revert to revision %d" % prev.revision)
                return prev
        elif thing.type.key == "/type/redirect":
            redirect = web.ctx.site.get(thing.location)
            if redirect and redirect.type.key not in ["/type/delete", "/type/redirect"]:
                return redirect
            else:
                # bad redirect. Try the previous revision
                prev = web.ctx.site.get(thing.key, thing.revision-1)
                return revert(prev)
        else:
            return thing

    def process(value):
        # Recursively replace references to deleted/reverted things in
        # lists, embedded (keyless) things and direct references.
        if isinstance(value, list):
            return [process(v) for v in value]
        elif isinstance(value, client.Thing):
            if value.key:
                if value.type.key in ['/type/delete', '/type/revert']:
                    return revert(value)
                else:
                    return value
            else:
                for k in value.keys():
                    value[k] = process(value[k])
                return value
        else:
            return value

    for k in thing.keys():
        thing[k] = process(thing[k])

    comment = i._comment or "reverted to revision %d" % v
    thing._save(comment)
    raise web.seeother(key)
def GET(self, key):
    """Render the edit form for a work, optionally at revision ?v=."""
    i = web.input(v=None, _method="GET")
    revision = i.v and safeint(i.v, None)
    work = web.ctx.site.get(key, revision)
    if work is None:
        raise web.notfound()
    return render_template('books/edit', work)
def GET(self):
    """Language autocomplete: case-insensitive prefix match on name."""
    i = web.input(q="", limit=5)
    i.limit = safeint(i.limit, 5)
    prefix = i.q.lower()
    matches = [lang for lang in utils.get_languages()
               if lang.name.lower().startswith(prefix)]
    return to_json(matches[:i.limit])
def POST(self, key):
    """Revert `key` to revision ?v= (privileged users with write access only)."""
    i = web.input("v", _comment=None)
    v = i.v and safeint(i.v, None)
    if v is None:
        raise web.seeother(web.changequery({}))

    # BUG FIX: the original condition `if not user and (user.is_admin() ...)`
    # raised AttributeError for anonymous users (user is None) and, due to
    # short-circuiting, never denied logged-in ones. Deny unless the user is
    # an admin or librarian with write access.
    # NOTE(review): intent inferred from the sibling admin-only revert
    # handler — confirm the exact admin/librarian semantics.
    user = web.ctx.site.get_user()
    allowed = user and (user.is_admin() or user.is_librarian()) and web.ctx.site.can_write(key)
    if not allowed:
        return render.permission_denied(web.ctx.fullpath, "Permission denied to edit " + key + ".")

    thing = web.ctx.site.get(key, i.v)
    if not thing:
        raise web.notfound()

    def revert(thing):
        # Walk back past deletes and broken redirects to a live revision.
        if thing.type.key == "/type/delete" and thing.revision > 1:
            prev = web.ctx.site.get(thing.key, thing.revision - 1)
            if prev.type.key in ["/type/delete", "/type/redirect"]:
                return revert(prev)
            else:
                prev._save("revert to revision %d" % prev.revision)
                return prev
        elif thing.type.key == "/type/redirect":
            redirect = web.ctx.site.get(thing.location)
            if redirect and redirect.type.key not in ["/type/delete", "/type/redirect"]:
                return redirect
            else:
                # bad redirect. Try the previous revision
                prev = web.ctx.site.get(thing.key, thing.revision - 1)
                return revert(prev)
        else:
            return thing

    def process(value):
        # Recursively replace references to deleted/reverted things.
        if isinstance(value, list):
            return [process(v) for v in value]
        elif isinstance(value, client.Thing):
            if value.key:
                if value.type.key in ['/type/delete', '/type/revert']:
                    return revert(value)
                else:
                    return value
            else:
                for k in value:
                    value[k] = process(value[k])
                return value
        else:
            return value

    for k in thing:
        thing[k] = process(thing[k])

    comment = i._comment or "reverted to revision %d" % v
    thing._save(comment)
    raise web.seeother(key)
def GET(self, path):
    """Show recent changes for a page, 20 entries per page (?page=N)."""
    page = web.ctx.site.get(path)
    if not page:
        raise web.seeother(path)
    i = web.input(page=0)
    # BUG FIX: safeint was called without a default here; a non-numeric
    # ?page= value would yield None (assuming the project-wide
    # safeint(value, default=None) signature) and crash on multiplication.
    offset = 20 * safeint(i.page, 0)
    limit = 20
    history = db.get_recent_changes(key=path, limit=limit, offset=offset)
    return render.history(page, history)
def POST(self, key):
    """Save an edition from the edit form, guarded by spam and captcha checks.

    Recaptcha is skipped for accounts more than two years old.
    """
    i = web.input(v=None, _method="GET")

    if spamcheck.is_spam():
        return render_template(
            "message.html", "Oops",
            'Something went wrong. Please try again later.')

    recap_plugin_active = is_plugin_enabled('recaptcha')

    # check to see if account is more than two years old
    old_user = False
    user = web.ctx.site.get_user()
    account = user and user.get_account()
    if account:
        create_dt = account.creation_time()
        now_dt = datetime.datetime.utcnow()
        delta = now_dt - create_dt
        if delta.days > 365 * 2:
            old_user = True

    if recap_plugin_active and not old_user:
        public_key = config.plugin_recaptcha.public_key
        private_key = config.plugin_recaptcha.private_key
        recap = recaptcha.Recaptcha(public_key, private_key)
        if not recap.validate():
            return 'Recaptcha solution was incorrect. Please <a href="javascript:history.back()">go back</a> and try again.'

    v = i.v and safeint(i.v, None)
    edition = web.ctx.site.get(key, v)
    if edition is None:
        raise web.notfound()

    if edition.works:
        work = edition.works[0]
    else:
        work = None

    # Brand-new book: both records at revision 1 and this is the only edition.
    add = (edition.revision == 1 and work and work.revision == 1 and work.edition_count == 1)

    try:
        helper = SaveBookHelper(work, edition)
        helper.save(web.input())
        if add:
            add_flash_message("info", utils.get_message("flash_book_added"))
        else:
            add_flash_message("info", utils.get_message("flash_book_updated"))
        raise web.seeother(edition.url())
    except (ClientException, ValidationException) as e:
        # FIX: modernized Python 2-only `except ..., e` syntax (valid on
        # Python 2.6+ and 3; sibling handlers already use `as e`).
        add_flash_message('error', str(e))
        return self.GET(key)
def GET(self, key):
    """Render a subject page as JSON, with filter and availability params."""
    # If the key is not in the normalized form, redirect to the normalized form.
    nkey = self.normalize_key(key)
    if nkey != key:
        raise web.redirect(nkey)

    # Does the key require any processing before using it to query solr?
    key = self.process_key(key)

    i = web.input(offset=0, limit=12, details='false', has_fulltext='true',
                  sort='editions', available='false')

    filters = {}
    if i.get('has_fulltext') == 'true':
        filters['has_fulltext'] = 'true'

    if i.get('published_in'):
        if '-' in i.published_in:
            begin, end = i.published_in.split('-', 1)
            if safeint(begin, None) is not None and safeint(end, None) is not None:
                filters['publish_year'] = (begin, end)  # range
        else:
            y = safeint(i.published_in, None)
            if y is not None:
                filters['publish_year'] = i.published_in

    i.limit = safeint(i.limit, 12)
    i.offset = safeint(i.offset, 0)

    subject_results = get_subject(key, offset=i.offset, limit=i.limit, sort=i.sort,
                                  details=i.details.lower() == 'true', **filters)

    # BUG FIX: i.has_fulltext is a string and any non-empty string is
    # truthy, so the override below ran even for has_fulltext=false. Only
    # apply it when the fulltext filter is actually in effect.
    if i.has_fulltext == 'true':
        subject_results['ebook_count'] = subject_results['work_count']
    return json.dumps(subject_results)
def GET(self):
    """Scan-queue admin listing filtered by status, 50 records per page."""
    i = web.input(status="WAITING_FOR_BOOK", p=None)
    valid_statuses = ["NOT_SCANNED", "WAITING_FOR_BOOK", "BOOK_NOT_SCANNED",
                      "SCAN_IN_PROGRESS", "SCAN_COMPLETE"]
    if i.status not in valid_statuses:
        raise web.seeother(web.changequery({}))
    # ?p= is the zero-based page number.
    offset = 50 * safeint(i.p, 0)
    records = get_scan_queue(i.status, limit=50, offset=offset)
    return render.scan_queue(records)
def GET(self):
    """Search API: run a solr query and return the raw solr response as JSON."""
    i = web.input()
    query = json.loads(i.query) if 'query' in i else i

    sorts = dict(
        editions='edition_count desc',
        old='first_publish_year asc',
        new='first_publish_year desc',
        scans='ia_count desc')
    sort_name = query.get('sort', None)
    sort_value = sort_name and sorts[sort_name] or None

    limit = safeint(query.pop("limit", "100"), default=100)
    if "offset" in query:
        offset = safeint(query.pop("offset", 0), default=0)
        page = None
    else:
        offset = None
        page = safeint(query.pop("page", "1"), default=1)

    query['wt'] = 'json'

    try:
        reply, solr_select, q_list = run_solr_query(
            query, rows=limit, page=page, sort=sort_value, offset=offset, fields="*")
        # NOTE(review): when solr returns a falsy 'response' this becomes ''
        # and the num_found assignment below would fail — confirm upstream.
        response = json.loads(reply)['response'] or ''
    except (ValueError, IOError) as e:
        logger.error("Error in processing search API.")
        response = dict(start=0, numFound=0, docs=[], error=str(e))

    # backward compatibility
    response['num_found'] = response['numFound']
    web.header('Content-Type', 'application/json')
    return delegate.RawText(json.dumps(response, indent=True))
def GET(self):
    """Work search API with facet, field and spellcheck controls."""
    i = web.input(author_key=[], subject_facet=[], person_facet=[],
                  place_facet=[], time_facet=[], first_publish_year=[],
                  publisher_facet=[], language=[], public_scan_b=[])
    query = json.loads(i.query) if 'query' in i else i

    sort = query.get('sort', None)
    limit = safeint(query.pop("limit", "100"), default=100)
    if "offset" in query:
        offset = safeint(query.pop("offset", 0), default=0)
        page = None
    else:
        offset = None
        page = safeint(query.pop("page", "1"), default=1)

    fields = query.pop('fields', '*').split(',')
    facet = query.pop('_facet', 'true').lower() in ['true']
    spellcheck_count = safeint(
        query.pop("_spellcheck_count", default_spellcheck_count),
        default=default_spellcheck_count)

    response = work_search(query, sort=sort, page=page, offset=offset,
                           limit=limit, fields=fields, facet=facet,
                           spellcheck_count=spellcheck_count)
    web.header('Content-Type', 'application/json')
    return delegate.RawText(json.dumps(response, indent=4))
def GET(self, key):
    """Edition edit form; builds a dummy work for orphaned editions."""
    i = web.input(v=None)
    revision = i.v and safeint(i.v, None)
    edition = web.ctx.site.get(key, revision)
    if edition is None:
        raise web.notfound()
    work = edition.works and edition.works[0]
    if not work:
        # HACK: create dummy work when work is not available to make edit form work
        work = web.ctx.site.new('', {'key': '', 'type': {'key': '/type/work'}, 'title': edition.title})
    return render_template('books/edit', work, edition)
def POST(self, key):
    """Save an edition from the edit form with spam/captcha protection.

    Recaptcha is skipped for accounts more than two years old.
    """
    i = web.input(v=None, _method="GET")

    if spamcheck.is_spam():
        return render_template("message.html", "Oops",
                               'Something went wrong. Please try again later.')

    recap_plugin_active = is_plugin_enabled('recaptcha')

    # check to see if account is more than two years old
    old_user = False
    user = web.ctx.site.get_user()
    account = user and user.get_account()
    if account:
        create_dt = account.creation_time()
        now_dt = datetime.datetime.utcnow()
        delta = now_dt - create_dt
        if delta.days > 365*2:
            old_user = True

    if recap_plugin_active and not old_user:
        public_key = config.plugin_recaptcha.public_key
        private_key = config.plugin_recaptcha.private_key
        recap = recaptcha.Recaptcha(public_key, private_key)
        if not recap.validate():
            return 'Recaptcha solution was incorrect. Please <a href="javascript:history.back()">go back</a> and try again.'

    v = i.v and safeint(i.v, None)
    edition = web.ctx.site.get(key, v)
    if edition is None:
        raise web.notfound()

    work = edition.works[0] if edition.works else None

    # Brand-new book: both records at revision 1 and this is the only edition.
    add = (edition.revision == 1 and work and work.revision == 1 and work.edition_count == 1)

    try:
        helper = SaveBookHelper(work, edition)
        helper.save(web.input())
        if add:
            add_flash_message("info", utils.get_message("flash_book_added"))
        else:
            add_flash_message("info", utils.get_message("flash_book_updated"))
        raise web.seeother(edition.url())
    except (ClientException, ValidationException) as e:
        # FIX: modernized Python 2-only `except ..., e` syntax (valid on
        # Python 2.6+ and 3; sibling handlers already use `as e`).
        add_flash_message('error', str(e))
        return self.GET(key)
def GET(self, key):
    """Render the edition edit form, optionally at revision ?v=."""
    i = web.input(v=None)
    v = i.v and safeint(i.v, None)
    edition = web.ctx.site.get(key, v)
    if edition is None:
        raise web.notfound()
    # HACK: create dummy work when work is not available to make edit form work
    work = (edition.works and edition.works[0]) or web.ctx.site.new(
        "", {"key": "", "type": {"key": "/type/work"}, "title": edition.title})
    return render_template("books/edit", work, edition)
def GET(self, key):
    """Edition edit form: permission check, dummy-work fallback, optional recaptcha."""
    i = web.input(v=None)
    revision = i.v and safeint(i.v, None)

    if not web.ctx.site.can_write(key):
        return render_template("permission_denied", web.ctx.fullpath,
                               "Permission denied to edit " + key + ".")

    edition = web.ctx.site.get(key, revision)
    if edition is None:
        raise web.notfound()

    work = edition.works and edition.works[0]
    if not work:
        # HACK: create dummy work when work is not available to make edit form work
        authors = [{'type': '/type/author_role', 'author': {'key': a['key']}}
                   for a in edition.get('authors', [])]
        work = web.ctx.site.new('', {
            'key': '',
            'type': {'key': '/type/work'},
            'title': edition.title,
            'authors': authors,
        })

    recap_plugin_active = is_plugin_enabled('recaptcha')

    # check to see if account is more than two years old
    old_user = False
    user = web.ctx.site.get_user()
    account = user and user.get_account()
    if account:
        age = datetime.datetime.utcnow() - account.creation_time()
        if age.days > 365 * 2:
            old_user = True

    recap = None
    if recap_plugin_active and not old_user:
        recap = recaptcha.Recaptcha(config.plugin_recaptcha.public_key,
                                    config.plugin_recaptcha.private_key)
    return render_template('books/edit', work, edition, recaptcha=recap)
def GET(self):
    """Search API: run a solr query and return the solr response as JSON."""
    i = web.input()
    if 'query' in i:
        query = json.loads(i.query)
    else:
        query = i

    # Map friendly sort names to solr sort clauses; unknown names raise.
    sorts = dict(
        editions='edition_count desc',
        old='first_publish_year asc',
        new='first_publish_year desc',
        scans='ia_count desc')
    sort_name = query.get('sort', None)
    sort_value = sort_name and sorts[sort_name] or None

    limit = safeint(query.pop("limit", "100"), default=100)
    if "offset" in query:
        offset, page = safeint(query.pop("offset", 0), default=0), None
    else:
        offset, page = None, safeint(query.pop("page", "1"), default=1)

    query['wt'] = 'json'

    try:
        reply, solr_select, q_list = run_solr_query(
            query, rows=limit, page=page, sort=sort_value, offset=offset, fields="*")
        response = json.loads(reply)['response'] or ''
    except (ValueError, IOError) as e:
        logger.error("Error in processing search API.")
        response = dict(start=0, numFound=0, docs=[], error=str(e))

    # backward compatibility
    response['num_found'] = response['numFound']
    return delegate.RawText(json.dumps(response, indent=True))
def GET(self, key):
    """Work edit form, gated on write permission for the key."""
    i = web.input(v=None, _method="GET")
    revision = i.v and safeint(i.v, None)
    if not web.ctx.site.can_write(key):
        return render_template("permission_denied", web.ctx.fullpath,
                               "Permission denied to edit " + key + ".")
    work = web.ctx.site.get(key, revision)
    if work is None:
        raise web.notfound()
    return render_template('books/edit', work)
def GET(self, key):
    """Work edit form with write-permission check and recaptcha widget."""
    i = web.input(v=None, _method="GET")
    revision = i.v and safeint(i.v, None)
    if not web.ctx.site.can_write(key):
        return render_template("permission_denied", web.ctx.fullpath,
                               "Permission denied to edit " + key + ".")
    work = web.ctx.site.get(key, revision)
    if work is None:
        raise web.notfound()
    return render_template('books/edit', work, recaptcha=get_recaptcha())
def get_data(self, key):
    """Fetch raw data for `key` (optionally at revision ?v=) via the API.

    On a client error, re-raises as an HTTPError carrying the server's
    error payload.
    """
    i = web.input(v=None)
    v = safeint(i.v, None)
    data = dict(key=key, revision=v)
    try:
        # FIX: the fetched document was previously assigned and then
        # discarded; a method named get_data should return it.
        return api.request('/get', data=data)
    except client.ClientException as e:
        # FIX: modernized Python 2-only `except ..., e` syntax; replaced
        # e.message (removed in Python 3) with str(e).
        if e.json:
            msg = self.dump(simplejson.loads(e.json))
        else:
            msg = str(e)
        raise web.HTTPError(e.status, data=msg)
def row(r):
    """Normalize a table-of-contents entry into a web.storage row."""
    if isinstance(r, basestring):
        # Plain-string entries carry only the title.
        level, label, title, pagenum = 0, "", r, ""
    else:
        level = safeint(r.get('level', '0'), 0)
        label = r.get('label', '')
        title = r.get('title', '')
        pagenum = r.get('pagenum', '')
    return web.storage(level=level, label=label, title=title, pagenum=pagenum)
def GET(self):
    """Work search API returning the search result as indented JSON."""
    i = web.input()
    query = json.loads(i.query) if 'query' in i else i

    sort = query.get('sort', None)
    limit = safeint(query.pop("limit", "100"), default=100)
    if "offset" in query:
        offset = safeint(query.pop("offset", 0), default=0)
        page = None
    else:
        offset = None
        page = safeint(query.pop("page", "1"), default=1)

    response = work_search(query, sort=sort, page=page, offset=offset, limit=limit)
    web.header('Content-Type', 'application/json')
    return delegate.RawText(json.dumps(response, indent=True))
def POST(self, key):
    # Revert a document to an older revision (?v=<revision>).
    # Redirects back to the current query when v is missing/invalid.
    # NOTE(review): unlike the sibling revert handlers, this version has no
    # permission check — confirm that is intentional.
    i = web.input("v", _comment=None)
    v = i.v and safeint(i.v, None)
    if v is None:
        raise web.seeother(web.changequery({}))

    thing = web.ctx.site.get(key, i.v)
    if not thing:
        raise web.notfound()

    def revert(thing):
        # Walk back past deletes and broken redirects to a live revision,
        # saving the recovered revision as it goes.
        if thing.type.key == "/type/delete" and thing.revision > 1:
            prev = web.ctx.site.get(thing.key, thing.revision-1)
            if prev.type.key in ["/type/delete", "/type/redirect"]:
                return revert(prev)
            else:
                prev._save("revert to revision %d" % prev.revision)
                return prev
        elif thing.type.key == "/type/redirect":
            redirect = web.ctx.site.get(thing.location)
            if redirect and redirect.type.key not in ["/type/delete", "/type/redirect"]:
                return redirect
            else:
                # bad redirect. Try the previous revision
                prev = web.ctx.site.get(thing.key, thing.revision-1)
                return revert(prev)
        else:
            return thing

    def process(value):
        # Recursively replace references to deleted/reverted things in
        # lists, embedded (keyless) things and direct references.
        if isinstance(value, list):
            return [process(v) for v in value]
        elif isinstance(value, client.Thing):
            if value.key:
                if value.type.key in ['/type/delete', '/type/revert']:
                    return revert(value)
                else:
                    return value
            else:
                for k in value.keys():
                    value[k] = process(value[k])
                return value
        else:
            return value

    for k in thing.keys():
        thing[k] = process(thing[k])

    comment = i._comment or "reverted to revision %d" % v
    thing._save(comment)
    raise web.seeother(key)
def GET(self):
    """Admin memory overview: per-type counts, sorted and paginated (50/page)."""
    i = web.input(page=1, sort="diff", prefix="")
    page = safeint(i.page, 1)
    begin, end = (page - 1) * 50, page * 50
    # Fall back to the default sort column for unknown values.
    if i.sort not in ["count", "mark", "diff"]:
        i.sort = "diff"
    counts = [c for c in memory.get_counts() if c.type.startswith(i.prefix)]
    counts.sort(key=lambda c: c[i.sort], reverse=True)
    return render_template("admin/memory/index", counts[begin:end], page, sort=i.sort)
def GET(self, key):
    """Render a subject page as JSON, rejecting limits above MAX_RESULTS."""
    web.header('Content-Type', 'application/json')

    # If the key is not in the normalized form, redirect to the normalized form.
    nkey = self.normalize_key(key)
    if nkey != key:
        raise web.redirect(nkey)

    # Does the key require any processing before using it to query solr?
    key = self.process_key(key)

    i = web.input(offset=0, limit=DEFAULT_RESULTS, details='false',
                  has_fulltext='false', sort='editions', available='false')
    i.limit = safeint(i.limit, DEFAULT_RESULTS)
    i.offset = safeint(i.offset, 0)
    if i.limit > MAX_RESULTS:
        msg = json.dumps({'error': 'Specified limit exceeds maximum of %s.' % MAX_RESULTS})
        raise web.HTTPError('400 Bad Request', data=msg)

    filters = {}
    if i.get('has_fulltext') == 'true':
        filters['has_fulltext'] = 'true'

    if i.get('published_in'):
        if '-' in i.published_in:
            begin, end = i.published_in.split('-', 1)
            if safeint(begin, None) is not None and safeint(end, None) is not None:
                filters['publish_year'] = (begin, end)  # range
        else:
            y = safeint(i.published_in, None)
            if y is not None:
                filters['publish_year'] = i.published_in

    subject_results = get_subject(key, offset=i.offset, limit=i.limit, sort=i.sort,
                                  details=i.details.lower() == 'true', **filters)

    # BUG FIX: i.has_fulltext is a string that defaults to 'false' and any
    # non-empty string is truthy, so the override below ran unconditionally.
    # Only apply it when the fulltext filter is actually in effect.
    if i.has_fulltext == 'true':
        subject_results['ebook_count'] = subject_results['work_count']
    return json.dumps(subject_results)
def row(r):
    """Convert a table-of-contents entry into a web.storage row."""
    if isinstance(r, basestring):
        # A bare string entry holds just the title.
        return web.storage(level=0, label="", title=r, pagenum="")
    return web.storage(
        level=safeint(r.get("level", "0"), 0),
        label=r.get("label", ""),
        title=r.get("title", ""),
        pagenum=r.get("pagenum", ""),
    )
def row(r):
    """Format a table-of-contents entry as a wiki-style text line."""
    if isinstance(r, basestring):
        # there might be some legacy docs in the system with
        # table-of-contents represented as a list of strings.
        level, label, title, page = 0, "", r, ""
    else:
        level = safeint(r.get('level', '0'), 0)
        label = r.get('label', '')
        title = r.get('title', '')
        page = r.get('pagenum', '')
    return "*" * level + " " + " | ".join([label, title, page])
def row(r):
    """Normalize a table-of-contents entry into a web.storage row."""
    if isinstance(r, six.string_types):
        # A bare string entry holds just the title.
        return web.storage(level=0, label="", title=r, pagenum="")
    return web.storage(
        level=safeint(r.get('level', '0'), 0),
        label=r.get('label', ''),
        title=r.get('title', ''),
        pagenum=r.get('pagenum', ''),
    )
def GET(self):
    """Search API: run a solr query and return the solr response as JSON."""
    i = web.input()
    query = json.loads(i.query) if 'query' in i else i

    # Friendly sort names mapped to solr sort clauses; unknown names raise.
    sorts = dict(
        editions='edition_count desc',
        old='first_publish_year asc',
        new='first_publish_year desc',
        scans='ia_count desc')
    sort_name = query.get('sort', None)
    sort_value = sort_name and sorts[sort_name] or None

    limit = safeint(query.pop("limit", "100"), default=100)
    if "offset" in query:
        offset = safeint(query.pop("offset", 0), default=0)
        page = None
    else:
        offset = None
        page = safeint(query.pop("page", "1"), default=1)

    query['wt'] = 'json'
    reply, solr_select, q_list = run_solr_query(
        query, rows=limit, page=page, sort=sort_value, offset=offset, fields="*")
    response = json.loads(reply)['response']
    # backward compatibility
    response['num_found'] = response['numFound']
    return delegate.RawText(json.dumps(response, indent=True))
def GET(self):
    """Search API: run a solr query and build the response dict.

    NOTE(review): unlike the sibling versions of this handler, this one
    never returns the response or sets num_found — confirm whether a
    trailing `return delegate.RawText(...)` was lost.
    """
    i = web.input()
    if 'query' in i:
        query = json.loads(i.query)
    else:
        query = i

    sorts = dict(
        editions='edition_count desc',
        old='first_publish_year asc',
        new='first_publish_year desc',
        scans='ia_count desc')
    sort_name = query.get('sort', None)
    sort_value = sort_name and sorts[sort_name] or None

    limit = safeint(query.pop("limit", "100"), default=100)
    if "offset" in query:
        offset = safeint(query.pop("offset", 0), default=0)
        page = None
    else:
        offset = None
        page = safeint(query.pop("page", "1"), default=1)

    query['wt'] = 'json'

    try:
        (reply, solr_select, q_list) = run_solr_query(
            query, rows=limit, page=page, sort=sort_value, offset=offset, fields="*")
        response = json.loads(reply)['response'] or ''
    except (ValueError, IOError) as e:
        # FIX: modernized Python 2-only `except ..., e` syntax (valid on
        # Python 2.6+ and 3; sibling handlers already use `as e`).
        logger.error("Error in processing search API.")
        response = dict(start=0, numFound=0, docs=[], error=str(e))
def GET(self, key):
    """Subject JSON endpoint; keys must be lowercase, others redirect."""
    lowered = key.lower()
    if lowered != key:
        raise web.redirect(lowered)

    i = web.input(offset=0, limit=12, details='false', has_fulltext='false', sort='editions')

    filters = {}
    if i.get('has_fulltext') == 'true':
        filters['has_fulltext'] = 'true'

    published = i.get('published_in')
    if published:
        if '-' in published:
            first, last = published.split('-', 1)
            both_ints = (safeint(first, None) is not None
                         and safeint(last, None) is not None)
            if both_ints:
                filters['publish_year'] = [first, last]
        elif safeint(published, None) is not None:
            filters['publish_year'] = published

    i.limit = safeint(i.limit, 12)
    i.offset = safeint(i.offset, 0)

    subject = get_subject(
        key,
        offset=i.offset,
        limit=i.limit,
        sort=i.sort,
        details=i.details.lower() == 'true',
        **filters
    )
    return json.dumps(subject)
def GET(self, key):
    """Edition edit form with permission check; orphaned editions get a
    synthesized work."""
    i = web.input(v=None)
    revision = i.v and safeint(i.v, None)

    if not web.ctx.site.can_write(key):
        return render_template("permission_denied", web.ctx.fullpath,
                               "Permission denied to edit " + key + ".")

    edition = web.ctx.site.get(key, revision)
    if edition is None:
        raise web.notfound()

    work = ((edition.works and edition.works[0])
            or edition.make_work_from_orphaned_edition())
    return render_template('books/edit', work, edition, recaptcha=get_recaptcha())
def GET(self, key):
    """Subject JSON without detail output; normalizes the key first."""
    # Redirect if the key is not already in normalized form.
    normalized = self.normalize_key(key)
    if normalized != key:
        raise web.redirect(normalized)

    # The key may need processing before it is used to query solr.
    key = self.process_key(key)

    i = web.input(offset=0, limit=12, has_fulltext="false")

    filters = {}
    if i.get("has_fulltext") == "true":
        filters["has_fulltext"] = "true"

    published = i.get("published_in")
    if published:
        if "-" in published:
            lo, hi = published.split("-", 1)
            if safeint(lo, None) is not None and safeint(hi, None) is not None:
                filters["publish_year"] = (lo, hi)
        elif safeint(published, None) is not None:
            filters["publish_year"] = published

    i.limit = safeint(i.limit, 12)
    i.offset = safeint(i.offset, 0)

    results = get_subject(key, offset=i.offset, limit=i.limit,
                          details=False, **filters)
    return json.dumps(results)
def POST(self, key):
    """Save submitted edits to the work at *key* and redirect back to it.

    On a client or validation error, a flash message is shown and the edit
    form is re-rendered instead of redirecting.
    """
    i = web.input(v=None, _method="GET")
    v = i.v and safeint(i.v, None)

    work = web.ctx.site.get(key, v)
    if work is None:
        raise web.notfound()

    try:
        helper = SaveBookHelper(work, None)
        helper.save(web.input())
        add_flash_message("info", utils.get_message("flash_work_updated"))
        # web.seeother is an exception type; raising it performs the redirect
        # and is not caught by the handlers below.
        raise web.seeother(work.url())
    # Fixed: the original used Python-2-only `except (...), e:` syntax, which
    # is a SyntaxError on Python 3 and inconsistent with sibling handlers.
    except (ClientException, ValidationException) as e:
        add_flash_message('error', str(e))
        return self.GET(key)
def POST(self, key):
    """Handle a book edit-form submission for the edition at *key*."""
    i = web.input(v=None, _method="GET")

    # Reject spammy submissions outright.
    if spamcheck.is_spam():
        return render_template(
            "message.html", "Oops", 'Something went wrong. Please try again later.'
        )

    recap = get_recaptcha()
    if recap and not recap.validate():
        return render_template(
            "message.html",
            'Recaptcha solution was incorrect',
            'Please <a href="javascript:history.back()">go back</a> and try again.',
        )

    v = i.v and safeint(i.v, None)
    edition = web.ctx.site.get(key, v)
    if edition is None:
        raise web.notfound()

    work = edition.works[0] if edition.works else None

    # A freshly added book: both records are at revision 1 and the work has
    # exactly this one edition.
    is_new = bool(
        edition.revision == 1 and work and work.revision == 1 and work.edition_count == 1
    )

    try:
        SaveBookHelper(work, edition).save(web.input())
        if is_new:
            add_flash_message("info", utils.get_message("flash_book_added"))
        else:
            add_flash_message("info", utils.get_message("flash_book_updated"))
        raise safe_seeother(edition.url())
    except ClientException as e:
        add_flash_message('error', e.args[-1] or e.json)
        return self.GET(key)
    except ValidationException as e:
        add_flash_message('error', str(e))
        return self.GET(key)
def GET(self, type):
    """Render the admin memory page for all objects of the given type.

    Query params:
        page: 1-based page number, passed through to the template.
              NOTE(review): no slicing is applied here — the original computed
              begin/end bounds but never used them; pagination presumably
              happens in the template. Confirm before relying on it.
        diff: when "true", hide objects whose ids were previously marked.
    """
    i = web.input(page=1, diff="false")
    page = safeint(i.page, 1)

    # Fixed: the objects were previously fetched twice, with the first result
    # immediately discarded. Fetch once and wrap.
    objects = [Object(obj) for obj in memory.get_objects_by_type(type)]

    if i.diff == "true":
        marked = memory._mark_ids.get(type, [])
        objects = [obj for obj in objects if obj.get_id() not in marked]

    return render_template("admin/memory/type", type, objects, page)
def GET(self, path):
    """Render the page at *path*, optionally at the version given by ?v=."""
    i = web.input(v=None)

    # A non-integer version is meaningless; strip it from the query string.
    if i.v is not None and safeint(i.v, None) is None:
        raise web.seeother(web.changequery(v=None))

    p = db.get_version(path, i.v)
    if p is None:
        return notfound(path)
    elif p.type.key == '/type/delete':
        # Deleted pages render normally but with a 404 status.
        web.ctx.status = '404 Not Found'
        return render.viewpage(p)
    elif (p.type.key == "/type/redirect"
          and p.location
          and not p.location.startswith('http://')
          and not p.location.startswith('://')):
        # Security fix: follow only relative locations so a stored redirect
        # page cannot be used as an open redirect to an external site.
        web.redirect(p.location)
    else:
        return render.viewpage(p)
def GET(self):
    """Autocomplete works by title, or look one up directly by OLID."""
    i = web.input(q="", limit=5)
    limit = safeint(i.limit, 5)

    solr = get_solr()
    query = solr.escape(i.q).strip()
    olid = query.upper()
    is_key_query = is_work_olid(olid)

    if is_key_query:
        # ensure uppercase; key is case sensitive in solr
        solr_q = 'key:"/works/%s"' % olid
    else:
        # Exact-title matches are boosted over prefix matches.
        solr_q = 'title:"%s"^2 OR title:(%s*)' % (query, query)

    data = solr.select(
        solr_q,
        q_op='AND',
        sort='edition_count desc',
        rows=limit,
        fq='type:work',
        # limit the fields returned for better performance
        fl='key,title,subtitle,cover_i,first_publish_year,author_name,edition_count',
    )

    # exclude fake works that actually have an edition key
    docs = [d for d in data['docs'] if d['key'][-1] == 'W']

    if is_key_query and not docs:
        # The work is not in solr yet; serve a stand-in record from the site.
        work = web.ctx.site.get('/works/%s' % olid)
        if work:
            docs = [work.as_fake_solr_record()]

    for d in docs:
        # Fields required by the frontend.
        d['name'] = d['key'].split('/')[-1]
        d['full_title'] = d['title']
        if 'subtitle' in d:
            d['full_title'] += ": " + d['subtitle']

    return to_json(docs)
def GET(self):
    """Proxy a works search to solr and return the raw JSON response."""
    # Fixed: these local imports previously appeared *after* simplejson was
    # already used, which raises NameError unless the module happens to be
    # imported at file level. Import before first use.
    from openlibrary.utils.solr import Solr
    import simplejson

    i = web.input()
    # The query may come as a JSON blob in ?query=... or as plain parameters.
    if 'query' in i:
        query = simplejson.loads(i.query)
    else:
        query = i

    limit = query.pop("limit", None)
    offset = safeint(query.pop("offset", 0))

    solr = Solr("http://%s/solr/works" % solr_host)
    result = solr.select(query, rows=limit, start=offset)

    web.header('Content-Type', 'application/json')
    return delegate.RawText(simplejson.dumps(result, indent=True))
def GET(self, path):
    """Display the page at *path*, optionally at the version given by ?v=."""
    i = web.input(v=None)
    if i.v is not None and safeint(i.v, None) is None:
        # Strip a malformed version number from the query string.
        raise web.seeother(web.changequery(v=None))

    page = db.get_version(path, i.v)
    if page is None:
        return notfound(path)

    type_key = page.type.key
    if type_key == '/type/delete':
        # Deleted pages render normally but with a 404 status.
        web.ctx.status = '404 Not Found'
        return render.viewpage(page)

    if type_key == "/type/redirect":
        target = page.location
        # Follow only relative locations, so stored redirect pages cannot be
        # used as open redirects to external sites.
        if target and not target.startswith('http://') and not target.startswith('://'):
            web.redirect(target)
            # Preserves the original behavior: the redirect branch falls
            # through with no return value.
            return None
        return render.viewpage(page)

    return render.viewpage(page)
def GET(self, path):
    """Render the edit form for *path*, creating a fresh version if none exists."""
    i = web.input(v=None, t=None)

    if not web.ctx.site.can_write(path):
        return render.permission_denied(
            web.ctx.fullpath, "Permission denied to edit " + path + "."
        )

    # Strip a non-integer ?v= from the query string.
    if i.v is not None and safeint(i.v, None) is None:
        raise web.seeother(web.changequery(v=None))

    page = db.get_version(path, i.v) or db.new_version(path, types.guess_type(path))

    # ?t= lets the caller preselect a page type.
    if i.t:
        page_type = db.get_type(i.t)
        if page_type is None:
            add_flash_message('error', 'Unknown type: ' + i.t)
        else:
            page.type = page_type

    return render.editpage(page)
def row(r):
    """Normalize a table-of-contents entry into a storage object with
    level/label/title/pagenum fields.

    *r* may be a bare string (treated as the title), a {'value': ...} dict,
    or a full dict with optional level/label/title/pagenum keys.
    """
    # NOTE(review): `basestring` is Python 2 only — kept as-is since the
    # file's target Python version is ambiguous.
    if isinstance(r, basestring):
        return web.storage(level=0, label="", title=web.safeunicode(r), pagenum="")

    if 'value' in r:
        return web.storage(level=0, label="", title=web.safeunicode(r['value']), pagenum="")

    return web.storage(
        level=safeint(r.get('level', '0'), 0),
        label=r.get('label', ''),
        title=r.get('title', ''),
        pagenum=r.get('pagenum', ''),
    )
def GET(self):
    """Autocomplete author names, matching primary or alternate names."""
    i = web.input(q="", limit=5)
    i.limit = safeint(i.limit, 5)

    solr = get_authors_solr()
    # Prefix match on the (escaped) query.
    name = solr.escape(i.q) + "*"
    q = 'name:(%s) OR alternate_names:(%s)' % (name, name)

    # Fixed: i.limit was parsed and sanitized but never applied; pass it as
    # the row count, matching the sibling works-autocomplete handler.
    data = solr.select(q, q_op="AND", sort="work_count desc", rows=i.limit)

    docs = data['docs']
    for d in docs:
        # Fields expected by the autocomplete frontend.
        d.key = "/authors/" + d.key
        if 'top_work' in d:
            d['works'] = [d.pop('top_work')]
        else:
            d['works'] = []
        d['subjects'] = d.pop('top_subjects', [])

    return to_json(docs)