def do_search(param, sort, page=1, rows=100):
    (reply, solr_select, q_list) = run_solr_query(param, rows, page, sort)
    is_bad = False
    if reply.startswith('<html'):
        is_bad = True
    if not is_bad:
        try:
            root = XML(reply)
        except XMLSyntaxError:
            is_bad = True
    if is_bad:
        m = re_pre.search(reply)
        return web.storage(
            facet_counts = None,
            docs = [],
            is_advanced = bool(param.get('q', 'None')),
            num_found = None,
            solr_select = solr_select,
            q_list = q_list,
            error = (web.htmlunquote(m.group(1)) if m else reply),
        )
    docs = root.find('result')
    return web.storage(
        facet_counts = read_facets(root),
        docs = docs,
        is_advanced = bool(param.get('q', 'None')),
        num_found = (int(docs.attrib['numFound']) if docs is not None else None),
        solr_select = solr_select,
        q_list = q_list,
        error = None,
    )
def create_account_manager(self):
    # Hack to use the accounts stuff from Infogami
    infobase_config.user_root = "/people"
    store = web.storage(store=self.store)
    site = web.storage(store=store, save_many=self.save_many)
    return account.AccountManager(site, config.infobase['secret_key'])
def work_wrapper(w):
    d = web.storage(
        key="/works/" + w["key"],
        title=w["title"],
        edition_count=w["edition_count"]
    )
    if "cover_id" in w:
        d.cover_id = w["cover_id"]
    elif "cover_edition_key" in w:
        book = web.ctx.site.get("/books/" + w["cover_edition_key"])
        cover = book and book.get_cover()
        d.cover_id = cover and cover.id or None
        d.cover_edition_key = w['cover_edition_key']
    else:
        d.cover_id = None

    # special care to handle missing author_key/author_name in the solr record
    w.setdefault('author_key', [])
    w.setdefault('author_name', [])

    d.authors = [web.storage(key='/authors/' + k, name=n)
                 for k, n in zip(w['author_key'], w['author_name'])]
    d.first_publish_year = (w['first_publish_year'][0] if 'first_publish_year' in w else None)
    d.ia = w.get('ia', [])
    d.has_fulltext = w.get('has_fulltext', "false")
    return d
def get_voter_details_old(voterid):
    # ignore voterids like "yes" etc.
    if len(voterid) <= 4:
        return

    logger.info("get_voter_details %s", voterid)
    try:
        b = web.Browser()
        b.open(URL)
        b.select_form(index=0)
        b['ctl00$ContentPlaceHolder1$ddlDistrict'] = ['21']
        b['ctl00$ContentPlaceHolder1$txtEpic'] = voterid
        b.submit()
    except Exception:
        logger.error("failed to request voterid details for %s", voterid, exc_info=True)
        return web.storage()

    soup = b.get_soup()
    table = soup.find("table", {"id": "ctl00_ContentPlaceHolder1_GridView1"})
    if not table:
        return None

    last_row = table.findAll("tr")[-1]
    data = [td.getText() for td in last_row.findAll(("td", "tr"))]

    # skip the first one, which is a button
    data = data[1:]

    cols = "ac_num ac_name part_no sl_no first_name last_name rel_firstname rel_lastname sex age".split()
    d = dict(zip(cols, data))
    d['voterid'] = voterid
    logger.info("voter info %s %s", voterid, d)
    return web.storage(d)
def get_meta_xml(itemid):
    """Returns the contents of meta_xml as JSON.
    """
    itemid = itemid.strip()
    url = 'http://www.archive.org/download/%s/%s_meta.xml' % (itemid, itemid)
    try:
        stats.begin("archive.org", url=url)
        metaxml = urllib2.urlopen(url).read()
        stats.end()
    except IOError:
        stats.end()
        return web.storage()

    # archive.org returns html on internal errors.
    # Checking for valid xml before trying to parse it.
    if not metaxml.strip().startswith("<?xml"):
        return web.storage()

    try:
        defaults = {"collection": [], "external-identifier": []}
        return web.storage(xml2dict(metaxml, **defaults))
    except Exception, e:
        print >> web.debug, "Failed to parse metaxml for %s: %s" % (itemid, str(e))
        return web.storage()
def work_wrapper(w):
    d = web.storage(key="/works/" + w["key"], title=w["title"], edition_count=w["edition_count"])
    if "cover_id" in w:
        d.cover_id = w["cover_id"]
    elif "cover_edition_key" in w:
        book = web.ctx.site.get("/books/" + w["cover_edition_key"])
        cover = book and book.get_cover()
        d.cover_id = cover and cover.id or None
        d.cover_edition_key = w["cover_edition_key"]
    else:
        d.cover_id = None

    d.subject = w.get("subject", [])

    ia_collection = w["ia_collection_s"].split(";") if "ia_collection_s" in w else []
    d.ia_collection = ia_collection
    d.lendinglibrary = "lendinglibrary" in ia_collection
    d.printdisabled = "printdisabled" in ia_collection
    d.lending_edition = w.get("lending_edition_s", "")
    d.overdrive = w["overdrive_s"].split(";")[0] if "overdrive_s" in w else ""

    # special care to handle missing author_key/author_name in the solr record
    w.setdefault("author_key", [])
    w.setdefault("author_name", [])

    d.authors = [web.storage(key="/authors/" + k, name=n)
                 for k, n in zip(w["author_key"], w["author_name"])]
    d.first_publish_year = w["first_publish_year"][0] if "first_publish_year" in w else None
    d.ia = w.get("ia", [])
    d.public_scan = w.get("public_scan_b", bool(d.ia))
    d.has_fulltext = w.get("has_fulltext", "false")
    return d
def GET(self, page_path):
    try:
        page = get_page_by_path(page_path)
        if not page.is_published and not auth.get_user():
            raise flash.redirect(_(page_access_forbidden_text), "/login")
        load_page_data(page)
        if auth.has_role("admin"):
            json_data = web.storage(
                page=page_to_json(page),
                pages=pages_to_json(get_pages_in_tree_order()),
            )
        else:
            json_data = web.storage()
        if "edit" in web.input() and auth.has_role("admin"):
            json_data.update(
                page_block=block_to_json(get_page_block_by_page_id(page.id)),
                template_blocks=template_blocks_to_json()
            )
        else:
            load_page_blocks(page.id)
        return render.pages.page(json_data)
    except IndexError:
        raise web.notfound()
def work_object(w):  # called by works_by_author
    ia = w.get('ia', [])
    if config.get("single_core_solr"):
        key = w['key']
    else:
        key = '/works/' + w['key']
    obj = dict(
        authors = [web.storage(key='/authors/' + k, name=n)
                   for k, n in zip(w['author_key'], w['author_name'])],
        edition_count = w['edition_count'],
        key = key,
        title = w['title'],
        public_scan = w.get('public_scan_b', bool(ia)),
        lending_edition = w.get('lending_edition_s', ''),
        lending_identifier = w.get('lending_identifier_s', ''),
        overdrive = (w['overdrive_s'].split(';') if 'overdrive_s' in w else []),
        collections = set(w['ia_collection_s'].split(';') if 'ia_collection_s' in w else []),
        url = key + '/' + urlsafe(w['title']),
        cover_edition_key = w.get('cover_edition_key'),
        first_publish_year = (w['first_publish_year'] if 'first_publish_year' in w else None),
        ia = w.get('ia', []),
        cover_i = w.get('cover_i')
    )
    if obj['lending_identifier']:
        doc = web.ctx.site.store.get("ebooks/" + obj['lending_identifier']) or {}
        obj['checked_out'] = doc.get("borrowed") == "true"
    else:
        obj['checked_out'] = False
    for f in 'has_fulltext', 'subtitle':
        if w.get(f):
            obj[f] = w[f]
    return web.storage(obj)
def _old_get_meta_xml(itemid):
    """Returns the contents of meta_xml as JSON.
    """
    itemid = web.safestr(itemid.strip())
    url = 'http://www.archive.org/download/%s/%s_meta.xml' % (itemid, itemid)
    try:
        stats.begin('archive.org', url=url)
        metaxml = urllib2.urlopen(url).read()
        stats.end()
    except IOError:
        logger.error("Failed to download _meta.xml for %s", itemid, exc_info=True)
        stats.end()
        return web.storage()

    # archive.org returns html on internal errors.
    # Checking for valid xml before trying to parse it.
    if not metaxml.strip().startswith("<?xml"):
        return web.storage()

    try:
        defaults = {"collection": [], "external-identifier": []}
        return web.storage(xml2dict(metaxml, **defaults))
    except Exception as e:
        logger.error("Failed to parse metaxml for %s", itemid, exc_info=True)
        return web.storage()
def POST(self, path):
    i = web.input(_method='post')
    i = web.storage(helpers.unflatten(i))
    i.key = path

    _ = web.storage((k, i.pop(k)) for k in i.keys() if k.startswith('_'))
    action = self.get_action(_)
    comment = _.get('_comment', None)

    for k, v in i.items():
        i[k] = self.trim(v)

    p = web.ctx.site.get(path) or web.ctx.site.new(path, {})
    p.update(i)

    if action == 'preview':
        p['comment_'] = comment
        return render.editpage(p, preview=True)
    elif action == 'save':
        try:
            p._save(comment)
            path = web.input(_method='GET', redirect=None).redirect or web.changequery(query={})
            raise web.seeother(path)
        except (ClientException, db.ValidationException), e:
            add_flash_message('error', str(e))
            p['comment_'] = comment
            return render.editpage(p)
def f():
    web.ctx.disable_permission_check = True

    d = web.storage({"key": key, "type": {"key": "/type/user"}})
    d.update(data)
    self.site.save(key, d, timestamp=timestamp, author=d, comment="Created new account")

    q = make_query(d)
    account_bot = config.get('account_bot')
    account_bot = account_bot and web.storage({"key": account_bot, "type": {"key": "/type/user"}})
    self.site.save_many(q, ip=ip, timestamp=timestamp, author=account_bot,
                        action='register', comment="Setup new account")
    self.site.store.register(key, email, enc_password)
    self.update_user_details(username, verified=True, active=True)

    # Add account doc to store
    olddoc = self.site.store.store.get("account/" + username) or {}
    doc = {
        "_key": "account/" + username,
        "_rev": olddoc.get("_rev"),
        "type": "account",
        "registered_on": olddoc['registered_on'],
        "activated_on": timestamp.isoformat(),
        "last_login": timestamp.isoformat(),
    }
    self.site.store.store.put("account/" + username, doc)
def __init__(self):
    init_dbcontext()
    web.cache = web.Storage()
    web.cache.sku_properid_datas = web.storage()
    web.cache.sku_properval_datas = web.storage()
    web.cache.cate_attr_datas = web.storage()
    self.access_token = ''
    self.get_token_from_db()
def load_extensions():
    from common import db
    db.init(get_conn())

    web.extensions = web.storage()
    web.extensions.db = db
    web.extensions.ensure_login = ensure_login

    web.app_extensions = web.storage()
def get_doc(doc):  # called from work_search template
    e_ia = doc.find("arr[@name='ia']")
    first_pub = None
    e_first_pub = doc.find("int[@name='first_publish_year']")
    if e_first_pub is not None:
        first_pub = e_first_pub.text
    e_first_edition = doc.find("str[@name='first_edition']")
    first_edition = None
    if e_first_edition is not None:
        first_edition = e_first_edition.text

    work_subtitle = None
    e_subtitle = doc.find("str[@name='subtitle']")
    if e_subtitle is not None:
        work_subtitle = e_subtitle.text

    if doc.find("arr[@name='author_key']") is None:
        assert doc.find("arr[@name='author_name']") is None
        authors = []
    else:
        ak = [e.text for e in doc.find("arr[@name='author_key']")]
        an = [e.text for e in doc.find("arr[@name='author_name']")]
        authors = [web.storage(key=key, name=name,
                               url="/authors/%s/%s" % (key, (urlsafe(name) if name is not None else 'noname')))
                   for key, name in zip(ak, an)]

    cover = doc.find("str[@name='cover_edition_key']")
    e_public_scan = doc.find("bool[@name='public_scan_b']")
    e_overdrive = doc.find("str[@name='overdrive_s']")
    e_lending_edition = doc.find("str[@name='lending_edition_s']")
    e_collection = doc.find("str[@name='ia_collection_s']")
    collections = set()
    if e_collection is not None:
        collections = set(e_collection.text.split(';'))

    doc = web.storage(
        key = doc.find("str[@name='key']").text,
        title = doc.find("str[@name='title']").text,
        edition_count = int(doc.find("int[@name='edition_count']").text),
        ia = [e.text for e in (e_ia if e_ia is not None else [])],
        has_fulltext = (doc.find("bool[@name='has_fulltext']").text == 'true'),
        public_scan = ((e_public_scan.text == 'true') if e_public_scan is not None else (e_ia is not None)),
        overdrive = (e_overdrive.text.split(';') if e_overdrive is not None else []),
        lending_edition = (e_lending_edition.text if e_lending_edition is not None else None),
        collections = collections,
        authors = authors,
        first_publish_year = first_pub,
        first_edition = first_edition,
        subtitle = work_subtitle,
        cover_edition_key = (cover.text if cover is not None else None),
    )
    doc.url = '/works/' + doc.key + '/' + urlsafe(doc.title)

    if not doc.public_scan and doc.lending_edition:
        store_doc = web.ctx.site.store.get("ebooks/books/" + doc.lending_edition) or {}
        doc.checked_out = store_doc.get("borrowed") == "true"
    else:
        doc.checked_out = "false"
    return doc
def get_talk(id):
    try:
        talk = web.ctx.site.store["talks/" + str(id)]
    except KeyError:
        return None

    talk['key'] = 'talks/' + id
    talk['files'] = [web.storage(f) for f in talk.get('files', [])]
    return web.storage(talk)
def _get_subjects(self):
    """Returns list of subjects inferred from the seeds.

    Each item in the list will be a storage object with title and url.
    """
    # sample subjects
    return [
        web.storage(title="Cheese", url="/subjects/cheese"),
        web.storage(title="San Francisco", url="/subjects/place:san_francisco")
    ]
def GET(self):
    rows = []
    with open('trend5upradiocount.csv', 'rb') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
        for row in spamreader:
            if row[0] == 'trend5':
                continue
            rows.append(web.storage(trend=row[0], p=row[1], count=row[2]))
    r = web.storage(rows=rows, query='', count=len(rows))
    return render.trend(r)
def render(self, book):
    # Anand: sorry for the hack.
    print sys._getframe(1)
    render_template = sys._getframe(1).f_locals['render_template']
    if "authors" in book:
        book["authors"] = [web.storage(a) for a in book['authors']]
    return unicode(render_template("books/carousel_item", web.storage(book)))
def get_property(self, type, name):
    if name == 'type':
        return web.storage(name='type',
                           expected_type=web.storage(key='/type/type', kind="regular"),
                           unique=True)
    elif name in ['permission', 'child_permission']:
        return web.storage(name=name,
                           expected_type=web.storage(key='/type/permission', kind="regular"),
                           unique=True)
    else:
        for p in type.get('properties', []):
            if p.get('name') == name:
                return p
def get_paging(start, max_results, query=False, results_per_page=15, window_size=15, max_allowed_results=1000):
    max_allowed_pages = max_allowed_results / results_per_page
    c_page = start / results_per_page + 1
    if not start:
        c_page = 1
    nb_pages = max_results / results_per_page
    if max_results % results_per_page != 0:
        nb_pages += 1

    left_a = right_a = False
    if c_page > 1:
        left_a = (c_page - 2) * results_per_page
    if c_page < nb_pages:
        right_a = start + results_per_page
        if right_a > max_allowed_pages:
            right_a = False

    left = c_page - window_size / 2
    if left < 1:
        left = 1
    right = left + window_size - 1
    max_pages = (nb_pages > max_allowed_pages) and max_allowed_pages or nb_pages
    if right > max_pages:
        left = left - (right - max_pages)
        if left < 1:
            left = 1
        right = max_pages

    pages = []
    for i in range(left, right + 1):
        pages.append(web.storage(number=i, start=(i - 1) * results_per_page))

    leftmost_a = rightmost_a = False
    if pages and pages[0].number > 1:
        leftmost_a = web.storage(number=1, start=0)
    if pages and pages[-1].number < nb_pages and nb_pages < max_allowed_pages:
        rightmost_a = web.storage(number=nb_pages, start=(nb_pages - 1) * results_per_page)

    return web.storage(
        start=start,
        max_results=max_results,
        c_page=c_page,
        nb_pages=nb_pages,
        pages=pages,
        leftmost_a=leftmost_a,
        left_a=left_a,
        right_a=right_a,
        rightmost_a=rightmost_a,
        query_enc=query and urllib.quote(query) or ''
    )
def read_schema(self, db):
    rows = db.query("SELECT table_name, column_name, data_type " +
                    " FROM information_schema.columns" +
                    " WHERE table_schema = 'public'")

    schema = web.storage()
    for row in rows:
        t = schema.setdefault(row.table_name, web.storage())
        t[row.column_name] = row
    return schema
def process(name, value):
    if value:
        if not isinstance(value, list):
            value = [value]
        id = id_map.get(name) or web.storage(name=name, label=name, url_format=None)
        for v in value:
            d[id.name] = web.storage(
                name=id.name,
                label=id.label,
                value=v,
                url=id.get("url") and id.url.replace("@@@", v)
            )
def browserid():
    c = web.cookies()
    if c.get('browserid_assertion'):
        out = urllib.urlencode(dict(audience=web.ctx.host, assertion=c.browserid_assertion))
        o = json.loads(urllib.urlopen('https://browserid.org/verify', out).read())
        if o['status'] == 'failure':
            return FalseStorage(o)
        else:
            return web.storage(o)
    else:
        return web.storage()
def get_property(self, type, name):
    if name == "type":
        return web.storage(name="type",
                           expected_type=web.storage(key="/type/type", kind="regular"),
                           unique=True)
    elif name in ["permission", "child_permission"]:
        return web.storage(
            name=name,
            expected_type=web.storage(key="/type/permission", kind="regular"),
            unique=True
        )
    else:
        for p in type.get("properties", []):
            if p.get("name") == name:
                return p
def get_market_codes(stock_no):
    # Shenzhen codes starting with "002" are the SME board, "000" the main board,
    # and "3" the ChiNext (growth) board; Shanghai codes starting with "6" are
    # all main board.
    if stock_no[:3] == '002':
        return web.storage(yahoo='sz', plate='zxb', pinyin='sz')
    if stock_no[:3] == '000':
        return web.storage(yahoo='sz', plate='sa', pinyin='sz')
    if stock_no[:1] == '3':
        return web.storage(yahoo='sz', plate='cyb', pinyin='sz')
    if stock_no[:1] == '6':
        return web.storage(yahoo='ss', plate='ha', pinyin='sh')
    return web.storage(yahoo='', plate='', pinyin='')
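A quick usage sketch for the mapping above; the ticker values are hypothetical and only illustrate the returned storage shape (key order may vary by Python version).

# hypothetical inputs, for illustration only
print(get_market_codes('600519'))  # <Storage {'yahoo': 'ss', 'plate': 'ha', 'pinyin': 'sh'}>
print(get_market_codes('300750'))  # <Storage {'yahoo': 'sz', 'plate': 'cyb', 'pinyin': 'sz'}>
print(get_market_codes('900001'))  # <Storage {'yahoo': '', 'plate': '', 'pinyin': ''}>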
def _get_edition_config():
    """Returns the edition config.

    The results are cached on the first invocation. Any changes to the
    /config/edition page require restarting the app.

    This is cached because fetching and creating the Thing object was taking
    about 20ms of time for each book request.
    """
    thing = web.ctx.site.get('/config/edition')
    classifications = [web.storage(t.dict()) for t in thing.classifications if 'name' in t]
    identifiers = [web.storage(t.dict()) for t in thing.identifiers if 'name' in t]
    roles = thing.roles
    return web.storage(classifications=classifications, identifiers=identifiers, roles=roles)
def archive():
    """Move files from local disk to tar files and update the paths in the db."""
    tar_manager = TarManager()

    _db = db.getdb()

    try:
        covers = _db.select('cover', where='archived=$f', order='id', vars={'f': False})
        for cover in covers:
            id = "%010d" % cover.id
            print 'archiving', cover

            files = {
                'filename': web.storage(name=id + '.jpg', filename=cover.filename),
                'filename_s': web.storage(name=id + '-S.jpg', filename=cover.filename_s),
                'filename_m': web.storage(name=id + '-M.jpg', filename=cover.filename_m),
                'filename_l': web.storage(name=id + '-L.jpg', filename=cover.filename_l),
            }

            # required until coverstore is completely migrated to new code.
            ensure_thumbnail_created(cover.id, find_image_path(cover.filename))

            for d in files.values():
                d.path = d.filename and os.path.join(config.data_root, "localdisk", d.filename)

            if any(d.path is None or not os.path.exists(d.path) for d in files.values()):
                print >> web.debug, "Missing image file for %010d" % cover.id
                continue

            if isinstance(cover.created, basestring):
                from infogami.infobase import utils
                cover.created = utils.parse_datetime(cover.created)

            timestamp = time.mktime(cover.created.timetuple())
            for d in files.values():
                d.newname = tar_manager.add_file(d.name, open(d.path), timestamp)

            _db.update('cover', where="id=$cover.id", archived=True,
                       filename=files['filename'].newname,
                       filename_s=files['filename_s'].newname,
                       filename_m=files['filename_m'].newname,
                       filename_l=files['filename_l'].newname,
                       vars=locals())

            for d in files.values():
                print 'removing', d.path
                os.remove(d.path)
    finally:
        # logfile.close()
        tar_manager.close()
def f():
    web.ctx.disable_permission_check = True

    d = web.storage({"key": key, "type": {"key": "/type/user"}})
    d.update(data)
    self.site.save(key, d, timestamp=timestamp, author=d, comment="Created new account")

    q = make_query(d)
    account_bot = config.get('account_bot')
    account_bot = account_bot and web.storage({"key": account_bot, "type": {"key": "/type/user"}})
    self.site.save_many(q, ip=ip, timestamp=timestamp, author=account_bot,
                        action='register', comment="Setup new account")
    self.site.store.register(key, email, enc_password)
def add_index(self, id, key, data):
    if isinstance(data, dict):
        type = data.get("type", "")
    else:
        type = ""

    d = [web.storage(store_id=id, type=type, name="_key", value=key)]
    ignored = ["type"]
    for name, value in set(self.indexer.index(data)):
        if not name.startswith("_") and name not in ignored:
            d.append(web.storage(store_id=id, type=type, name=name, value=value))

    if d:
        self.db.multiple_insert('store_index', d)
def test_get_cates():
    top_cates = [web.storage(id=1, name='1'), web.storage(id=2, name='2')]
    clips.get_top_cates = Mock(return_value=top_cates)

    sub_cates = [web.storage(id=11, name='11'), web.storage(id=22, name='22')]
    clips.get_subcates = Mock(return_value=sub_cates)

    cates = clips.get_cates()
    assert len(cates) == 2
    assert len(cates[0].subcates) == 2
    assert_equal(cates[0].subcates[0].id, 11)
def setup():
    d = web.storage(name="language", key="languages", prefix="/languages/",
                    facet="language", facet_key="language", engine=LanguageEngine)
    subjects.SUBJECTS.append(d)
def get_authors(self):
    d = self._get_solr_result()
    return [web.storage(name=a, key='/authors/OL1A', count=count)
            for a, count in d['facets']['authors']]
def register_admin_page(path, cls, label=None, visible=True):
    label = label or cls.__name__
    t = web.storage(path=path, cls=cls, label=label, visible=visible)
    admin_tasks.append(t)
def parse(self, s):
    """Parse the string and return storage object with specified fields and units."""
    pattern = "^" + " *x *".join("([0-9.]*)" for f in self.fields) + " *(.*)$"
    rx = web.re_compile(pattern)
    m = rx.match(s)
    return m and web.storage(zip(self.fields + ["units"], m.groups()))
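A minimal sketch of the pattern this builds, assuming a parser whose self.fields is ["width", "height", "depth"] and treating web.re_compile as equivalent to re.compile:

import re

fields = ["width", "height", "depth"]  # assumed field names, for illustration
pattern = "^" + " *x *".join("([0-9.]*)" for f in fields) + " *(.*)$"
m = re.compile(pattern).match("5.5 x 4 x 0.8 inches")
if m:
    print(dict(zip(fields + ["units"], m.groups())))
    # {'width': '5.5', 'height': '4', 'depth': '0.8', 'units': 'inches'}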
def __init__(self):
    self.names = collections.defaultdict(lambda: web.storage(time=0.0, count=0))
def get_languages():
    global _languages
    if _languages is None:
        keys = web.ctx.site.things({"type": "/type/language", "key~": "/languages/*", "limit": 1000})
        _languages = sorted(
            [web.storage(name=d.name, code=d.code, key=d.key) for d in web.ctx.site.get_many(keys)],
            key=lambda d: d.name.lower()
        )
    return _languages
def process(v):
    v = web.storage(v)
    v.created = parse_datetime(v.created)
    v.author = v.author and web.ctx.site.get(v.author, lazy=True)
    return v
# These two are available in .code module. Importing it here will result in a
# circular import. To avoid that, these values are set by the code.setup
# function.
read_author_facet = None
solr_select_url = None

logger = logging.getLogger("openlibrary.worksearch")

re_chars = re.compile("([%s])" % re.escape(r'+-!(){}[]^"~*?:\\'))
re_year = re.compile(r'\b(\d+)$')

SUBJECTS = [
    web.storage(name="person", key="people", prefix="/subjects/person:",
                facet="person_facet", facet_key="person_key"),
    web.storage(name="place", key="places", prefix="/subjects/place:",
                facet="place_facet", facet_key="place_key"),
    web.storage(name="time", key="times", prefix="/subjects/time:",
                facet="time_facet", facet_key="time_key"),
    web.storage(name="subject", key="subjects", prefix="/subjects/",
                facet="subject_facet", facet_key="subject_key"),
]
def trim_doc(doc):
    """Replace empty values in the document with Nones.
    """
    return web.storage((k, trim_value(v)) for k, v in doc.items() if k[:1] not in "_{")
def get(self, key, revision=None):
    data = self.docs.get(key)
    data = data and web.storage(common.parse_query(data))
    return data and client.create_thing(self, key, self._process_dict(data))
def process(p):
    return web.storage(
        name=p.value,
        key="/languages/" + p.value.replace(" ", "_"),
        count=solr.select({"language": p.value}, rows=0)['num_found']
    )
#!/usr/bin/python
# -*- coding: utf-8 -*-
import web
import os

# enable debug mode
web.config.debug = True
web.config['work_dir'] = os.getcwd()

# template directory
render = web.template.render('templates/')

config = web.storage(
    email='*****@*****.**',
    site_name='dataguru 课程',
    site_desc='',
    site_auther='XiaowuChen',
    resources='/static',
)

# expose these as template globals
web.template.Template.globals['render'] = render
web.template.Template.globals['site_config'] = config
# web.template.Template.globals['db'] = db
# web.template.Template.globals['session'] = web.config._session
"""
Macro extension to markdown.

Macros take argument string as input and returns result as markdown text.
"""
import os

import web

from infogami.utils import storage, template
from infogami.utils.markdown import markdown

# macros loaded from disk
diskmacros = template.DiskTemplateSource()
# macros specified in the code
codemacros = web.storage()

macrostore = storage.DictPile()
macrostore.add_dict(diskmacros)
macrostore.add_dict(codemacros)


def macro(f):
    """Decorator to register a markdown macro.

    Macro is a function that takes argument string and returns result as markdown string.
    """
    codemacros[f.__name__] = f
    return f


def load_macros(plugin_root, lazy=False):
def process_thing(thing):
    t = web.storage()
    for k in ["key", "title", "name", "displayname"]:
        t[k] = thing[k]
    t['type'] = web.storage(key=thing.type.key)
    return t
def _make_subject_link(self, title, prefix=""):
    slug = web.safestr(title.lower().replace(' ', '_').replace(',', ''))
    key = "/subjects/%s%s" % (prefix, slug)
    return web.storage(key=key, title=title, slug=slug)
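A self-contained sketch of the slug and key shape this helper produces; the standalone copy below exists only for illustration and mirrors the sample subject URL used earlier ("/subjects/place:san_francisco").

import web

def make_subject_link(title, prefix=""):
    # standalone copy of _make_subject_link, for illustration only
    slug = web.safestr(title.lower().replace(' ', '_').replace(',', ''))
    key = "/subjects/%s%s" % (prefix, slug)
    return web.storage(key=key, title=title, slug=slug)

print(make_subject_link("San Francisco", prefix="place:").key)
# /subjects/place:san_francisco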
def value_to_thing(value, type):
    if value is None:
        value = ""
    return web.storage(value=value, is_primitive=True, type=type)
# coding: UTF-8
import os
import web

app_root = os.path.dirname(__file__)
templates_root = os.path.join(app_root, '../templates')
render = web.template.render(templates_root, cache=False)

db = web.database(dbn='mysql', db='cover', user='******', pw='lewis')

config = web.storage(
    email='*****@*****.**',
    site_name='lll',
    site_desc='lll',
    static='/static',
)

web.template.Template.globals['config'] = config
web.template.Template.globals['render'] = render

c = {
    'debug': True,
    'warning': True,
    'db_url': 'http://127.0.0.1:5984/',
    'db_name': {
        'cover': 'albumcover'
    }
}
def get_doc(doc):  # called from work_search template
    e_ia = doc.find("arr[@name='ia']")
    first_pub = None
    e_first_pub = doc.find("int[@name='first_publish_year']")
    if e_first_pub is not None:
        first_pub = e_first_pub.text
    e_first_edition = doc.find("str[@name='first_edition']")
    first_edition = None
    if e_first_edition is not None:
        first_edition = e_first_edition.text

    work_subtitle = None
    e_subtitle = doc.find("str[@name='subtitle']")
    if e_subtitle is not None:
        work_subtitle = e_subtitle.text

    if doc.find("arr[@name='author_key']") is None:
        assert doc.find("arr[@name='author_name']") is None
        authors = []
    else:
        ak = [e.text for e in doc.find("arr[@name='author_key']")]
        an = [e.text for e in doc.find("arr[@name='author_name']")]
        authors = [web.storage(key=key, name=name,
                               url="/authors/%s/%s" % (key, (urlsafe(name) if name is not None else 'noname')))
                   for key, name in zip(ak, an)]

    cover = doc.find("str[@name='cover_edition_key']")
    e_public_scan = doc.find("bool[@name='public_scan_b']")
    e_lending_edition = doc.find("str[@name='lending_edition_s']")
    e_lending_identifier = doc.find("str[@name='lending_identifier_s']")
    e_collection = doc.find("str[@name='ia_collection_s']")
    collections = set()
    if e_collection is not None:
        collections = set(e_collection.text.split(';'))

    doc = web.storage(
        key = doc.find("str[@name='key']").text,
        title = doc.find("str[@name='title']").text,
        edition_count = int(doc.find("int[@name='edition_count']").text),
        ia = [e.text for e in (e_ia if e_ia is not None else [])],
        has_fulltext = (doc.find("bool[@name='has_fulltext']").text == 'true'),
        public_scan = ((e_public_scan.text == 'true') if e_public_scan is not None else (e_ia is not None)),
        lending_edition = (e_lending_edition.text if e_lending_edition is not None else None),
        lending_identifier = (e_lending_identifier.text if e_lending_identifier is not None else None),
        collections = collections,
        authors = authors,
        first_publish_year = first_pub,
        first_edition = first_edition,
        subtitle = work_subtitle,
        cover_edition_key = (cover.text if cover is not None else None),
    )
    doc.url = doc.key + '/' + urlsafe(doc.title)

    if not doc.public_scan and doc.lending_identifier:
        store_doc = web.ctx.site.store.get("ebooks/" + doc.lending_identifier) or {}
        doc.checked_out = store_doc.get("borrowed") == "true"
    elif not doc.public_scan and doc.lending_edition:
        store_doc = web.ctx.site.store.get("ebooks/books/" + doc.lending_edition) or {}
        doc.checked_out = store_doc.get("borrowed") == "true"
    else:
        doc.checked_out = "false"
    return doc
def get_creation_info(self):
    if web.ctx.path.startswith("/admin"):
        d = web.ctx.site.versions({'key': self.key, "sort": "-created", "limit": 1})[0]
        return web.storage({"ip": d.ip, "member_since": d.created})
def get_related_subjects(self):
    # dummy subjects
    return [
        web.storage(name='France', key='/subjects/places/France'),
        web.storage(name='Travel', key='/subjects/Travel')
    ]
def works_by_author(
    akey, sort='editions', page=1, rows=100, has_fulltext=False, query=None
):
    # called by merge_author_works
    q = 'author_key:' + akey
    if query:
        q = query

    offset = rows * (page - 1)
    params = [
        ('fq', 'author_key:' + akey),
        ('fq', 'type:work'),
        ('q', q),
        ('start', offset),
        ('rows', rows),
        (
            'fl',
            ','.join(
                [
                    'key',
                    'author_name',
                    'author_key',
                    'title',
                    'subtitle',
                    'edition_count',
                    'ia',
                    'cover_edition_key',
                    'has_fulltext',
                    'language',
                    'first_publish_year',
                    'public_scan_b',
                    'lending_edition_s',
                    'lending_identifier_s',
                    'ia_collection_s',
                    'id_project_gutenberg',
                    'id_librivox',
                    'id_standard_ebooks',
                    'cover_i',
                ]
            ),
        ),
        ('wt', 'json'),
        ('q.op', 'AND'),
        ('facet', 'true'),
        ('facet.mincount', 1),
        ('f.author_facet.facet.sort', 'count'),
        ('f.publish_year.facet.limit', -1),
        ('facet.limit', 25),
    ]

    if has_fulltext:
        params.append(('fq', 'has_fulltext:true'))

    if sort == "editions":
        params.append(('sort', 'edition_count desc'))
    elif sort.startswith('old'):
        params.append(('sort', 'first_publish_year asc'))
    elif sort.startswith('new'):
        params.append(('sort', 'first_publish_year desc'))
    elif sort.startswith('title'):
        params.append(('sort', 'title asc'))

    facet_fields = [
        "author_facet",
        "language",
        "publish_year",
        "publisher_facet",
        "subject_facet",
        "person_facet",
        "place_facet",
        "time_facet",
    ]
    for f in facet_fields:
        params.append(("facet.field", f))

    reply = parse_json_from_solr_query(solr_select_url, params)
    if reply is None:
        return web.storage(
            num_found=0,
            works=[],
            years=[],
            get_facet=[],
            sort=sort,
        )

    # TODO: Deep JSON structure defense - for now, let it blow up so easier to detect
    facets = reply['facet_counts']['facet_fields']
    works = [work_object(w) for w in reply['response']['docs']]

    def get_facet(f, limit=None):
        return list(web.group(facets[f][: limit * 2] if limit else facets[f], 2))

    return web.storage(
        num_found=int(reply['response']['numFound']),
        works=add_availability(works),
        years=[(int(k), v) for k, v in get_facet('publish_year')],
        get_facet=get_facet,
        sort=sort,
    )
def get_doc(doc):  # called from work_search template
    e_ia = doc.find("arr[@name='ia']")
    e_id_project_gutenberg = doc.find("arr[@name='id_project_gutenberg']") or []
    e_id_librivox = doc.find("arr[@name='id_librivox']") or []
    e_id_standard_ebooks = doc.find("arr[@name='id_standard_ebooks']") or []

    first_pub = None
    e_first_pub = doc.find("int[@name='first_publish_year']")
    if e_first_pub is not None:
        first_pub = e_first_pub.text
    e_first_edition = doc.find("str[@name='first_edition']")
    first_edition = None
    if e_first_edition is not None:
        first_edition = e_first_edition.text

    work_subtitle = None
    e_subtitle = doc.find("str[@name='subtitle']")
    if e_subtitle is not None:
        work_subtitle = e_subtitle.text

    if doc.find("arr[@name='author_key']") is None:
        assert doc.find("arr[@name='author_name']") is None
        authors = []
    else:
        ak = [e.text for e in doc.find("arr[@name='author_key']")]
        an = [e.text for e in doc.find("arr[@name='author_name']")]
        authors = [
            web.storage(
                key=key,
                name=name,
                url="/authors/{}/{}".format(
                    key, (urlsafe(name) if name is not None else 'noname')
                ),
            )
            for key, name in zip(ak, an)
        ]

    cover = doc.find("str[@name='cover_edition_key']")
    languages = doc.find("arr[@name='language']")
    e_public_scan = doc.find("bool[@name='public_scan_b']")
    e_lending_edition = doc.find("str[@name='lending_edition_s']")
    e_lending_identifier = doc.find("str[@name='lending_identifier_s']")
    e_collection = doc.find("str[@name='ia_collection_s']")
    collections = set()
    if e_collection is not None:
        collections = set(e_collection.text.split(';'))

    doc = web.storage(
        key=doc.find("str[@name='key']").text,
        title=doc.find("str[@name='title']").text,
        edition_count=int(doc.find("int[@name='edition_count']").text),
        ia=[e.text for e in (e_ia if e_ia is not None else [])],
        has_fulltext=(doc.find("bool[@name='has_fulltext']").text == 'true'),
        public_scan=(
            (e_public_scan.text == 'true')
            if e_public_scan is not None
            else (e_ia is not None)
        ),
        lending_edition=(
            e_lending_edition.text if e_lending_edition is not None else None
        ),
        lending_identifier=(
            e_lending_identifier.text if e_lending_identifier is not None else None
        ),
        collections=collections,
        authors=authors,
        first_publish_year=first_pub,
        first_edition=first_edition,
        subtitle=work_subtitle,
        cover_edition_key=(cover.text if cover is not None else None),
        languages=languages and [lang.text for lang in languages],
        id_project_gutenberg=[e.text for e in e_id_project_gutenberg],
        id_librivox=[e.text for e in e_id_librivox],
        id_standard_ebooks=[e.text for e in e_id_standard_ebooks],
    )

    doc.url = doc.key + '/' + urlsafe(doc.title)
    return doc
def get_publishers(self):
    d = self._get_solr_result()
    return [web.storage(name=p, count=count) for p, count in d['facets']['publishers']]
def GET(self):
    from . import search
    result = search.get_solr().select('*:*', rows=0, facets=['language'], facet_limit=500)
    languages = [
        web.storage(
            name=get_language_name(row.value),
            key='/languages/' + row.value,
            count=row.count
        )
        for row in result['facets']['language']
    ]
    return render_template("languages/index", languages)
def GET(self):
    page = web.ctx.site.get("/admin/block") or web.storage(ips=[
        web.storage(ip="127.0.0.1", duration="1 week", since="1 day")
    ])
    return render_template("admin/block", page)
def flash(message, category="info"):
    flashes = web.ctx.setdefault('flashes', [])
    flashes.append(web.storage(category=category, message=message))
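A small sketch of how flashes accumulate on the request context, assuming web.ctx is populated per request the way web.py normally does; the messages are hypothetical.

flash("Changes saved")                            # default category "info"
flash("Could not reach solr", category="error")
# web.ctx.flashes now holds both entries:
# [<Storage {'category': 'info', 'message': 'Changes saved'}>,
#  <Storage {'category': 'error', 'message': 'Could not reach solr'}>]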
def process(post):
    post = web.storage(post)
    post.pubdate = parse_datetime(post.pubdate)
    return post
def get_province(self):
    lsDataSet = Province().query.filter(Province.IsFlag != 0).all()
    lsData = []
    for objData in lsDataSet:
        lsData.append(web.storage(**obj_to_dict(objData.copy(bind=False))))
    return FuncResult(success=True, msg='Operation successful!', data=lsData)
def get_authors(doc):
    return [
        web.storage(key=a.key, name=a.name or None)
        for a in doc.get_authors()
    ]
def _load(self, key, revision=None):
    doc = self.get(key, revision=revision)
    data = doc.dict()
    data = web.storage(common.parse_query(data))
    return self._process_dict(data)