def req_path_to_full_path(req_path, pages_path=conf.pages_path):
    """
    >>> pages_path = "/tmp/pages/"
    >>> req_path_to_full_path("sandbox1", pages_path)
    '/tmp/pages/sandbox1.md'
    >>> req_path_to_full_path("sandbox1/", pages_path)
    '/tmp/pages/sandbox1/'
    >>> req_path_to_full_path("hacking/fetion/fetion-protocol/", pages_path)
    '/tmp/pages/hacking/fetion/fetion-protocol/'
    >>> req_path_to_full_path("hacking/fetion/fetion-protocol/method-option.md", pages_path)
    '/tmp/pages/hacking/fetion/fetion-protocol/method-option.md'
    """
    req_path = web.rstrips(req_path, ".md")
    req_path = web.rstrips(req_path, ".markdown")

    if not req_path.endswith("/"):
        path_md = "%s.md" % os.path.join(pages_path, req_path)
        path_markdown = "%s.markdown" % os.path.join(pages_path, req_path)
        if os.path.exists(path_md):
            return path_md
        elif os.path.exists(path_markdown):
            return path_markdown
        else:
            return path_md
    elif req_path == "/":
        return pages_path
    else:
        return os.path.join(pages_path, req_path)
def req_path_to_local_full_path(req_path, folder_pages_full_path): """ >>> folder_pages_full_path = "/tmp/pages/" >>> req_path_to_local_full_path("sandbox1", folder_pages_full_path) '/tmp/pages/sandbox1.md' >>> req_path_to_local_full_path("sandbox1/", folder_pages_full_path) '/tmp/pages/sandbox1/' >>> req_path_to_local_full_path("hacking/fetion/fetion-protocol/", folder_pages_full_path) '/tmp/pages/hacking/fetion/fetion-protocol/' >>> req_path_to_local_full_path("hacking/fetion/fetion-protocol/method-option.md", folder_pages_full_path) '/tmp/pages/hacking/fetion/fetion-protocol/method-option.md' >>> req_path_to_local_full_path("~all", folder_pages_full_path) '/tmp/pages/' >>> req_path_to_local_full_path("/", folder_pages_full_path) '/tmp/pages/' >>> req_path_to_local_full_path("", folder_pages_full_path) '/tmp/pages/' """ req_path = web.rstrips(req_path, ".md") req_path = web.rstrips(req_path, ".markdown") if req_path in consts.g_special_paths: return folder_pages_full_path elif not req_path.endswith("/"): HOME_PAGE = "" if req_path == HOME_PAGE: return folder_pages_full_path path_md = "%s.md" % os.path.join(folder_pages_full_path, req_path) path_markdown = "%s.markdown" % os.path.join(folder_pages_full_path, req_path) if os.path.exists(path_md): return path_md elif os.path.exists(path_markdown): return path_markdown else: return path_md elif req_path == "/": return folder_pages_full_path else: return os.path.join(folder_pages_full_path, req_path)
def demog_to_dist(demog, district):
    if demog:
        district.cook_index = get_int(demog, 'Cook Partisan Voting Index')
        district.area_sqmi = cleanint(web.rstrips(
            web.rstrips(demog['Area size'], ' sq. mi.'), ' square miles'))
        district.poverty_pct = get_int(demog, 'Poverty status')
        district.median_income = (get_int(demog, 'Median income') or
                                  get_int(demog, 'Median Income'))

        (district.est_population_year, district.est_population) = \
            coalesce_population(demog, [
                (2006, 'Pop. 2006 (est)'),
                (2005, 'Pop. 2005 (est)'),
                (2000, 'Pop. 2000'),
                (2006, 'Population 2006 (est)'),
                (2005, 'Population 2005 (est)'),
                (2000, 'Population 2000'),
            ])
def compute_index(self, doc):
    key = doc['key']
    index = common.flatten_dict(doc)

    for k, v in index:
        # for handling last_modified.value
        if k.endswith(".value"):
            k = web.rstrips(k, ".value")

        if k.endswith(".key"):
            yield web.storage(key=key, datatype="ref",
                              name=web.rstrips(k, ".key"), value=v)
        elif isinstance(v, basestring):
            yield web.storage(key=key, datatype="str", name=k, value=v)
        elif isinstance(v, int):
            yield web.storage(key=key, datatype="int", name=k, value=v)
def query(frm=None, to=None, source_id=None, limit=None, offset=None, order=None):
    """Queries for matching messages and returns their ids."""
    where = ""
    if frm:
        where += "from_id = $frm and "
    if to:
        where += "to_id = $to and "
    if source_id:
        where += "source_id = $source_id and "
    # strings are immutable, so the stripped result must be assigned back
    where = web.rstrips(where, "and ")
    try:
        return db.select("messages", where=where or None,
                         limit=limit, offset=offset, order=order)
    except Exception, details:
        print where, details
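# A minimal sketch of the pitfall fixed in query() above: web.py's
# rstrips returns a new string (strings are immutable), so discarding
# its result leaves the original untouched.
import web

where = "from_id = $frm and "
web.rstrips(where, "and ")           # no-op: the result is thrown away
assert where == "from_id = $frm and "
where = web.rstrips(where, "and ")   # correct: assign the result back
assert where == "from_id = $frm "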
def compute_index(self, doc): """Returns an iterator with (datatype, key, value) for each value be indexed.""" index = common.flatten_dict(doc) # skip special values and /type/text skip = [ "id", "key", "type.key", "revision", "latest_revison", "last_modified", "created", ] index = { (k, v) for k, v in index if k not in skip and not k.endswith(".value") and not k.endswith(".type") } for k, v in index: if k.endswith(".key"): yield 'ref', web.rstrips(k, ".key"), v elif isinstance(v, str): yield 'str', k, v elif isinstance(v, int): yield 'int', k, v
def load_templates(self, path, lazy=False):
    def get_template(render, name):
        tokens = name.split(os.path.sep)
        render = getattr(render, name)
        render.filepath = '%s/%s.html' % (path, name)
        return render

    def set_template(render, name):
        t = get_template(render, name)
        # disable caching in debug mode
        if not web.config.debug:
            self[name] = t
        return t

    render = web.template.render(path)

    # assuming all templates have .html extension
    names = [web.rstrips(p, '.html') for p in find(path) if p.endswith('.html')]
    for name in names:
        if lazy:
            def load(render=render, name=name):
                return set_template(render, name)

            self[name] = LazyTemplate(load, name=path + '/' + name,
                                      filepath=path + '/' + name + '.html')
        else:
            self[name] = get_template(render, name)
def main():
    assert os.path.exists(ALMANAC_DIR), ALMANAC_DIR
    files = glob.glob(ALMANAC_DIR + '*/people/*/rep_*.htm') + \
            glob.glob(ALMANAC_DIR + '*/people/*/*s[12].htm')
    files.sort()
    for fn in files:
        district = web.storage()
        demog = None
        dist = web.lstrips(web.rstrips(fn.split('/')[-1], '.htm'), 'rep_')
        diststate = dist[0:2].upper()
        distnum = dist[-2:]
        distname = tools.fixdist(diststate + '-' + distnum)

        d = almanac.scrape_person(fn)
        load_election_results(d, distname)

        if ALMANAC_DIR + '2008' in fn:
            if 'demographics' in d:
                demog = d['demographics']
            elif distname[-2:] == '00' or '-' not in distname:
                # if -00 then this district is the same as the state.
                #print "Using state file for:", distname
                statefile = ALMANAC_DIR + '2008/states/%s/index.html' % diststate.lower()
                demog = almanac.scrape_state(statefile).get('state')

        demog_to_dist(demog, district)

        district.almanac = 'http://' + d['filename'][d['filename'].find('nationaljournal.com'):]
        #print 'district:', distname, pformat(district)
        db.update('district', where='name=$distname', vars=locals(), **district)
def make_bsddb(dbfile, dump_file): import bsddb db = bsddb.btopen(dbfile, "w", cachesize=1024 * 1024 * 1024) indexable_keys = { "authors.key", "works.key", # edition "authors.author.key", "subjects", "subject_places", "subject_people", "subject_times", # work } for type, key, revision, timestamp, json_data in read_tsv(dump_file): db[key] = json_data d = json.loads(json_data) index = [(k, v) for k, v in flatten_dict(d) if k in indexable_keys] for k, v in index: k = web.rstrips(k, ".key") if k.startswith("subject"): v = "/" + v.lower().replace(" ", "_") dbkey = web.safestr(f"by_{k}{v}") if dbkey in db: db[dbkey] = db[dbkey] + " " + key else: db[dbkey] = key db.close() log("done")
def POST(self, key): i = web.input(_method="POST") if "_delete" in i: doc = web.ctx.site.store.get(key) if doc: doc['current_status'] = "deleted" web.ctx.site.store[doc['_key']] = doc add_flash_message("info", "The requested library has been deleted.") raise web.seeother("/libraries/dashboard") i._key = web.rstrips(i.key, "/").replace(" ", "_") page = libraries_dashboard()._create_pending_library(i) if web.ctx.site.get(page.key): add_flash_message("error", "URL %s is already used. Please choose a different one." % page.key) return render_template("type/library/edit", page) elif not i.key.startswith("/libraries/"): add_flash_message("error", "The key must start with /libraries/.") return render_template("type/library/edit", page) doc = web.ctx.site.store.get(key) if doc and "registered_on" in doc: page.registered_on = {"type": "/type/datetime", "value": doc['registered_on']} page._save() if doc: doc['current_status'] = "approved" doc['page_key'] = page.key web.ctx.site.store[doc['_key']] = doc raise web.seeother(page.key)
def POST(self, key):
    i = web.input()

    if "_delete" in i:
        doc = web.ctx.site.store.get(key)
        if doc:
            doc['current_status'] = "deleted"
            web.ctx.site.store[doc['_key']] = doc
        add_flash_message("info", "The requested library has been deleted.")
        raise web.seeother("/libraries/dashboard")

    i._key = web.rstrips(i.key, "/").replace(" ", "_")
    page = libraries_dashboard()._create_pending_library(i)

    if web.ctx.site.get(page.key):
        add_flash_message("error",
                          "URL %s is already used. Please choose a different one." % page.key)
        return render_template("type/library/edit", page)
    elif not i.key.startswith("/libraries/"):
        add_flash_message("error", "The key must start with /libraries/.")
        return render_template("type/library/edit", page)

    page._save()

    doc = web.ctx.site.store.get(key)
    if doc:
        doc['current_status'] = "approved"
        web.ctx.site.store[doc['_key']] = doc
    raise web.seeother(page.key)
def find_mode():
    what = web.input(_method='GET').get('m', 'view')

    path = web.ctx.path
    encoding = web.ctx.get('encoding')

    # I don't know about this encoding.
    if encoding not in encodings:
        raise web.HTTPError("406 Not Acceptable", {})

    # the encoding can be specified as part of the path; strip it off.
    if encoding:
        path = web.rstrips(path, "." + encoding)

    if what in modes:
        cls = modes[what].get(encoding)

        # mode is available, but not for the requested encoding
        if cls is None:
            raise web.HTTPError("406 Not Acceptable", {})

        args = [path]
        return cls, args
    else:
        return None, None
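# For example (assuming "json" is among the registered encodings): a
# request for /sandbox.json reaches find_mode with
# web.ctx.encoding == "json", so the ".json" suffix is stripped and the
# mode handler is looked up for the bare path /sandbox.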
def make_bsddb(dbfile, dump_file):
    import bsddb
    db = bsddb.btopen(dbfile, 'w', cachesize=1024*1024*1024)

    from infogami.infobase.utils import flatten_dict

    indexable_keys = set([
        "authors.key", "works.key",  # edition
        "authors.author.key", "subjects", "subject_places",
        "subject_people", "subject_times"  # work
    ])
    for type, key, revision, timestamp, json in read_tsv(dump_file):
        db[key] = json
        d = simplejson.loads(json)
        index = [(k, v) for k, v in flatten_dict(d) if k in indexable_keys]
        for k, v in index:
            k = web.rstrips(k, ".key")
            if k.startswith("subject"):
                v = '/' + v.lower().replace(" ", "_")

            dbkey = web.safestr('by_%s%s' % (k, v))
            if dbkey in db:
                db[dbkey] = db[dbkey] + " " + key
            else:
                db[dbkey] = key
    db.close()
    log("done")
def find_page(): path = web.ctx.path encoding = web.ctx.get("encoding") # I don't about this mode. if encoding not in encodings: raise web.HTTPError("406 Not Acceptable", {}) # encoding can be specified as part of path, strip the encoding part of path. if encoding: path = web.rstrips(path, "." + encoding) for p in get_sorted_paths(): m = web.re_compile("^" + p + "$").match(path) if m: cls = pages[p].get(encoding) or pages[p].get(None) args = m.groups() # FeatureFlags support. # A handler can be enabled only if a feature is active. if hasattr(cls, "is_enabled") and bool(cls().is_enabled()) is False: continue return cls, args return None, None
def main():
    assert os.path.exists(ALMANAC_DIR), ALMANAC_DIR
    files = glob.glob(ALMANAC_DIR + 'people/*/rep_*.htm') + \
            glob.glob(ALMANAC_DIR + 'people/*/*s[12].htm')
    files.sort()
    for fn in files:
        district = web.storage()
        demog = None
        dist = web.lstrips(web.rstrips(fn.split('/')[-1], '.htm'), 'rep_')
        diststate = dist[0:2].upper()
        distnum = dist[-2:]
        distname = tools.fixdist(diststate + '-' + distnum)

        d = almanac.scrape_person(fn)
        load_election_results(d, distname)

        if 'demographics' in d:
            demog = d['demographics']
        elif distname[-2:] == '00' or '-' not in distname:
            # if -00 then this district is the same as the state.
            #print "Using state file for:", distname
            statefile = ALMANAC_DIR + 'states/%s/index.html' % diststate.lower()
            demog = almanac.scrape_state(statefile).get('state')

        demog_to_dist(demog, district)

        district.almanac = 'http://' + d['filename'][d['filename'].find('nationaljournal.com'):]
        #print 'district:', distname, pformat(district)
        db.update('district', where='name=$distname', vars=locals(), **district)
def POST(self, key):
    i = web.input(_method="POST")

    if "_delete" in i:
        doc = web.ctx.site.store.get(key)
        if doc:
            doc['current_status'] = "deleted"
            web.ctx.site.store[doc['_key']] = doc
        add_flash_message("info", "The requested library has been deleted.")
        raise web.seeother("/libraries/dashboard")

    i._key = web.rstrips(i.key, "/").replace(" ", "_")
    page = libraries_dashboard()._create_pending_library(i)

    if web.ctx.site.get(page.key):
        raise web.notfound(
            "URL %s is already used. Please choose a different one." % page.key
        )
    elif not i.key.startswith("/libraries/"):
        raise web.notfound("The key must start with /libraries/.")

    doc = web.ctx.site.store.get(key)
    if doc and "registered_on" in doc:
        page.registered_on = {"type": "/type/datetime", "value": doc['registered_on']}

    page._save()

    if doc:
        doc['current_status'] = "approved"
        doc['page_key'] = page.key
        web.ctx.site.store[doc['_key']] = doc
    raise web.seeother(page.key)
def find_page():
    path = web.ctx.path
    encoding = web.ctx.get('encoding')

    # I don't know about this encoding.
    if encoding not in encodings:
        raise web.HTTPError("406 Not Acceptable", {})

    # the encoding can be specified as part of the path; strip it off.
    if encoding:
        path = web.rstrips(path, "." + encoding)

    def sort_paths(paths):
        """Sort paths such that wildcards go at the end."""
        return sorted(paths, key=lambda path: ('.*' in path, path))

    for p in sort_paths(pages):
        m = re.match('^' + p + '$', path)
        if m:
            cls = pages[p].get(encoding) or pages[p].get(None)
            args = m.groups()

            # FeatureFlags support.
            # A handler can be enabled only if a feature is active.
            if hasattr(cls, "is_enabled") and bool(cls().is_enabled()) is False:
                continue
            return cls, args
    return None, None
def POST_do_claim(self):
    if jt.site.password:
        return web.seeother(jt.site.url)
    i = web.input('password', 'email', 'security')
    f = forms.claim_site()
    if not f.validates():
        return render('claim_site', vars=locals())

    db.claim_site(i.password, i.email, i.security)
    auth.signin(i.password)
    web.setcookie('success', "Congratulations! You've claimed your site.")
    site_url = web.rstrips(
        web.lstrips(web.lstrips(jt.site.url, 'http://'), 'https://'), '/')
    sendmail(
        'The Jottit Team <*****@*****.**>', i.email,
        "You claimed " + site_url, """\
Thanks for claiming your site at Jottit.com! It's at:
  https://%(site_url)s

recover password: https://%(site_url)s/site/forgot-password

Let us know if you have any thoughts or problems -- just reply to this
email (or email [email protected]).

- Simon and Aaron, Jottit
""" % dict(email=i.email, site_url=site_url, password=i.password))
    return web.seeother(jt.site.url)
def reload(self, servers):
    for s in servers:
        s = web.rstrips(s, "/") + "/_reload"
        yield "<h3>" + s + "</h3>"
        try:
            response = urllib.urlopen(s).read()
            yield "<p><pre>" + response[:100] + "</pre></p>"
        except:
            yield "<p><pre>%s</pre></p>" % traceback.format_exc()
def reload(self, servers):
    for s in servers:
        s = web.rstrips(s, "/") + "/_reload"
        yield "<h3>" + s + "</h3>"
        try:
            response = requests.get(s).text
            yield "<p><pre>" + response[:100] + "</pre></p>"
        except:
            yield "<p><pre>%s</pre></p>" % traceback.format_exc()
def main():
    districts = simplejson.load(file(DATA_DIR + '/load/districts/index.json'))
    assert os.path.exists(ALMANAC_DIR), ALMANAC_DIR
    out = {}
    for fn in glob.glob(ALMANAC_DIR + 'people/*/rep*'):
        district = web.storage()
        dist = web.lstrips(web.rstrips(fn.split('/')[-1], '.htm'), 'rep_')
        diststate = dist[0:2].upper()
        distnum = dist[-2:]

        d = almanac.scrape_person(fn)

        if 'demographics' in d:
            demog = d['demographics']
        else:
            #@@ maybe only when diststate + '-00' in districts?
            statefile = ALMANAC_DIR + 'states/%s/index.html' % diststate.lower()
            demog = almanac.scrape_state(statefile).get('state')

        if demog:
            district.cook_index = get_int(demog, 'Cook Partisan Voting Index')
            district.area_sqmi = cleanint(web.rstrips(demog['Area size'], ' sq. mi.'))
            district.poverty_pct = get_int(demog, 'Poverty status')
            district.median_income = get_int(demog, 'Median income')
            (district.est_population_year, district.est_population) = \
                coalesce_population(demog, [
                    (2006, 'Pop. 2006 (est)'),
                    (2005, 'Pop. 2005 (est)'),
                    (2000, 'Pop. 2000'),
                ])

        if 'interest_group_rating' in d:
            district.interest_group_rating = d['interest_group_rating']

        district.almanac = 'http://' + d['filename'][d['filename'].find('nationaljournal.com'):]

        # Nationaljournal numbers districts of congressmen-at-large
        # and territorial delegates '01' in its URLs, but our
        # districts file numbers them '00'.
        if distnum == '01' and diststate + '-00' in districts:
            distnum = '00'

        out[diststate + '-' + distnum] = district
    return out
def __getitem__(self, key):
    key = self.process_key(key)

    # check for a missing root before stripping, otherwise the
    # `root is None` test below can never be true
    root = self.getroot()
    if root is None or context.get('rescue_mode'):
        raise KeyError, key
    root = web.rstrips(root or "", "/")

    value = self.templates[root + key]
    if isinstance(value, LazyTemplate):
        value = value.func()
    return value
def notfound():
    if key in ["id", "olid"] and config.get("upstream_base_url"):
        # this is only used in development
        base = web.rstrips(config.upstream_base_url, "/")
        raise web.redirect(base + web.ctx.fullpath)
    elif (config.default_image and i.default.lower() != "false"
          and not is_valid_url(i.default)):
        return read_file(config.default_image)
    elif is_valid_url(i.default):
        raise web.seeother(i.default)
    else:
        raise web.notfound("")
def __getitem__(self, key):
    key = self.process_key(key)

    root = self.getroot()
    if root is None or context.get('rescue_mode'):
        raise KeyError(key)
    root = web.rstrips(root or "", "/")

    value = self.templates[root + key]
    if isinstance(value, LazyTemplate):
        value = value.func()
    return value
def load_templates(self, path, lazy=False):
    # assuming all templates have .html extension
    names = [web.rstrips(p, '.html') for p in find(path) if p.endswith('.html')]
    for name in names:
        filepath = path + '/' + name + '.html'
        if lazy:
            def load(render=render, name=name, filepath=filepath):
                self[name] = self.get_template(filepath)
                return self[name]

            self[name] = LazyTemplate(load, name=name, filepath=filepath)
        else:
            self[name] = self.get_template(filepath)
def compute_index(self, doc):
    """Returns an iterator with (datatype, key, value) for each value to be indexed."""
    index = common.flatten_dict(doc)

    # skip special values and /type/text
    skip = ["id", "key", "type.key", "revision", "latest_revison",
            "last_modified", "created"]
    index = set((k, v) for k, v in index
                if k not in skip
                and not k.endswith(".value")
                and not k.endswith(".type"))

    for k, v in index:
        if k.endswith(".key"):
            yield 'ref', web.rstrips(k, ".key"), v
        elif isinstance(v, basestring):
            yield 'str', k, v
        elif isinstance(v, int):
            yield 'int', k, v
def things(self, query):
    limit = query.pop('limit', 100)
    offset = query.pop('offset', 0)

    keys = set(self.docs)

    for k, v in query.items():
        if isinstance(v, dict):
            # query keys need to be flattened properly,
            # this corrects any nested keys that have been included
            # in values.
            flat = common.flatten_dict(v)[0]
            k += '.' + web.rstrips(flat[0], '.key')
            v = flat[1]
        keys = {k for k in self.filter_index(self.index, k, v) if k in keys}

    keys = sorted(keys)
    return keys[offset : offset + limit]
def run(cmd):
    args = shlex.split(cmd)
    try:
        # shlex.split already tokenized the command, so the args list
        # must be executed without an intermediate shell
        p_obj = subprocess.Popen(args, stdout=subprocess.PIPE)
        # resp = p_obj.stdout.read().strip("\n")
        resp = p_obj.stdout.read()
    except TypeError:
        resp = None

    if not resp:
        # resp = os.popen(cmd).read().strip().split('\n')
        resp = os.popen(cmd).read().strip()

    resp = web.rstrips(resp, "\n")
    resp = web.safeunicode(resp)

    return resp
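# A minimal usage sketch for run() above, assuming Python 2 on a POSIX
# system where /bin/echo exists (stdout.read() returns a byte string
# that web.safeunicode converts to unicode):
assert run("echo hello world") == u"hello world"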
def filter_index(self, index, name, value): operations = { "~": lambda i, value: isinstance(i.value, six.string_types) and i.value. startswith(web.rstrips(value, "*")), "<": lambda i, value: i.value < value, ">": lambda i, value: i.value > value, "!": lambda i, value: i.value != value, "=": lambda i, value: i.value == value, } pattern = ".*([%s])$" % "".join(operations) rx = web.re_compile(pattern) m = rx.match(name) if m: op = m.group(1) name = name[:-1] else: op = "=" f = operations[op] if name == 'isbn_': names = ['isbn_10', 'isbn_13'] else: names = [name] if isinstance( value, list): # Match any of the elements in value if it's a list for n in names: for i in index: if i.name == n and any(f(i, v) for v in value): yield i.key else: # Otherwise just match directly for n in names: for i in index: if i.name == n and f(i, value): yield i.key
def find_page():
    path = web.ctx.path
    encoding = web.ctx.get('encoding')

    # I don't know about this encoding.
    if encoding not in encodings:
        raise web.HTTPError("406 Not Acceptable", {})

    # the encoding can be specified as part of the path; strip it off.
    if encoding:
        path = web.rstrips(path, "." + encoding)

    for p in pages:
        m = re.match('^' + p + '$', path)
        if m:
            cls = pages[p].get(encoding) or pages[p].get(None)
            args = m.groups()
            return cls, args
    return None, None
def load_templates(self, path, lazy=False):
    def get_template(render, name):
        tokens = name.split(os.path.sep)
        render = getattr(render, name)
        render.filepath = '%s/%s.html' % (path, name)
        return render

    def set_template(render, name):
        self[name] = get_template(render, name)
        return self[name]

    render = web.template.render(path)

    # assuming all templates have .html extension
    names = [web.rstrips(p, '.html') for p in find(path) if p.endswith('.html')]
    for name in names:
        if lazy:
            self[name] = LazyTemplate(
                lambda render=render, name=name: set_template(render, name),
                name=path + '/' + name)
        else:
            self[name] = get_template(render, name)
def main():
    options, args = parse_args()

    if options.src.startswith("http://"):
        src = OpenLibrary(options.src)
    else:
        src = Disk(options.src)

    if options.dest.startswith("http://"):
        dest = OpenLibrary(options.dest)
        section = "[%s]" % web.rstrips(options.dest, "http://")
        if section in read_lines(os.path.expanduser("~/.olrc")):
            dest.autologin()
        else:
            dest.login("admin", "admin123")
    else:
        dest = Disk(options.dest)

    keys = args
    keys = list(expand(src, keys))

    copy(src, dest, keys, comment=options.comment, recursive=options.recursive)
def find_page():
    path = web.ctx.path
    encoding = web.ctx.get('encoding')

    # I don't know about this encoding.
    if encoding not in encodings:
        raise web.HTTPError("406 Not Acceptable", {})

    # the encoding can be specified as part of the path; strip it off.
    if encoding:
        path = web.rstrips(path, "." + encoding)

    def sort_paths(paths):
        """Sort paths such that wildcards go at the end."""
        return sorted(paths, key=lambda path: ('.*' in path, path))

    for p in sort_paths(pages):
        m = re.match('^' + p + '$', path)
        if m:
            cls = pages[p].get(encoding) or pages[p].get(None)
            args = m.groups()
            return cls, args
    return None, None
def filter_index(self, index, name, value):
    operations = {
        "~": lambda i, value: (isinstance(i.value, basestring)
                               and i.value.startswith(web.rstrips(value, "*"))),
        "<": lambda i, value: i.value < value,
        ">": lambda i, value: i.value > value,
        "!": lambda i, value: i.value != value,
        "=": lambda i, value: i.value == value,
    }
    pattern = ".*([%s])$" % "".join(operations)
    rx = web.re_compile(pattern)

    m = rx.match(name)
    if m:
        op = m.group(1)
        name = name[:-1]
    else:
        op = "="

    f = operations[op]

    if isinstance(value, list):  # Match any of the elements in value if it's a list
        for i in index:
            if i.name == name and any(f(i, v) for v in value):
                yield i.key
    else:  # Otherwise just match directly
        for i in index:
            if i.name == name and f(i, value):
                yield i.key
def filter_index(self, index, name, value):
    operations = {
        "~": lambda i, value: (isinstance(i.value, basestring)
                               and i.value.startswith(web.rstrips(value, "*"))),
        "<": lambda i, value: i.value < value,
        ">": lambda i, value: i.value > value,
        "!": lambda i, value: i.value != value,
        "=": lambda i, value: i.value == value,
    }
    pattern = ".*([%s])$" % "".join(operations)
    rx = web.re_compile(pattern)

    m = rx.match(name)
    if m:
        op = m.group(1)
        name = name[:-1]
    else:
        op = "="

    f = operations[op]

    for i in index:
        if i.name == name and f(i, value):
            yield i.key
def cleanname(fn):
    return web.rstrips(
        web.rstrips(
            web.lstrips(web.rstrips(fn, '.gpg'), 'reply-'),
            '_doc'),
        '_msg')
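# A quick illustration of cleanname's stripping chain (filenames are
# made up), relying on web.py's lstrips/rstrips removing the prefix or
# suffix only when present:
assert cleanname('reply-alpha_doc.gpg') == 'alpha'  # .gpg, reply-, _doc
assert cleanname('beta_msg.gpg') == 'beta'          # .gpg, then _msg
assert cleanname('gamma.txt') == 'gamma.txt'        # nothing to strip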
""" Generates `wordlist` from scowl-7.1 (http://wordlist.sourceforge.net/). """ import web import string def just7(x): return all(c in string.printable for c in x) words = set() for i in [35, 20, 10]: words.update(web.rstrips(x.strip(), "'s") for x in file('english-words.%s' % i) if just7(x)) fh = file('wordlist', 'w') for word in words: fh.write('%s\n' % word)
def _get_user_root():
    user_root = infogami.config.get("infobase", {}).get("user_root", "/user")
    return web.rstrips(user_root, "/")
def get_ol_url():
    return web.rstrips(config.ol_url, "/")
def __init__(self, root):
    self.root = web.rstrips(root, '/')
    self.docs = None
    self._text = None
    self.last_modified = None
def unprocess_key(self, key):
    key = web.lstrips(key, '/templates/')
    key = web.rstrips(key, '.tmpl')
    return key
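# A small sketch of unprocess_key's effect, assuming web.py's
# lstrips/rstrips semantics (`obj` is a hypothetical instance of the
# enclosing class):
assert obj.unprocess_key('/templates/site.tmpl') == 'site'
assert obj.unprocess_key('/templates/a/b.tmpl') == 'a/b'
assert obj.unprocess_key('other.tmpl') == 'other'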