def dirstrip(f, dir):
    """Strips dir from f.

    >>> dirstrip('a/b/c/d', 'a/b/')
    'c/d'
    """
    f = web.lstrips(f, dir)
    return web.lstrips(f, '/')
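All of the snippets in this collection lean on `web.lstrips` from web.py, which removes a prefix from a string only when the string actually starts with it. A minimal sketch of those semantics (my own illustration, not the web.py source):

def lstrips_sketch(text, prefix):
    # Drop prefix if present; otherwise return text unchanged.
    if text.startswith(prefix):
        return text[len(prefix):]
    return text

assert lstrips_sketch('a/b/c', 'a/') == 'b/c'
assert lstrips_sketch('a/b/c', 'x/') == 'a/b/c'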
def senatetest():
    '''Creates a file sen/schumer.html with schumer's contact page.'''
    # not working - 6-Jan-2011
    notworking = ['hagan', 'corker', 'shelby', 'grassley', 'senate', 'coburn',
                  'inhofe', 'crapo', 'risch', 'lieberman', 'brown', 'moran',
                  'roberts']
    sendb = get_senate_offices()
    statfile = open("senate-test-out.txt", "w")
    for state in sendb:
        for member in sendb[state]:
            sen = web.lstrips(web.lstrips(web.lstrips(member, 'http://'),
                                          'https://'), 'www.').split('.')[0]
            if sen in WYR_MANUAL:
                member = WYR_MANUAL[sen]
            #if sen != 'billnelson': continue
            #if sen in working + failure: continue
            print repr(sen)
            q = None
            try:
                q = writerep_general(member, prepare_i(state))
                file('sen/%s.html' % sen, 'w').write('<base href="%s"/>' % member + q)
                #subprocess.Popen(['open', 'sen/%s.html' % sen])
                #subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE).stdin.write(', ' + repr(sen))
                if q.lower().find("thank") >= 0:
                    status = "Thanked"
                else:
                    status = "Failed. Reason unknown."
            except Exception as e:
                print "Caught exception on senator: %s" % member
                status = "Failed. Exception occurred: %s" % str(e)
            statfile.write("Member: %s, Status: %s\n" % (member, status))
            statfile.flush()
def senatetest2(member2email):
    sendb = get_senate_offices()
    for state in sendb:
        for member in sendb[state]:
            sen = web.lstrips(web.lstrips(web.lstrips(member, 'http://'),
                                          'https://'), 'www.').split('.')[0]
            if sen in WYR_MANUAL:
                member = WYR_MANUAL[sen]
            if sen != member2email:
                continue
            print repr(sen)
            q = writerep_general(member, prepare_i(state))
            if not q:
                print "Failed to write to %s" % member2email
                import sys
                sys.exit(1)
            file('sen/%s.html' % sen, 'w').write('<base href="%s"/>' % member + q)
            success = False
            # If you're getting thanked, you're probably successful.
            if "thank" in q.lower() or "your message has been submitted" in q.lower():
                success = True
            errorString = getError(q)
            print "ErrorString: ", errorString
            subprocess.Popen(['open', 'sen/%s.html' % sen])
            subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE).stdin.write(', ' + repr(sen))
            if success:
                print "Successfully wrote to %s" % member2email
            else:
                print "Failed to write to %s" % member2email
                import sys
                sys.exit(1)
def POST_do_claim(self):
    if jt.site.password:
        return web.seeother(jt.site.url)
    i = web.input('password', 'email', 'security')
    f = forms.claim_site()
    if not f.validates():
        return render('claim_site', vars=locals())
    db.claim_site(i.password, i.email, i.security)
    auth.signin(i.password)
    web.setcookie('success', "Congratulations! You've claimed your site.")
    site_url = web.rstrips(
        web.lstrips(web.lstrips(jt.site.url, 'http://'), 'https://'), '/')
    sendmail(
        'The Jottit Team <*****@*****.**>',
        i.email,
        "You claimed " + site_url,
        """\
Thanks for claiming your site at Jottit.com! It's at:

  https://%(site_url)s

recover password: https://%(site_url)s/site/forgot-password

Let us know if you have any thoughts or problems -- just reply to this
email (or email [email protected]).

- Simon and Aaron, Jottit
""" % dict(email=i.email, site_url=site_url, password=i.password))
    return web.seeother(jt.site.url)
def signin_required():
    f = forms.signin()
    return_to = web.lstrips(web.ctx.fullpath, '/')
    if not jt.site.public_url:
        return_to = web.lstrips(return_to, jt.site.secret_url + '/')
    f.fill(secret_url=jt.site.secret_url, return_to=return_to)
    page_title = 'Please enter the site-wide password'
    return view.render('signin', vars=locals())
def find(path):
    """Find all files in the file hierarchy rooted at path.

    >> find('..../web')
    ['db.py', 'http.py', 'wsgiserver/__init__.py', ....]
    """
    for dirname, dirs, files in os.walk(path):
        dirname = web.lstrips(dirname, path)
        dirname = web.lstrips(dirname, '/')
        for f in files:
            yield os.path.join(dirname, f)
def getSenatorStateAndContactLink(self, sen2contact):
    '''For a given senator, get the contact link.

    Senators' names must match what appears in their URL,
    e.g. scottbrown, brown, boxer.
    '''
    for state in self.sendb:
        for contactlink in self.sendb.get(state, []):
            sen = web.lstrips(web.lstrips(web.lstrips(contactlink, 'http://'),
                                          'https://'), 'www.').split('.')[0]
            if sen == sen2contact:
                return (state, contactlink)
    return (None, None)
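The triple `lstrips` chain above recurs in several snippets here: it peels the scheme and `www.` off a contact URL and keeps the first hostname label as the senator's slug. A standalone version for clarity; `url_to_slug` is a name introduced for illustration, not part of the original code:

def url_to_slug(url):
    # e.g. 'http://www.schumer.senate.gov/contact' -> 'schumer'
    for prefix in ('http://', 'https://', 'www.'):
        url = web.lstrips(url, prefix)
    return url.split('.')[0]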
def GET(self, emailform=None, loadcontactsform=None):
    i = web.input()
    url = i.get('url', '/')
    title = i.get('title', 'The good government site with teeth')
    user_id = helpers.get_loggedin_userid()
    contacts = get_contacts(user_id)
    sender = helpers.get_user_by_email(helpers.get_loggedin_email() or
                                       helpers.get_unverified_email())
    page_or_petition = 'page'
    isdraft = False
    if not emailform:
        emailform = forms.emailform()
    track_id, description = None, None
    if url.startswith('/c/') and url != '/c/':
        url = url.rstrip('/')
        pid = web.lstrips(url, '/c/')
        p = get_petition_by_id(pid)
        isdraft = is_draft(p)
        description = p and p.description
        track_id = helpers.get_trackid(user_id, pid) if not isdraft else None
        contacts = filter(lambda c: not is_signatory(c.email, pid), contacts)
        page_or_petition = 'petition'
    msg = render_plain.share_mail(title, url, sender, description, isdraft, track_id)
    emailform.fill(subject=title, body=msg)
    loadcontactsform = loadcontactsform or forms.loadcontactsform()
    msg, msg_type = helpers.get_delete_msg()
    return render.share(title, url, emailform, contacts, loadcontactsform,
                        page_or_petition, msg)
def POST_sign(self, pid):
    i = web.input()
    sf = forms.signform()
    tocongress = to_congress(pid)
    p = get_petition_by_id(pid)
    is_new = lambda sid: not isinstance(sid, str)
    get_new = lambda sid: int(web.lstrips(sid, 'old_'))
    if tocongress:
        i.pid, i.ptitle, i.msg = pid, p.title, p.description
        wf = forms.wyrform()
        captcha_needed = require_captcha(i)
        wyr_valid = wf.validates(i) and not captcha_needed
        if captcha_needed:
            wf.valid, wf.note = False, 'Please fill the captcha below'
    else:
        wf, wyr_valid = None, True
    if sf.validates(i) and wyr_valid:
        uid = auth.assert_login(i)
        signid = save_signature(i, pid, uid)
        if is_new(signid):
            user = helpers.get_user_by_id(uid)
            sendmail_to_signatory(user, pid)
        else:
            signid = get_new(signid)
        if tocongress:
            send_to_congress(uid, i, signid)
        query = urllib.urlencode(dict(url='/c/%s' % pid, title=p.title))
        raise web.seeother('/share?%s' % query, absolute=True)
    else:
        return self.GET(pid, sf=sf, wf=wf)
def main():
    assert os.path.exists(ALMANAC_DIR), ALMANAC_DIR
    files = glob.glob(ALMANAC_DIR + '*/people/*/rep_*.htm') + \
            glob.glob(ALMANAC_DIR + '*/people/*/*s[12].htm')
    files.sort()
    for fn in files:
        district = web.storage()
        demog = None
        dist = web.lstrips(web.rstrips(fn.split('/')[-1], '.htm'), 'rep_')
        diststate = dist[0:2].upper()
        distnum = dist[-2:]
        distname = tools.fixdist(diststate + '-' + distnum)
        d = almanac.scrape_person(fn)
        load_election_results(d, distname)
        if ALMANAC_DIR + '2008' in fn:
            if 'demographics' in d:
                demog = d['demographics']
            elif distname[-2:] == '00' or '-' not in distname:
                # if -00 then this district is the same as the state.
                #print "Using state file for:", distname
                statefile = ALMANAC_DIR + '2008/states/%s/index.html' % diststate.lower()
                demog = almanac.scrape_state(statefile).get('state')
            demog_to_dist(demog, district)
        district.almanac = 'http://' + d['filename'][d['filename'].find('nationaljournal.com'):]
        #print 'district:', distname, pformat(district)
        db.update('district', where='name=$distname', vars=locals(), **district)
def process(self):
    self.content.seek(0, 0)
    env = {
        "REMOTE_ADDR": self.client.host,
        "REQUEST_METHOD": self.method,
        "PATH_INFO": self.path,
        "CONTENT_LENGTH": web.intget(self.getHeader("content-length"), 0),
        "wsgi.input": self.content,
    }
    if "?" in self.uri:
        env["QUERY_STRING"] = self.uri.split("?", 1)[1]
    for k, v in self.received_headers.iteritems():
        env["HTTP_" + k.upper()] = v
    if self.path.startswith("/static/"):
        f = web.lstrips(self.path, "/static/")
        assert "/" not in f  # @@@ big security hole
        self.write(open("static/" + f).read())
        return self.finish()
    web.webapi._load(env)
    web.ctx.trequest = self
    result = self.actualfunc()
    self.setResponseCode(int(web.ctx.status.split()[0]))
    for (h, v) in web.ctx.headers:
        self.setHeader(h, v)
    self.write(web.ctx.output)
    if not web.ctx.get("persist"):
        self.finish()
def _process_subject(self, s):
    key = s['key']
    if key.startswith("subject:"):
        key = "/subjects/" + web.lstrips(key, "subject:")
    else:
        key = "/subjects/" + key
    return {"name": s['name'], "count": s['count'], "url": key}
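For context, a sketch of the data shape this helper translates; the sample subject dict is an assumption based on the key format checked above:

s = {'key': 'subject:science_fiction', 'name': 'Science Fiction', 'count': 42}
# _process_subject(s) would return:
# {'name': 'Science Fiction', 'count': 42, 'url': '/subjects/science_fiction'}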
def main():
    assert os.path.exists(ALMANAC_DIR), ALMANAC_DIR
    files = glob.glob(ALMANAC_DIR + 'people/*/rep_*.htm') + \
            glob.glob(ALMANAC_DIR + 'people/*/*s[12].htm')
    files.sort()
    for fn in files:
        district = web.storage()
        demog = None
        dist = web.lstrips(web.rstrips(fn.split('/')[-1], '.htm'), 'rep_')
        diststate = dist[0:2].upper()
        distnum = dist[-2:]
        distname = tools.fixdist(diststate + '-' + distnum)
        d = almanac.scrape_person(fn)
        load_election_results(d, distname)
        if 'demographics' in d:
            demog = d['demographics']
        elif distname[-2:] == '00' or '-' not in distname:
            # if -00 then this district is the same as the state.
            #print "Using state file for:", distname
            statefile = ALMANAC_DIR + 'states/%s/index.html' % diststate.lower()
            demog = almanac.scrape_state(statefile).get('state')
        demog_to_dist(demog, district)
        district.almanac = 'http://' + d['filename'][d['filename'].find('nationaljournal.com'):]
        #print 'district:', distname, pformat(district)
        db.update('district', where='name=$distname', vars=locals(), **district)
def generate_docs(dir):
    shutil.rmtree(docpath(dir), ignore_errors=True)
    paths = list(find_python_sources(dir))
    submodule_dict = defaultdict(list)
    for path in paths:
        dir = os.path.dirname(path)
        if path.endswith("__init__.py"):
            dir = os.path.dirname(dir)
        submodule_dict[dir].append(path)
    for path in paths:
        dirname = os.path.dirname(path)
        if path.endswith("__init__.py"):
            submodules = [web.lstrips(docpath(s), docpath(dirname) + "/")
                          for s in submodule_dict[dirname]]
        else:
            submodules = []
        submodules.sort()
        mod = modname(path)
        text = str(t(mod, submodules))
        write(docpath(path), text)
        # set the modification time same as the source file
        mtime = os.stat(path).st_mtime
        os.utime(docpath(path), (mtime, mtime))
def get_meta(self, key):
    prefix = self.parse_key(key)[0]
    meta = finddict(SUBJECTS, prefix=prefix)
    meta = web.storage(meta)
    meta.path = web.lstrips(key, meta.prefix)
    return meta
def main():
    options, args = parse_args()

    if options.src.startswith("http://"):
        src = OpenLibrary(options.src)
    else:
        src = Disk(options.src)

    if options.dest.startswith("http://"):
        dest = OpenLibrary(options.dest)
        section = "[%s]" % web.lstrips(options.dest, "http://").strip("/")
        if section in read_lines(os.path.expanduser("~/.olrc")):
            dest.autologin()
        else:
            dest.login("admin", "admin123")
    else:
        dest = Disk(options.dest)

    for list_key in options.lists:
        copy_list(src, dest, list_key, comment=options.comment)

    keys = args
    keys = list(expand(src, keys))
    copy(src, dest, keys, comment=options.comment, recursive=options.recursive)
def unstringify(d):
    """Removes string_ prefix from every key in a dictionary.

    >>> unstringify({'string_a': 1, 'string_b': 2})
    {'a': 1, 'b': 2}
    """
    return dict([(web.lstrips(k, 'string_'), v)
                 for k, v in d.items() if k.startswith('string_')])
def GET(self, emailform=None, loadcontactsform=None):
    i = web.input()
    url = i.get('url', '/')
    title = i.get('title', 'The good government site with teeth')
    user_id = helpers.get_loggedin_userid()
    contacts = get_contacts(user_id)
    sender = helpers.get_user_by_email(helpers.get_loggedin_email() or
                                       helpers.get_unverified_email())
    page_or_petition = 'page'
    if not emailform:
        emailform = forms.emailform()
    track_id, description = None, None
    if url.startswith('/c/') and url != '/c/':
        url = url.rstrip('/')
        pid = web.lstrips(url, '/c/')
        p = get_petition_by_id(pid)
        description = p and p.description
        track_id = helpers.get_trackid(user_id, pid)
        contacts = filter(lambda c: not is_signatory(c.email, pid), contacts)
        page_or_petition = 'petition'
    msg = render_plain.share_mail(title, url, sender, description, track_id)
    emailform.fill(subject=title, body=msg)
    loadcontactsform = loadcontactsform or forms.loadcontactsform()
    msg, msg_type = helpers.get_delete_msg()
    return render.share(title, url, emailform, contacts, loadcontactsform,
                        page_or_petition, msg)
def main(
    keys: list[str],
    src="http://openlibrary.org/",
    dest="http://localhost:8080",
    comment="",
    recursive=True,
    editions=True,
    lists: list[str] = None,
    search: str = None,
    search_limit: int = 10,
):
    """
    Script to copy docs from one OL instance to another.
    Typically used to copy templates, macros, css and js from
    openlibrary.org to dev instance.

    paths can end with wildcards.

    USAGE:
        # Copy all templates
        ./scripts/copydocs.py --src http://openlibrary.org /templates/*
        # Copy specific records
        ./scripts/copydocs.py /authors/OL113592A /works/OL1098727W?v=2
        # Copy search results
        ./scripts/copydocs.py --search "publisher:librivox" --search-limit 10

    :param src: URL of the source open library server
    :param dest: URL of the destination open library server
    :param recursive: Recursively fetch all the referred docs
    :param editions: Also fetch all the editions of works
    :param lists: Copy docs from list(s)
    :param search: Run a search on open library and copy docs from the results
    """
    # Mypy doesn't handle union-ing types across if statements -_-
    # https://github.com/python/mypy/issues/6233
    src_ol: Union[Disk, OpenLibrary] = (
        OpenLibrary(src) if src.startswith("http://") else Disk(src))
    dest_ol: Union[Disk, OpenLibrary] = (
        OpenLibrary(dest) if dest.startswith("http://") else Disk(dest))

    if isinstance(dest_ol, OpenLibrary):
        section = "[%s]" % web.lstrips(dest, "http://").strip("/")
        if section in read_lines(os.path.expanduser("~/.olrc")):
            dest_ol.autologin()
        else:
            dest_ol.login("admin", "admin123")

    for list_key in (lists or []):
        copy_list(src_ol, dest_ol, list_key, comment=comment)

    if search:
        assert isinstance(src_ol, OpenLibrary), "Search only works with OL src"
        keys += [
            doc['key']
            for doc in src_ol.search(search, limit=search_limit, fields=['key'])['docs']
        ]

    keys = list(expand(src_ol, ('/' + k.lstrip('/') for k in keys)))
    copy(src_ol, dest_ol, keys, comment=comment, recursive=recursive,
         editions=editions)
def dispatch_url(urlpath, modvars, mapping, prefix="/widget"):
    """Based on handle in web.py's request.py.

    Simplified to work only for the current module.
    """
    _d, _d, path, _d, query, fragment = urlparse.urlparse(urlpath)
    path = web.lstrips(path, base_path)
    for url, ofn in web.utils.group(mapping, 2):
        fn, result = web.utils.re_subm('^' + prefix + url + '$', ofn, path)
        if result:
            try:
                cls = modvars[fn]
            except KeyError:
                return "[ERROR 1]"
            meth = "GET"
            if not hasattr(cls, meth):
                return "[ERROR 2]"
            tocall = getattr(cls(), meth)
            args = list(result.groups())
            for d in re.findall(r'\\(\d+)', ofn):
                args.pop(int(d) - 1)
            query = get_query(urlpath)
            return tocall(*[x and urllib.unquote(x) for x in args],
                          **{'onlycode': True, 'webinput': query})
    return "[ERROR 3]"
def get_url(self):
    if self.document:
        return self.document.url()
    else:
        if self.key.startswith("subject:"):
            return "/subjects/" + web.lstrips(self.key, "subject:")
        else:
            return "/subjects/" + self.key
def url(self):
    if self.document:
        return self.document.url()
    else:
        if self.key.startswith("subject:"):
            return "/subjects/" + web.lstrips(self.key, "subject:")
        else:
            return "/subjects/" + self.key
def __call__(self, environ, start_response):
    path = environ.get('PATH_INFO', '')
    path = self.normpath(path)
    for prefix, root_path in self.paths:
        if path.startswith(prefix):
            environ["PATH_INFO"] = web.lstrips(path, prefix)
            return StaticApp(root_path, environ, start_response)
    return self.app(environ, start_response)
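A hedged sketch of how a prefix-mapping middleware like the one above might be wired up; the class shape and `normpath` here are assumptions for illustration, not the original project's API:

class StaticOverlay:
    """Serve chosen URL prefixes from directories on disk (illustrative)."""
    def __init__(self, app, paths):
        self.app = app
        self.paths = paths  # [(url_prefix, filesystem_root), ...]

    def normpath(self, path):
        # Collapse '..' and duplicate slashes so prefix checks can't be bypassed.
        import posixpath
        return posixpath.normpath(path)

# e.g. serve /static/ from ./static and pass everything else through:
# app = StaticOverlay(app, [('/static/', 'static')])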
def __call__(self, environ, start_response):
    path = environ.get('PATH_INFO', '')
    path = self.normpath(path)
    if path.startswith(self.prefix):
        environ["PATH_INFO"] = os.path.join(self.root_path,
                                            web.lstrips(path, self.prefix))
        return web.httpserver.StaticApp(environ, start_response)
    else:
        return self.app(environ, start_response)
def find_i18n_namespace(path):
    """Finds i18n namespace from the path.

    >>> find_i18n_namespace('/i18n/type/type/strings.en')
    '/type/type'
    >>> find_i18n_namespace('/i18n/strings.en')
    '/'
    """
    return os.path.dirname(web.lstrips(path, '/i18n'))
def translate_path(self, path):
    root = os.getcwd()
    for prefix in self.prefixDict.keys():
        if path.startswith(prefix):
            rootPath = os.path.join(root, self.prefixDict[prefix])
            fullPath = os.path.join(rootPath, web.lstrips(path, prefix))
            return fullPath
    result = web.httpserver.StaticApp.translate_path(self, path)
    return result
def f(key, value):
    if isinstance(value, dict):
        for k, v in value.items():
            f(key + "." + k, v)
    elif isinstance(value, list):
        for v in value:
            f(key, v)
    else:
        key = web.lstrips(key, ".")
        items.append((key, value))
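A worked example of what this recursive flattener produces. How `f` is embedded is an assumption here: the wrapper below supplies the `items` list it closes over and seeds the key with an empty string:

def flatten_dict(d):
    items = []
    def f(key, value):
        if isinstance(value, dict):
            for k, v in value.items():
                f(key + "." + k, v)
        elif isinstance(value, list):
            for v in value:
                f(key, v)
        else:
            # strip the leading '.' left over from the empty root key
            items.append((web.lstrips(key, "."), value))
    f("", d)
    return items

# flatten_dict({'a': {'b': 1, 'c': [2, 3]}})
#   -> [('a.b', 1), ('a.c', 2), ('a.c', 3)]  (up to dict ordering)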
def writerep_email(pol, pol_email, i):
    name = '%s. %s %s' % (i.prefix, i.fname, i.lname)
    from_addr = '%s <%s>' % (name, i.email)
    if production_mode:
        to_addr = web.lstrips(pol_email, 'mailto:')
    elif test_mode:
        to_addr = test_email
    web.sendmail(from_addr, to_addr, i.subject, i.full_msg)
    return True
def find_i18n_namespace(path):
    """Finds i18n namespace from the path.

    >>> find_i18n_namespace('/i18n/type/type/strings.en')
    '/type/type'
    >>> find_i18n_namespace('/i18n/strings.en')
    '/'
    """
    import os.path
    return os.path.dirname(web.lstrips(path, '/i18n'))
def load_things(keys, query):
    _things = simplejson.loads(store.get_many(keys))
    xthings.update(_things)
    for k, v in query.requested.items():
        k = web.lstrips(k, query.prefix)
        if isinstance(v, Query):
            keys2 = common.flatten([d.get(k) for d in _things.values() if d.get(k)])
            keys2 = [k['key'] for k in keys2]
            load_things(set(keys2), v)
def main():
    options, keys = parse_options()

    # set query host
    host = web.lstrips(options.server, "http://").strip("/")
    set_query_host(host)

    # load config
    config.load(options.config)

    update_keys(keys)
def create_dummy_user(username, password, displayname=None, email=''):
    username = web.lstrips(username, 'user/')
    displayname = displayname or username
    from infogami.core.db import new_user  # hack
    user = new_user(db.get_site(), username, displayname, email, password)
    user.displayname = username
    user.save()
    return user
def make_path(doc):
    if doc['key'].endswith(".css"):
        return "static/css/" + doc['key'].split("/")[-1]
    elif doc['key'].endswith(".js"):
        return "openlibrary/plugins/openlibrary/js/" + doc['key'].split("/")[-1]
    else:
        key = doc['key'].rsplit(".")[0]
        key = web.lstrips(key, options.template_root)
        plugin = doc.get("plugin", options.default_plugin)
        return "openlibrary/plugins/%s%s.html" % (plugin, key)
def contact_state(i):
    sendb = get_senate_offices()
    status = ""
    # These are the senators with captchas. We'll just skip them.
    captcha = ['shelby', 'crapo', 'risch', 'moran', 'roberts']
    q = None
    for member in sendb.get(i.state, []):
        print "member", member
        sen = web.lstrips(web.lstrips(web.lstrips(member, 'http://'),
                                      'https://'), 'www.').split('.')[0]
        if sen in WYR_MANUAL:
            member = WYR_MANUAL[sen]
        if sen in captcha:
            #file('failures.log', 'a').write('%s %s %s\n' % (i.id, member, "Captcha-no-attempt-made"))
            status += "Captcha with " + sen + ". "
            continue
        if DEBUG:
            print "writing to member", member
            print sen,
        q = None
        try:
            q = writerep_general(member, i)
            confirmations = [cstr for cstr in confirmationStrings if cstr in q.lower()]
            if len(confirmations) > 0:
                status += 'Thanked by ' + sen + ". "
            else:
                status += 'Failure with ' + sen + ". "
            if DEBUG:
                print status
                #file('failures.log', 'a').write('%s %s %s\n' % (i.id, member, status))
        except Exception as e:
            print "Caught an exception on member", member
            import traceback; traceback.print_exc()
            #file('failures.log', 'a').write('%s %s %s\n' % (i.id, member, e))
            print >>sys.stderr, 'fail:', sen, e
            status += "Caught an exception on member %s. " % member
        except:
            print "Caught an exception on member", member
            import traceback; traceback.print_exc()
            #file('failures.log', 'a').write('%s %s %s\n' % (i.id, member, "unknown error"))
            print >>sys.stderr, 'fail:', sen, "unknown error"
            status += "Caught an unknown exception on member %s. " % member
    return (q, status)
def find_files(root, filter):
    '''Find all files that pass the filter function in and below
    the root directory.
    '''
    absroot = os.path.abspath(root)
    for path, dirs, files in os.walk(os.path.abspath(root)):
        path = root + web.lstrips(path, absroot)
        for file in files:
            f = os.path.join(path, file)
            if filter(f):
                yield f
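A quick usage sketch for `find_files`; the predicate and starting directory are illustrative assumptions:

# Collect all Python sources under the current directory.
for path in find_files('.', lambda f: f.endswith('.py')):
    print path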
def delegate(self):
    sitename = web.ctx.site.name
    path = web.lstrips(web.ctx.path, "/api")
    method = web.ctx.method
    data = web.input()

    conn = self.create_connection()
    try:
        out = conn.request(sitename, path, method, data)
        return '{"status": "ok", "result": %s}' % out
    except client.ClientException as e:
        return '{"status": "fail", "message": "%s"}' % str(e)
def setup_solr_updater():
    from infogami import config

    # solr-updater reads configuration from openlibrary.config.runtime_config
    from openlibrary import config as olconfig
    olconfig.runtime_config = config.__dict__

    # The solr-updater makes an http call to the website instead of using the
    # infobase API. It requires setting the host before starting to use it.
    from openlibrary.catalog.utils.query import set_query_host

    dev_instance_url = config.get("dev_instance_url", "http://127.0.0.1:8080/")
    host = web.lstrips(dev_instance_url, "http://").strip("/")
    set_query_host(host)
def main():
    global args

    FORMAT = "%(asctime)-15s %(levelname)s %(message)s"
    logging.basicConfig(level=logging.INFO, format=FORMAT)
    logger.info("BEGIN new-solr-updater")

    args = parse_arguments()
    process_args(args)

    # set OL URL when running on a dev-instance
    if args.ol_url:
        host = web.lstrips(args.ol_url, "http://").strip("/")
        update_work.set_query_host(host)

    print str(args)

    logger.info("loading config")
    config = load_config(args.config)

    state_file = args.state_file
    offset = read_state_file(state_file)

    logfile = InfobaseLog(config['infobase_server'])
    logfile.seek(offset)

    solr = Solr()

    while True:
        records = logfile.read_records()
        keys = parse_log(records)
        count = update_keys(keys)

        offset = logfile.tell()
        logger.info("saving offset %s", offset)
        with open(state_file, "w") as f:
            f.write(offset)

        if COMMIT:
            logger.info("solr commit")
            solr.commit(ndocs=count)
        else:
            logger.info("not doing solr commit as commit is off")

        # don't sleep after committing some records.
        # While the commit was on, some more edits might have happened.
        if count == 0:
            logger.info("No more log records available, sleeping...")
            time.sleep(5)