def __repr__(self):
    r''' # note that we use raw strings to avoid having to use double back slashes below
    NOTE! This function is a clone of web2py:gluon.languages.utf_repl() function::

        utf8.__repr__() works same as str.repr() when processing ascii string
        >>> repr(Utf8('abc')) == repr(Utf8("abc")) == repr('abc') == repr("abc") == "'abc'"
        True
        >>> repr(Utf8('a"b"c')) == repr('a"b"c') == '\'a"b"c\''
        True
        >>> repr(Utf8("a'b'c")) == repr("a'b'c") == '"a\'b\'c"'
        True
        >>> repr(Utf8('a\'b"c')) == repr('a\'b"c') == repr(Utf8("a'b\"c")) == repr("a'b\"c") == '\'a\\\'b"c\''
        True
        >>> repr(Utf8('a\r\nb')) == repr('a\r\nb') == "'a\\r\\nb'"  # Test for \r, \n
        True

    Unlike str.repr(), Utf8.__repr__() remains utf8 content when processing utf8 string::

        >>> repr(Utf8('中文字')) == repr(Utf8("中文字")) == "'中文字'" != repr('中文字')
        True
        >>> repr(Utf8('中"文"字')) == "'中\"文\"字'" != repr('中"文"字')
        True
        >>> repr(Utf8("中'文'字")) == '"中\'文\'字"' != repr("中'文'字")
        True
        >>> repr(Utf8('中\'文"字')) == repr(Utf8("中'文\"字")) == '\'中\\\'文"字\'' != repr('中\'文"字') == repr("中'文\"字")
        True
        >>> repr(Utf8('中\r\n文')) == "'中\\r\\n文'" != repr('中\r\n文')  # Test for \r, \n
        True
    '''
    if str.find(self, "'") >= 0 and str.find(self, '"') < 0:
        # only single quote exists
        return '"' + to_native(to_unicode(self, 'utf-8').translate(repr_escape_tab), 'utf-8') + '"'
    else:
        return "'" + to_native(to_unicode(self, 'utf-8').translate(repr_escape_tab2), 'utf-8') + "'"
def apply_filter(self, message, symbols={}, filter=None, ftag=None):
    def get_tr(message, prefix, filter):
        s = self.get_t(message, prefix)
        return filter(s) if filter else self.filter(s)
    if filter:
        prefix = '@' + (ftag or 'userdef') + '\x01'
    else:
        prefix = '@' + self.ftag + '\x01'
    message = get_from_cache(
        self.cache, prefix + message,
        lambda: get_tr(message, prefix, filter))
    if symbols or symbols == 0 or symbols == "":
        if isinstance(symbols, dict):
            symbols.update(
                (key, xmlescape(value).translate(ttab_in))
                for key, value in iteritems(symbols)
                if not isinstance(value, NUMBERS))
        else:
            if not isinstance(symbols, tuple):
                symbols = (symbols,)
            symbols = tuple(
                value if isinstance(value, NUMBERS)
                else to_native(xmlescape(value)).translate(ttab_in)
                for value in symbols)
        message = self.params_substitution(message, symbols)
    return to_native(XML(message.translate(ttab_out)).xml())
def get_t(self, message, prefix=''):
    """
    Use ## to add a comment into a translation string
    the comment can be useful to discriminate different possible
    translations for the same string (for example different locations)::

        T(' hello world ') -> ' hello world '
        T(' hello world ## token') -> ' hello world '
        T('hello ## world## token') -> 'hello ## world'

    the ## notation is ignored in multiline strings and strings that
    start with ##. This is needed to allow markmin syntax to be translated
    """
    message = to_native(message, 'utf8')
    prefix = to_native(prefix, 'utf8')
    key = prefix + message
    mt = self.t.get(key, None)
    if mt is not None:
        return mt
    # we did not find a translation
    if message.find('##') > 0 and not '\n' in message:
        # remove comments
        message = message.rsplit('##', 1)[0]
    # guess translation same as original
    self.t[key] = mt = self.default_t.get(key, message)
    # update language file for later translation
    if self.is_writable and is_writable() and \
            self.language_file != self.default_language_file:
        write_dict(self.language_file, self.t)
    return regex_backslash.sub(
        lambda m: m.group(1).translate(ttab_in), to_native(mt))
def __new__(cls, content='', codepage='utf-8'):
    if isinstance(content, unicodeT):
        return str.__new__(cls, to_native(content, 'utf-8'))
    elif codepage in ('utf-8', 'utf8') or isinstance(content, cls):
        return str.__new__(cls, content)
    else:
        return str.__new__(cls, to_native(to_unicode(content, codepage), 'utf-8'))
def include_meta(self):
    s = "\n"
    for meta in iteritems((self.meta or {})):
        k, v = meta
        if isinstance(v, dict):
            s += '<meta' + ''.join(
                ' %s="%s"' % (xmlescape(key), to_native(xmlescape(v[key])))
                for key in v) + ' />\n'
        else:
            s += '<meta name="%s" content="%s" />\n' % (k, to_native(xmlescape(v)))
    self.write(s, escape=False)
def sub_tuple(m):
    """ word
        !word, !!word, !!!word
        ?word1?number
            ??number, ?number
        ?word1?number?word0
        ?word1?number?
        ??number?word0
        ??number?

        word[number]
        !word[number], !!word[number], !!!word[number]
        ?word1?word[number]
        ?word1?[number]
        ??word[number], ?word[number]
        ?word1?word?word0[number]
        ?word1?word?[number]
        ??word?word0[number]
        ??word?[number]
    """
    w, i = m.group('w', 'i')
    c = w[0]
    if c not in '!?':
        return self.plural(w, symbols[int(i or 0)])
    elif c == '?':
        (p1, sep, p2) = w[1:].partition("?")
        part1 = p1 if sep else ""
        (part2, sep, part3) = (p2 if sep else p1).partition("?")
        if not sep:
            part3 = part2
        if i is None:
            # ?[word]?number[?number] or ?number
            if not part2:
                return m.group(0)
            num = int(part2)
        else:
            # ?[word1]?word[?word0][number]
            num = int(symbols[int(i or 0)])
        return part1 if num == 1 else part3 if num == 0 else part2
    elif w.startswith('!!!'):
        word = w[3:]
        fun = upper_fun
    elif w.startswith('!!'):
        word = w[2:]
        fun = title_fun
    else:
        word = w[1:]
        fun = cap_fun
    if i is not None:
        return to_native(fun(self.plural(word, symbols[int(i)])))
    return to_native(fun(word))
def load(self):
    self.local.session_cookie_name = '%s_session' % request.app_name
    cookie_data = _compat.to_bytes(
        request.get_cookie(self.local.session_cookie_name))
    self.local.changed = False
    self.local.secure = request.url.startswith('https')
    self.local.data = {}
    if cookie_data:
        try:
            if self.storage:
                json_data = self.storage.get(cookie_data)
                if json_data:
                    self.local.data = json.loads(json_data)
            else:
                self.local.data = jwt.decode(
                    cookie_data, self.secret, algorithms=[self.algorithm])
            if self.expiration is not None and self.storage is None:
                assert self.local.data['timestamp'] > time.time() - int(self.expiration)
            assert self.local.data.get('secure') == self.local.secure
        except (jwt.exceptions.InvalidSignatureError, AssertionError, ValueError):
            pass
    if not 'uuid' in self.local.data:
        self.local.changed = True
        self.local.data['uuid'] = _compat.to_native(str(uuid.uuid4()))
        self.local.data['secure'] = self.local.secure
def check_new_version(myversion, version_url):
    """Compares current web2py's version with the latest stable web2py version.

    Args:
        myversion: the current version as stored in file `web2py/VERSION`
        version_url: the URL that contains the version of the latest stable release

    Returns:
        tuple: state, version

        - state : `True` if upgrade available, `False` if current
          version is up-to-date, -1 on error
        - version : the most up-to-date version available
    """
    try:
        version = to_native(urlopen(version_url).read())
        pversion = parse_version(version)
        pmyversion = parse_version(myversion)
    except IOError:
        import traceback
        print(traceback.format_exc())
        return -1, myversion

    if pversion[:3] + pversion[-6:] > pmyversion[:3] + pmyversion[-6:]:
        return True, version
    else:
        return False, version
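# Usage sketch for check_new_version(); the version string and URL below are
# illustrative assumptions, not the canonical web2py values.
def _example_version_check():
    state, version = check_new_version(
        'Version 2.24.1-stable+timestamp.2023.03.23.05.07.17',   # hypothetical VERSION file content
        'http://www.web2py.com/examples/default/version')        # hypothetical version endpoint
    if state is True:
        print('upgrade available:', version)
    elif state is False:
        print('already up to date')
    else:  # state == -1: the URL could not be read
        print('version check failed')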
def parse_template(filename, path='views/', context=dict(), lexers={},
                   delimiters=('{{', '}}')):
    """
    Args:
        filename: can be a view filename in the views folder or an input stream
        path: is the path of a views folder
        context: is a dictionary of symbols used to render the template
        lexers: dict of custom lexers to use
        delimiters: opening and closing tags
    """
    # First, if we have a str try to open the file
    if isinstance(filename, str):
        try:
            fp = open(os.path.join(path, filename), 'rb')
            text = fp.read()
            fp.close()
        except IOError:
            raise RestrictedError(filename, '', 'Unable to find the file')
    else:
        text = filename.read()
    text = to_native(text)
    # Use the file contents to get a parsed template and return it.
    return str(TemplateParser(text, context=context, path=path,
                              lexers=lexers, delimiters=delimiters))
def load(self):
    self.local.session_cookie_name = '%s_session' % request.app_name
    raw_token = (request.get_cookie(self.local.session_cookie_name) or
                 request.query.get('_session_token'))
    if not raw_token and request.method in ('POST', 'PUT', 'DELETE'):
        raw_token = ((request.forms and request.forms.get('_session_token')) or
                     (request.json and request.json and
                      request.json.get('_session_token')))
    token_data = _compat.to_bytes(raw_token)
    self.local.changed = False
    self.local.secure = request.url.startswith('https')
    self.local.data = {}
    if token_data:
        try:
            if self.storage:
                json_data = self.storage.get(token_data)
                if json_data:
                    self.local.data = json.loads(json_data)
            else:
                self.local.data = jwt.decode(
                    token_data, self.secret, algorithms=[self.algorithm])
            if self.expiration is not None and self.storage is None:
                assert self.local.data['timestamp'] > time.time() - int(self.expiration)
            assert self.local.data.get('secure') == self.local.secure
        except (jwt.exceptions.InvalidSignatureError, AssertionError, ValueError):
            pass
    if not 'uuid' in self.local.data:
        self.local.changed = True
        self.local.data['uuid'] = _compat.to_native(str(uuid.uuid4()))
        self.local.data['secure'] = self.local.secure
def custom_json(o):
    if hasattr(o, 'custom_json') and callable(o.custom_json):
        return o.custom_json()
    if isinstance(o, (datetime.date, datetime.datetime, datetime.time)):
        return o.isoformat()[:19].replace('T', ' ')
    elif isinstance(o, (int, long)):
        return int(o)
    elif isinstance(o, decimal.Decimal):
        return str(o)
    elif isinstance(o, (bytes, bytearray)):
        return str(o)
    elif isinstance(o, lazyT):
        return str(o)
    elif isinstance(o, XmlComponent):
        return to_native(o.xml())
    elif isinstance(o, set):
        return list(o)
    elif hasattr(o, 'as_list') and callable(o.as_list):
        return o.as_list()
    elif hasattr(o, 'as_dict') and callable(o.as_dict):
        return o.as_dict()
    else:
        raise TypeError(repr(o) + " is not JSON serializable")
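# Sketch: custom_json is meant to be passed as the `default` hook of
# json.dumps, so otherwise unserializable values (dates, Decimals, sets,
# XML helpers, ...) are converted through the branches above.
import datetime
import json

print(json.dumps({'when': datetime.datetime(2024, 1, 2, 3, 4, 5)}, default=custom_json))
# -> {"when": "2024-01-02 03:04:05"}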
def secure_loads(data, encryption_key, hash_key=None, compression_level=None):
    encryption_key = to_bytes(encryption_key)
    data = to_native(data)
    if ':' not in data:
        return None
    if not hash_key:
        hash_key = sha1(encryption_key).hexdigest()
    signature, encrypted_data = data.split(':', 1)
    encrypted_data = to_bytes(encrypted_data)
    actual_signature = hmac.new(to_bytes(hash_key), encrypted_data, hashlib.md5).hexdigest()
    if not compare(signature, actual_signature):
        return None
    key = pad(encryption_key)[:32]
    encrypted_data = base64.urlsafe_b64decode(encrypted_data)
    IV, encrypted_data = encrypted_data[:16], encrypted_data[16:]
    cipher, _ = AES_new(key, IV=IV)
    try:
        data = cipher.decrypt(encrypted_data)
        data = data.rstrip(b' ')
        if compression_level:
            data = zlib.decompress(data)
        return pickle.loads(data)
    except Exception as e:
        return None
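# Round-trip sketch for secure_loads(): it expects a token shaped like
# "<hex hmac-md5 signature>:<urlsafe base64(IV + AES ciphertext)>" as produced
# by a secure_dumps counterpart, which is assumed to live in the same module.
def _example_secure_roundtrip():
    token = secure_dumps({'user_id': 42}, 'my-encryption-key')   # assumed counterpart
    assert secure_loads(token, 'my-encryption-key') == {'user_id': 42}
    assert secure_loads(token, 'wrong-key') is None              # signature mismatch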
def save(self):
    self.local.data['timestamp'] = time.time()
    if self.storage:
        cookie_data = self.local.data['uuid']
        self.storage.set(cookie_data, json.dumps(self.local.data), self.expiration)
    else:
        cookie_data = jwt.encode(self.local.data, self.secret, algorithm=self.algorithm)
    response.set_cookie(self.local.session_cookie_name,
                        _compat.to_native(cookie_data),
                        path='/',
                        secure=self.local.secure)
def _sign_form(self):
    """Signs the form, for csrf"""
    # Adds a form key. First get the signing key from the session.
    payload = {"ts": str(time.time())}
    if self.lifespan is not None:
        payload["exp"] = time.time() + self.lifespan
    key = self._get_key() or self._make_key()
    self.formkey = to_native(jwt.encode(payload, key, algorithm="HS256"))
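# Sketch of the matching CSRF check (illustrative names, not the framework's
# actual API): decode the submitted form key with the same per-session key and
# reject the form if the signature is wrong or the "exp" claim has expired.
def _example_verify_formkey(formkey, key):
    try:
        jwt.decode(formkey, key, algorithms=["HS256"])
        return True
    except jwt.exceptions.InvalidTokenError:
        return False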
def read_dict_aux(filename):
    lang_text = read_locked(filename).replace(b'\r\n', b'\n')
    clear_cache(filename)
    try:
        return safe_eval(to_native(lang_text)) or {}
    except Exception:
        e = sys.exc_info()[1]
        status = 'Syntax error in %s (%s)' % (filename, e)
        logging.error(status)
        return {'__corrupted__': status}
def contenttype(filename, default='text/plain'):
    """
    Returns the Content-Type string matching extension of the given filename.
    """
    filename = to_native(filename)
    i = filename.rfind('.')
    if i >= 0:
        default = CONTENT_TYPE.get(filename[i:].lower(), default)
        j = filename.rfind('.', 0, i)
        if j >= 0:
            default = CONTENT_TYPE.get(filename[j:].lower(), default)
    if default.startswith('text/'):
        default += '; charset=utf-8'
    return default
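# Illustrative calls; the exact strings returned depend on the CONTENT_TYPE
# table, so the expected values in the comments are assumptions.
contenttype('report.pdf')       # e.g. 'application/pdf'
contenttype('notes.txt')        # e.g. 'text/plain; charset=utf-8'
contenttype('backup.tar.gz')    # the double extension '.tar.gz' is checked too
contenttype('unknown.xyz123')   # falls back to the `default` argument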
def findT(path, language=DEFAULT_LANGUAGE):
    """
    Note:
        Must be run by the admin app
    """
    from gluon.tools import Auth, Crud
    lang_file = pjoin(path, 'languages', language + '.py')
    sentences = read_dict(lang_file)
    mp = pjoin(path, 'models')
    cp = pjoin(path, 'controllers')
    vp = pjoin(path, 'views')
    mop = pjoin(path, 'modules')

    def add_message(message):
        if not message.startswith('#') and not '\n' in message:
            tokens = message.rsplit('##', 1)
        else:
            # this allows markmin syntax in translations
            tokens = [message]
        if len(tokens) == 2:
            message = tokens[0].strip() + '##' + tokens[1].strip()
        if message and not message in sentences:
            sentences[message] = message.replace("@markmin\x01", "")

    for filename in \
            listdir(mp, r'^.+\.py$', 0) + listdir(cp, r'^.+\.py$', 0) \
            + listdir(vp, r'^.+\.html$', 0) + listdir(mop, r'^.+\.py$', 0):
        data = to_native(read_locked(filename))
        items = regex_translate.findall(data)
        for x in regex_translate_m.findall(data):
            if x[0:3] in ["'''", '"""']:
                items.append("%s@markmin\x01%s" % (x[0:3], x[3:]))
            else:
                items.append("%s@markmin\x01%s" % (x[0], x[1:]))
        for item in items:
            try:
                message = safe_eval(item)
            except:
                continue  # silently ignore improperly formatted strings
            add_message(message)
    gluon_msg = [Auth.default_messages, Crud.default_messages]
    for item in [x for m in gluon_msg for x in m.values() if x is not None]:
        add_message(item)
    if not '!langcode!' in sentences:
        sentences['!langcode!'] = (
            DEFAULT_LANGUAGE if language in ('default', DEFAULT_LANGUAGE) else language)
    if not '!langname!' in sentences:
        sentences['!langname!'] = (
            DEFAULT_LANGUAGE_NAME if language in ('default', DEFAULT_LANGUAGE)
            else sentences['!langcode!'])
    write_dict(lang_file, sentences)
def save(self):
    self.local.data['timestamp'] = time.time()
    if self.storage:
        cookie_data = self.local.data['uuid']
        self.storage.set(cookie_data, json.dumps(self.local.data), self.expiration)
    else:
        cookie_data = jwt.encode(self.local.data, self.secret, algorithm=self.algorithm)
    response.set_cookie(self.local.session_cookie_name,
                        _compat.to_native(cookie_data),
                        path='/',
                        secure=self.local.secure,
                        same_site=self.same_site)
def findT(path, language=DEFAULT_LANGUAGE):
    """
    Note:
        Must be run by the admin app
    """
    lang_file = pjoin(path, 'languages', language + '.py')
    sentences = read_dict(lang_file)
    mp = pjoin(path, 'models')
    cp = pjoin(path, 'controllers')
    vp = pjoin(path, 'views')
    mop = pjoin(path, 'modules')
    for filename in \
            listdir(mp, r'^.+\.py$', 0) + listdir(cp, r'^.+\.py$', 0) \
            + listdir(vp, r'^.+\.html$', 0) + listdir(mop, r'^.+\.py$', 0):
        data = to_native(read_locked(filename))
        items = regex_translate.findall(data)
        items += regex_translate_m.findall(data)
        for item in items:
            try:
                message = safe_eval(item)
            except:
                continue  # silently ignore improperly formatted strings
            if not message.startswith('#') and not '\n' in message:
                tokens = message.rsplit('##', 1)
            else:
                # this allows markmin syntax in translations
                tokens = [message]
            if len(tokens) == 2:
                message = tokens[0].strip() + '##' + tokens[1].strip()
            if message and not message in sentences:
                sentences[message] = message
    if not '!langcode!' in sentences:
        sentences['!langcode!'] = (
            DEFAULT_LANGUAGE if language in ('default', DEFAULT_LANGUAGE) else language)
    if not '!langname!' in sentences:
        sentences['!langname!'] = (
            DEFAULT_LANGUAGE_NAME if language in ('default', DEFAULT_LANGUAGE)
            else sentences['!langcode!'])
    write_dict(lang_file, sentences)
def simple_hash(text, key='', salt='', digest_alg='md5'):
    """
    Generates hash with the given text using the specified
    digest hashing algorithm
    """
    text = to_bytes(text)
    key = to_bytes(key)
    salt = to_bytes(salt)
    if not digest_alg:
        raise RuntimeError("simple_hash with digest_alg=None")
    elif not isinstance(digest_alg, str):  # manual approach
        h = digest_alg(text + key + salt)
    elif digest_alg.startswith('pbkdf2'):  # latest and coolest!
        iterations, keylen, alg = digest_alg[7:-1].split(',')
        return to_native(pbkdf2_hex(text, salt, int(iterations),
                                    int(keylen), get_digest(alg)))
    elif key:  # use hmac
        digest_alg = get_digest(digest_alg)
        h = hmac.new(key + salt, text, digest_alg)
    else:  # compatible with third party systems
        h = get_digest(digest_alg)()
        h.update(text + salt)
    return h.hexdigest()
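# Usage sketch covering the branches above; the pbkdf2 spec string follows the
# "pbkdf2(iterations,keylen,alg)" pattern that the parser slices apart.
import hashlib

simple_hash('secret', digest_alg='sha512')                               # plain digest
simple_hash('secret', key='hmac-key', digest_alg='sha512')               # HMAC branch
simple_hash('secret', salt='salt', digest_alg='pbkdf2(1000,20,sha512)')  # PBKDF2 branch
simple_hash('secret', digest_alg=hashlib.sha1)                           # callable ("manual") branch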
def _get_file_text(self, filename):
    """
    Attempts to open ``filename`` and retrieve its text.

    This will use self.path to search for the file.
    """
    # If they didn't specify a filename, how can we find one!
    if not filename.strip():
        self._raise_error('Invalid template filename')

    # Allow Views to include other views dynamically
    context = self.context
    if current and not "response" in context:
        context["response"] = getattr(current, 'response', None)

    # Get the filename; filename looks like ``"template.html"``.
    # We need to eval to remove the quotes and get the string type.
    filename = eval(filename, context)

    # Allow empty filename for conditional extend and include directives.
    if not filename:
        return ''

    # Get the path of the file on the system.
    filepath = self.path and os.path.join(self.path, filename) or filename

    # try to read the text.
    try:
        fileobj = open(filepath, 'rb')
        text = fileobj.read()
        fileobj.close()
    except IOError:
        self._raise_error('Unable to open included view file: ' + filepath)
    text = to_native(text)
    return text
def key_filter_in_windows(key):
    """
    Windows doesn't allow \ / : * ? "< > | in filenames.
    To go around this encode the keys with base32.
    """
    return to_native(base64.b32encode(to_bytes(key)))
def key_filter_out_windows(key):
    """
    We need to decode the keys so regex based removal works.
    """
    return to_native(base64.b32decode(to_bytes(key)))
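# Round-trip sketch: base32 output uses only A-Z, 2-7 and '=', all of which
# are legal in Windows filenames, and decoding restores the original key.
def _example_key_filter_roundtrip():
    safe = key_filter_in_windows('session:user/42?x=1')
    assert key_filter_out_windows(safe) == 'session:user/42?x=1'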
def render(content="hello world", stream=None, filename=None, path=None, context={}, lexers={}, delimiters=('{{', '}}'), writer='response.write' ): """ Generic render function Args: content: default content stream: file-like obj to read template from filename: where to find template path: base path for templates context: env lexers: custom lexers to use delimiters: opening and closing tags writer: where to inject the resulting stream Example:: >>> render() 'hello world' >>> render(content='abc') 'abc' >>> render(content="abc'") "abc'" >>> render(content=''''a"'bc''') 'a"'bc' >>> render(content='a\\nbc') 'a\\nbc' >>> render(content='a"bcd"e') 'a"bcd"e' >>> render(content="'''a\\nc'''") "'''a\\nc'''" >>> render(content="'''a\\'c'''") "'''a\'c'''" >>> render(content='{{for i in range(a):}}{{=i}}<br />{{pass}}', context=dict(a=5)) '0<br />1<br />2<br />3<br />4<br />' >>> render(content='{%for i in range(a):%}{%=i%}<br />{%pass%}', context=dict(a=5),delimiters=('{%','%}')) '0<br />1<br />2<br />3<br />4<br />' >>> render(content="{{='''hello\\nworld'''}}") 'hello\\nworld' >>> render(content='{{for i in range(3):\\n=i\\npass}}') '012' """ # here to avoid circular Imports try: from gluon.globals import Response except ImportError: # Working standalone. Build a mock Response object. Response = DummyResponse # Add it to the context so we can use it. if not 'NOESCAPE' in context: context['NOESCAPE'] = NOESCAPE if isinstance(content, unicodeT): content = content.encode('utf8') # save current response class if context and 'response' in context: old_response_body = context['response'].body context['response'].body = StringIO() else: old_response_body = None context['response'] = Response() # If we don't have anything to render, why bother? if not content and not stream and not filename: raise SyntaxError("Must specify a stream or filename or content") # Here for legacy purposes, probably can be reduced to # something more simple. close_stream = False if not stream: if filename: stream = open(filename, 'rb') close_stream = True elif content: stream = StringIO(to_native(content)) # Execute the template. code = str(TemplateParser(stream.read( ), context=context, path=path, lexers=lexers, delimiters=delimiters, writer=writer)) try: exec(code, context) except Exception: # for i,line in enumerate(code.split('\n')): print i,line raise if close_stream: stream.close() # Returned the rendered content. text = context['response'].body.getvalue() if old_response_body is not None: context['response'].body = old_response_body return text
def run_controller_in(controller, function, environment):
    """
    Runs the controller.function() (for the app specified by
    the current folder).
    It tries pre-compiled controller_function.pyc first before compiling it.
    """
    # if compiled should run compiled!
    folder = current.request.folder
    path = pjoin(folder, 'compiled')
    badc = 'invalid controller (%s/%s)' % (controller, function)
    badf = 'invalid function (%s/%s)' % (controller, function)
    if os.path.exists(path):
        filename = pjoin(path, 'controllers.%s.%s.pyc' % (controller, function))
        if not os.path.exists(filename):
            ### for backward compatibility
            filename = pjoin(path, 'controllers_%s_%s.pyc' % (controller, function))
            ### end for backward compatibility
            if not os.path.exists(filename):
                raise HTTP(404, rewrite.THREAD_LOCAL.routes.error_message % badf,
                           web2py_error=badf)
        restricted(read_pyc(filename), environment, layer=filename)
    elif function == '_TEST':
        # TESTING: adjust the path to include site packages
        from settings import global_settings
        from admin import abspath, add_path_first
        paths = (global_settings.gluon_parent, abspath('site-packages', gluon=True),
                 abspath('gluon', gluon=True), '')
        [add_path_first(path) for path in paths]
        # TESTING END

        filename = pjoin(folder, 'controllers/%s.py' % controller)
        if not os.path.exists(filename):
            raise HTTP(404, rewrite.THREAD_LOCAL.routes.error_message % badc,
                       web2py_error=badc)
        environment['__symbols__'] = environment.keys()
        code = read_file(filename)
        code += TEST_CODE
        restricted(code, environment, layer=filename)
    else:
        filename = pjoin(folder, 'controllers/%s.py' % controller)
        if not os.path.exists(filename):
            raise HTTP(404, rewrite.THREAD_LOCAL.routes.error_message % badc,
                       web2py_error=badc)
        code = read_file(filename)
        exposed = find_exposed_functions(code)
        if not function in exposed:
            raise HTTP(404, rewrite.THREAD_LOCAL.routes.error_message % badf,
                       web2py_error=badf)
        code = "%s\nresponse._vars=response._caller(%s)\n" % (code, function)
        if is_gae:
            layer = filename + ':' + function
            code = getcfs(layer, filename, lambda: compile2(code, layer))
        restricted(code, environment, filename)
    response = current.response
    vars = response._vars
    if response.postprocessing:
        vars = reduce(lambda vars, p: p(vars), response.postprocessing, vars)
    if isinstance(vars, unicodeT):
        vars = to_native(vars)
    elif hasattr(vars, 'xml') and callable(vars.xml):
        vars = vars.xml()
    return vars
def __getitem__(self, index):
    return str.__new__(Utf8, to_native(to_unicode(self, 'utf-8')[index], 'utf-8'))
def write(self, data, escape=True):
    if not escape:
        self.body.write(str(data))
    else:
        # FIXME PY3:
        self.body.write(to_native(xmlescape(data)))
def __getslice__(self, begin, end):
    return str.__new__(Utf8, to_native(to_unicode(self, 'utf-8')[begin:end], 'utf-8'))
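# Sketch: indexing and slicing go through to_unicode first, so they operate on
# whole characters rather than raw UTF-8 bytes and never split a multi-byte
# character.
def _example_utf8_slicing():
    s = Utf8('中文字')
    assert s[0] == Utf8('中')
    assert s[1:3] == Utf8('文字')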
def pickle_lazyT(c):
    return str, (to_native(c.xml()),)
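# Sketch: register the reducer so pickling a lazyT flattens it to the plain
# translated string; the copyreg registration here mirrors what the translation
# module is assumed to do.
import copyreg

copyreg.pickle(lazyT, pickle_lazyT)
# pickle.dumps(T('hello')) would then unpickle as the plain str 'hello',
# assuming T is the usual translator instance.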
def post(self, url, data=None, cookies=None, headers=None, auth=None, method='auto'):
    self.url = self.app + url

    # if this POST form requires a postback do it
    if data and '_formname' in data and self.postbacks and \
            self.history and self.history[-1][1] != self.url:
        # to bypass the web2py CSRF need to get formkey
        # before submitting the form
        self.get(url, cookies=cookies, headers=headers, auth=auth)

    # unless cookies are specified, recycle cookies
    if cookies is None:
        cookies = self.cookies
    cookies = cookies or {}
    headers = headers or {}

    cj = cookielib.CookieJar()
    args = [
        urllib2.HTTPCookieProcessor(cj),
        urllib2.HTTPHandler(debuglevel=0)
    ]
    # if required do basic auth
    if auth:
        auth_handler = urllib2.HTTPBasicAuthHandler()
        auth_handler.add_password(**auth)
        args.append(auth_handler)
    opener = urllib2.build_opener(*args)

    # copy headers from dict to list of key,value
    headers_list = []
    for key, value in iteritems(self.default_headers):
        if not key in headers:
            headers[key] = value
    for key, value in iteritems(headers):
        if isinstance(value, (list, tuple)):
            for v in value:
                headers_list.append((key, v))
        else:
            headers_list.append((key, value))

    # move cookies to headers
    for key, value in iteritems(cookies):
        headers_list.append(('Cookie', '%s=%s' % (key, value)))

    # add headers to request
    for key, value in headers_list:
        opener.addheaders.append((key, str(value)))

    # assume everything is ok and make http request
    error = None
    try:
        if isinstance(data, str):
            self.method = 'POST' if method == 'auto' else method
        elif isinstance(data, dict):
            self.method = 'POST' if method == 'auto' else method
            # if there is only one form, set _formname automatically
            if not '_formname' in data and len(self.forms) == 1:
                data['_formname'] = self.forms.keys()[0]
            # if there is no formkey but it is known, set it
            if '_formname' in data and not '_formkey' in data and \
                    data['_formname'] in self.forms:
                data['_formkey'] = self.forms[data['_formname']]
            # time the POST request
            data = urlencode(data, doseq=True)
        else:
            self.method = 'GET' if method == 'auto' else method
            data = None
        t0 = time.time()
        self.response = opener.open(self.url, to_bytes(data))
        self.time = time.time() - t0
    except urllib2.HTTPError as er:
        error = er  # catch HTTP errors
        self.time = time.time() - t0
        self.response = er

    if hasattr(self.response, 'getcode'):
        self.status = self.response.getcode()
    else:  # python2.5
        self.status = None

    self.text = to_native(self.response.read())
    # In PY3 self.response.headers are case sensitive
    self.headers = dict()
    for h in self.response.headers:
        self.headers[h.lower()] = self.response.headers[h]

    # treat web2py tickets as special types of errors
    if error is not None:
        if 'web2py_error' in self.headers:
            raise RuntimeError(self.headers['web2py_error'])
        else:
            raise error

    # parse headers into cookies
    self.cookies = {}
    if 'set-cookie' in self.headers:
        for item in self.headers['set-cookie'].split(','):
            key, value = item[:item.find(';')].split('=')
            self.cookies[key.strip()] = value.strip()

    # check if a new session id has been issued, symptom of broken session
    if self.session_regex is not None:
        for cookie, value in iteritems(self.cookies):
            match = self.session_regex.match(cookie)
            if match:
                name = match.group('name')
                if name in self.sessions and self.sessions[name] != value:
                    print(RuntimeError('Changed session ID %s' % name))
                self.sessions[name] = value

    # find all forms and formkeys in page
    self.forms = {}
    for match in FORM_REGEX.finditer(to_native(self.text)):
        self.forms[match.group('formname')] = match.group('formkey')

    # log this request
    self.history.append((self.method, self.url, self.status, self.time))
def custom_importer(name, globals=None, locals=None, fromlist=None, level=-1):
    """
    web2py's custom importer. It behaves like the standard Python importer but
    it tries to transform import statements as something like
    "import applications.app_name.modules.x".
    If the import fails, it falls back on naive_importer
    """
    if isinstance(name, unicodeT):
        name = to_native(name)

    globals = globals or {}
    locals = locals or {}
    fromlist = fromlist or []

    try:
        if current.request._custom_import_track_changes:
            base_importer = TRACK_IMPORTER
        else:
            base_importer = NATIVE_IMPORTER
    except:  # there is no current.request (should never happen)
        base_importer = NATIVE_IMPORTER

    if not(PY2) and level < 0:
        level = 0

    # if not relative and not from applications:
    if hasattr(current, 'request') \
            and level <= 0 \
            and not name.partition('.')[0] in INVALID_MODULES \
            and isinstance(globals, dict):
        import_tb = None
        try:
            try:
                oname = name if not name.startswith('.') else '.' + name
                return NATIVE_IMPORTER(oname, globals, locals, fromlist, level)
            except ImportError:
                items = current.request.folder.split(os.path.sep)
                if not items[-1]:
                    items = items[:-1]
                modules_prefix = '.'.join(items[-2:]) + '.modules'
                if not fromlist:
                    # import like "import x" or "import x.y"
                    result = None
                    for itemname in name.split("."):
                        new_mod = base_importer(
                            modules_prefix, globals, locals, [itemname], level)
                        try:
                            result = result or sys.modules[modules_prefix + '.' + itemname]
                        except KeyError as e:
                            raise ImportError('Cannot import module %s' % str(e))
                        modules_prefix += "." + itemname
                    return result
                else:
                    # import like "from x import a, b, ..."
                    pname = modules_prefix + "." + name
                    return base_importer(pname, globals, locals, fromlist, level)
        except ImportError as e1:
            import_tb = sys.exc_info()[2]
            try:
                return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
            except ImportError as e3:
                raise ImportError(e1, import_tb)  # there is an import error in the module
        except Exception as e2:
            raise  # there is an error in the module
        finally:
            if import_tb:
                import_tb = None

    return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
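# Sketch of how the importer is assumed to be wired in: it only takes effect
# once it replaces the builtin __import__, with NATIVE_IMPORTER keeping the
# original so the function above can fall back to it (Python 3 builtins shown).
def _example_install_custom_importer():
    import builtins
    global NATIVE_IMPORTER
    NATIVE_IMPORTER = builtins.__import__
    builtins.__import__ = custom_importer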
def render(content="hello world", stream=None, filename=None, path=None, context={}, lexers={}, delimiters=('{{', '}}'), writer='response.write'): """ Generic render function Args: content: default content stream: file-like obj to read template from filename: where to find template path: base path for templates context: env lexers: custom lexers to use delimiters: opening and closing tags writer: where to inject the resulting stream Example:: >>> render() 'hello world' >>> render(content='abc') 'abc' >>> render(content="abc'") "abc'" >>> render(content=''''a"'bc''') 'a"'bc' >>> render(content='a\\nbc') 'a\\nbc' >>> render(content='a"bcd"e') 'a"bcd"e' >>> render(content="'''a\\nc'''") "'''a\\nc'''" >>> render(content="'''a\\'c'''") "'''a\'c'''" >>> render(content='{{for i in range(a):}}{{=i}}<br />{{pass}}', context=dict(a=5)) '0<br />1<br />2<br />3<br />4<br />' >>> render(content='{%for i in range(a):%}{%=i%}<br />{%pass%}', context=dict(a=5),delimiters=('{%','%}')) '0<br />1<br />2<br />3<br />4<br />' >>> render(content="{{='''hello\\nworld'''}}") 'hello\\nworld' >>> render(content='{{for i in range(3):\\n=i\\npass}}') '012' """ # here to avoid circular Imports try: from gluon.globals import Response except ImportError: # Working standalone. Build a mock Response object. Response = DummyResponse # Add it to the context so we can use it. if not 'NOESCAPE' in context: context['NOESCAPE'] = NOESCAPE if isinstance(content, unicodeT): content = content.encode('utf8') # save current response class if context and 'response' in context: old_response_body = context['response'].body context['response'].body = StringIO() else: old_response_body = None context['response'] = Response() # If we don't have anything to render, why bother? if not content and not stream and not filename: raise SyntaxError("Must specify a stream or filename or content") # Here for legacy purposes, probably can be reduced to # something more simple. close_stream = False if not stream: if filename: stream = open(filename, 'rb') close_stream = True elif content: stream = StringIO(to_native(content)) # Execute the template. code = str( TemplateParser(stream.read(), context=context, path=path, lexers=lexers, delimiters=delimiters, writer=writer)) try: exec(code, context) except Exception: # for i,line in enumerate(code.split('\n')): print i,line raise if close_stream: stream.close() # Returned the rendered content. text = context['response'].body.getvalue() if old_response_body is not None: context['response'].body = old_response_body return text
def post(self, url, data=None, cookies=None, headers=None, auth=None, method='auto'): self.url = self.app + url # if this POST form requires a postback do it if data and '_formname' in data and self.postbacks and \ self.history and self.history[-1][1] != self.url: # to bypass the web2py CSRF need to get formkey # before submitting the form self.get(url, cookies=cookies, headers=headers, auth=auth) # unless cookies are specified, recycle cookies if cookies is None: cookies = self.cookies cookies = cookies or {} headers = headers or {} cj = cookielib.CookieJar() args = [ urllib2.HTTPCookieProcessor(cj), urllib2.HTTPHandler(debuglevel=0) ] # if required do basic auth if auth: auth_handler = urllib2.HTTPBasicAuthHandler() auth_handler.add_password(**auth) args.append(auth_handler) opener = urllib2.build_opener(*args) # copy headers from dict to list of key,value headers_list = [] for key, value in iteritems(self.default_headers): if not key in headers: headers[key] = value for key, value in iteritems(headers): if isinstance(value, (list, tuple)): for v in value: headers_list.append((key, v)) else: headers_list.append((key, value)) # move cookies to headers for key, value in iteritems(cookies): headers_list.append(('Cookie', '%s=%s' % (key, value))) # add headers to request for key, value in headers_list: opener.addheaders.append((key, str(value))) # assume everything is ok and make http request error = None try: if isinstance(data, str): self.method = 'POST' if method == 'auto' else method elif isinstance(data, dict): self.method = 'POST' if method == 'auto' else method # if there is only one form, set _formname automatically if not '_formname' in data and len(self.forms) == 1: data['_formname'] = self.forms.keys()[0] # if there is no formkey but it is known, set it if '_formname' in data and not '_formkey' in data and \ data['_formname'] in self.forms: data['_formkey'] = self.forms[data['_formname']] # time the POST request data = urlencode(data, doseq=True) else: self.method = 'GET' if method == 'auto' else method data = None t0 = time.time() self.response = opener.open(self.url, to_bytes(data)) self.time = time.time() - t0 except urllib2.HTTPError as er: error = er # catch HTTP errors self.time = time.time() - t0 self.response = er if hasattr(self.response, 'getcode'): self.status = self.response.getcode() else: #python2.5 self.status = None self.text = to_native(self.response.read()) # In PY3 self.response.headers are case sensitive self.headers = dict() for h in self.response.headers: self.headers[h.lower()] = self.response.headers[h] # treat web2py tickets as special types of errors if error is not None: if 'web2py_error' in self.headers: raise RuntimeError(self.headers['web2py_error']) else: raise error # parse headers into cookies self.cookies = {} if 'set-cookie' in self.headers: for item in self.headers['set-cookie'].split(','): key, value = item[:item.find(';')].split('=') self.cookies[key.strip()] = value.strip() # check is a new session id has been issued, symptom of broken session if self.session_regex is not None: for cookie, value in iteritems(self.cookies): match = self.session_regex.match(cookie) if match: name = match.group('name') if name in self.sessions and self.sessions[name] != value: print(RuntimeError('Changed session ID %s' % name)) self.sessions[name] = value # find all forms and formkeys in page self.forms = {} for match in FORM_REGEX.finditer(to_native(self.text)): self.forms[match.group('formname')] = match.group('formkey') # log this request 
self.history.append((self.method, self.url, self.status, self.time))
def __getslice__(self, begin, end): return str.__new__(Utf8, to_native(to_unicode(self, 'utf-8')[begin:end], 'utf-8'))