def escape(s, quote=None):
    """Replace special characters "&", "<", ">" and (") to HTML-safe
    sequences.

    There is a special handling for `None` which escapes to an empty
    string.

    .. versionchanged:: 0.9
       `quote` is now implicitly on.

    :param s: the string to escape.
    :param quote: ignored.
    """
    if s is None:
        return ""
    elif hasattr(s, "__html__"):
        # The object provides its own pre-escaped HTML representation.
        return text_type(s.__html__())
    elif not isinstance(s, string_types):
        s = text_type(s)
    if quote is not None:
        from warnings import warn

        warn(DeprecationWarning("quote parameter is implicit now"), stacklevel=2)
    # BUG FIX: the replacements previously mapped each character to
    # itself (a no-op).  They must map to the HTML entity references,
    # and "&" must be replaced first so later entities are not
    # double-escaped.
    s = (s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace(
        '"', "&quot;"))
    return s
def escape(s, quote=None):
    """Replace special characters "&", "<", ">" and (") to HTML-safe
    sequences.

    There is a special handling for `None` which escapes to an empty
    string.

    .. versionchanged:: 0.9
       `quote` is now implicitly on.

    :param s: the string to escape.
    :param quote: ignored.
    """
    if s is None:
        return ''
    elif hasattr(s, '__html__'):
        # The object provides its own pre-escaped HTML representation.
        return text_type(s.__html__())
    elif not isinstance(s, string_types):
        s = text_type(s)
    if quote is not None:
        from warnings import warn
        warn(
            "The 'quote' parameter is no longer used as of version 0.9"
            " and will be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
    # BUG FIX: the replacements previously mapped each character to
    # itself (a no-op).  They must map to the HTML entity references,
    # and '&' must be replaced first so later entities are not
    # double-escaped.
    s = s.replace('&', '&amp;').replace('<', '&lt;') \
        .replace('>', '&gt;').replace('"', '&quot;')
    return s
def test_exception_repr():
    """str() and repr() of NotFound are the same with or without a message."""
    for exc in (exceptions.NotFound(), exceptions.NotFound('Not There')):
        assert text_type(exc) == '404: Not Found'
        assert repr(exc) == "<NotFound '404: Not Found'>"
def test_exception_repr(self):
    """str() and repr() of NotFound are the same with or without a message."""
    for exc in (exceptions.NotFound(), exceptions.NotFound('Not There')):
        self.assert_equal(text_type(exc), '404: Not Found')
        self.assert_equal(repr(exc), "<NotFound '404: Not Found'>")
def _url_encode_impl(obj, charset, encode_keys, sort, key):
    """Yield URL-encoded ``key=value`` strings for the items of *obj*,
    skipping pairs whose value is ``None``."""
    pairs = iter_multi_items(obj)
    if sort:
        pairs = sorted(pairs, key=key)
    for k, v in pairs:
        if v is None:
            # None values are dropped from the query string entirely.
            continue
        if not isinstance(k, bytes):
            k = text_type(k).encode(charset)
        if not isinstance(v, bytes):
            v = text_type(v).encode(charset)
        yield url_quote(k) + '=' + url_quote_plus(v)
def _url_encode_impl(obj, charset, encode_keys, sort, key):
    """Yield ``key=value`` strings for the items of *obj*, plus-quoting
    both sides and skipping pairs whose value is ``None``."""
    pairs = iter_multi_items(obj)
    if sort:
        pairs = sorted(pairs, key=key)
    for k, v in pairs:
        if v is None:
            # None values are dropped from the query string entirely.
            continue
        if not isinstance(k, bytes):
            k = text_type(k).encode(charset)
        if not isinstance(v, bytes):
            v = text_type(v).encode(charset)
        yield url_quote_plus(k) + '=' + url_quote_plus(v)
def test_exception_repr():
    """The exception text includes the description; repr stays constant."""
    stock_description = (
        '404 Not Found: The requested URL was not found '
        'on the server. If you entered the URL manually please check your '
        'spelling and try again.')

    exc = exceptions.NotFound()
    assert text_type(exc) == stock_description
    assert repr(exc) == "<NotFound '404: Not Found'>"

    exc = exceptions.NotFound('Not There')
    assert text_type(exc) == '404 Not Found: Not There'
    assert repr(exc) == "<NotFound '404: Not Found'>"

    exc = exceptions.HTTPException('An error message')
    assert text_type(exc) == '??? Unknown Error: An error message'
    assert repr(exc) == "<HTTPException '???: Unknown Error'>"
def test_exception_repr():
    """The exception text includes the description; repr stays constant."""
    stock = (
        "404 Not Found: The requested URL was not found on the server."
        " If you entered the URL manually please check your spelling"
        " and try again."
    )
    exc = exceptions.NotFound()
    assert text_type(exc) == stock
    assert repr(exc) == "<NotFound '404: Not Found'>"

    exc = exceptions.NotFound("Not There")
    assert text_type(exc) == "404 Not Found: Not There"
    assert repr(exc) == "<NotFound '404: Not Found'>"

    exc = exceptions.HTTPException("An error message")
    assert text_type(exc) == "??? Unknown Error: An error message"
    assert repr(exc) == "<HTTPException '???: Unknown Error'>"
def build_artifact(self, artifact):
    """Render an Atom feed for this source's parent page into *artifact*."""
    ctx = get_ctx()
    feed_source = self.source
    page = feed_source.parent
    feed = AtomFeed(title=page.record_label + u' — Pallets Project',
                    feed_url=url_to(feed_source, external=True),
                    url=url_to('/blog', external=True),
                    id=get_id(ctx.env.project.id))
    # Only the ten most recent posts are included in the feed.
    for item in page.children.order_by('-pub_date').limit(10):
        item_author = item['author']
        feed.add(
            item['title'],
            text_type(item['body']),
            xml_base=url_to(item, external=True),
            url=url_to(item, external=True),
            content_type='html',
            id=get_id(u'%s/%s' % (ctx.env.project.id,
                                  item['_path'].encode('utf-8'))),
            author=item_author,
            # timetuple()[:3] keeps only (year, month, day), so the
            # "updated" timestamp is midnight of the publication date.
            updated=datetime(*item['pub_date'].timetuple()[:3]))
    with artifact.open('wb') as f:
        f.write(feed.to_string().encode('utf-8'))
def url_quote(string, charset='utf-8', errors='strict', safe='/:', unsafe=''):
    """URL encode a single string with a given encoding.

    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.

    .. versionadded:: 0.9.2
       The `unsafe` parameter was added.
    """
    # Coerce arbitrary objects to text, then text to bytes.
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)
    # Bytes that pass through unquoted: the always-safe set plus *safe*,
    # minus anything explicitly marked unsafe.
    keep = frozenset(bytearray(safe) + _always_safe).difference(
        bytearray(unsafe))
    out = bytearray()
    for byte in bytearray(string):
        if byte in keep:
            out.append(byte)
        else:
            out.extend(_bytetohex[byte])
    return to_native(bytes(out))
def get_body(self, environ=None):
    """Serialize the error code, error and message as a JSON text body."""
    payload = {
        'errcode': self.errcode,
        'error': self.error,
        'errmsg': self.errmsg,
    }
    return text_type(json_dumps(payload))
def url_quote(string, charset='utf-8', errors='strict', safe='/:', unsafe=''):
    """URL encode a single string with a given encoding.

    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.

    .. versionadded:: 0.9.2
       The `unsafe` parameter was added.
    """
    # Coerce arbitrary objects to text, then text to bytes.
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)
    # Bytes that pass through unquoted: the always-safe set plus *safe*,
    # minus anything explicitly marked unsafe.
    keep = frozenset(bytearray(safe) + _always_safe) - frozenset(
        bytearray(unsafe))
    quoted = bytearray()
    for byte in bytearray(string):
        if byte in keep:
            quoted.append(byte)
        else:
            # Percent-encode as %XX (uppercase hex).
            quoted.extend(('%{0:02X}'.format(byte)).encode('ascii'))
    return to_native(bytes(quoted))
def __init__(self, exc_type, exc_value, tb):
    """Capture the interesting attributes of one traceback frame *tb*."""
    self.lineno = tb.tb_lineno
    self.function_name = tb.tb_frame.f_code.co_name
    self.locals = tb.tb_frame.f_locals
    self.globals = tb.tb_frame.f_globals

    fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
    # Map compiled-file extensions (.pyc/.pyo) back to the .py source.
    if fn[-4:] in ('.pyo', '.pyc'):
        fn = fn[:-1]
    # if it's a file on the file system resolve the real filename.
    if os.path.isfile(fn):
        fn = os.path.realpath(fn)
    self.filename = fn
    self.module = self.globals.get('__name__')
    self.loader = self.globals.get('__loader__')
    self.code = tb.tb_frame.f_code

    # support for paste's traceback extensions
    self.hide = self.locals.get('__traceback_hide__', False)
    info = self.locals.get('__traceback_info__')
    if info is not None:
        try:
            info = text_type(info)
        except UnicodeError:
            # Python 2 bytestring: decode leniently rather than fail.
            info = str(info).decode('utf-8', 'replace')
    self.info = info
def slugify(text, delim=u'-'):
    """Return an ASCII slug of *text* with words joined by *delim*."""
    ascii_text = unidecode.unidecode(text)
    # Split on punctuation, drop empty fragments, lowercase everything.
    words = [word for word in _punct_re.split(ascii_text.lower()) if word]
    return text_type(delim.join(words))
def __init__(self, exc_type, exc_value, tb):
    """Capture the interesting attributes of one traceback frame *tb*."""
    self.lineno = tb.tb_lineno
    self.function_name = tb.tb_frame.f_code.co_name
    self.locals = tb.tb_frame.f_locals
    self.globals = tb.tb_frame.f_globals

    fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
    # Map compiled-file extensions (.pyc/.pyo) back to the .py source.
    if fn[-4:] in ('.pyo', '.pyc'):
        fn = fn[:-1]
    # if it's a file on the file system resolve the real filename.
    if os.path.isfile(fn):
        fn = os.path.realpath(fn)
    # Store the filename as unicode, decoded with the FS encoding.
    self.filename = to_unicode(fn, get_filesystem_encoding())
    self.module = self.globals.get('__name__')
    self.loader = self.globals.get('__loader__')
    self.code = tb.tb_frame.f_code

    # support for paste's traceback extensions
    self.hide = self.locals.get('__traceback_hide__', False)
    info = self.locals.get('__traceback_info__')
    if info is not None:
        try:
            info = text_type(info)
        except UnicodeError:
            # Python 2 bytestring: decode leniently rather than fail.
            info = str(info).decode('utf-8', 'replace')
    self.info = info
def get_body(self, environ=None):
    """Build the JSON error body (message, code, request line, data)."""
    payload = dict(
        msg=self.msg,
        error_code=self.error_code,
        request=request.method + ' ' + self.get_url_no_param(),
        data=self.data,
    )
    return text_type(json.dumps(payload))
def build_artifact(self, artifact):
    """Render an Atom feed for this source's parent page into *artifact*."""
    ctx = get_ctx()
    feed_source = self.source
    page = feed_source.parent
    feed = AtomFeed(
        title=page.record_label + u' — Pallets Project',
        feed_url=url_to(feed_source, external=True),
        url=url_to('/blog', external=True),
        id=get_id(ctx.env.project.id)
    )
    # Ten newest posts; ties on pub_date break on pub_order then title.
    for item in page.children.order_by(
        '-pub_date', '-pub_order', 'title'
    ).limit(10):
        item_author = item['author']
        feed.add(
            item['title'],
            text_type(item['body']),
            xml_base=url_to(item, external=True),
            url=url_to(item, external=True),
            content_type='html',
            id=get_id(u'%s/%s' % (
                ctx.env.project.id,
                item['_path'].encode('utf-8'))),
            author=item_author,
            # timetuple()[:3] keeps only (year, month, day), so the
            # "updated" timestamp is midnight of the publication date.
            updated=datetime(*item['pub_date'].timetuple()[:3]))
    with artifact.open('wb') as f:
        f.write(feed.to_string().encode('utf-8'))
def get_body(self, environ=None):
    """Return the error serialized as a JSON string."""
    payload = dict(
        msg=self.error,
        code=self.error_code,
        request=request.method + ' ' + self.get_url_no_param(),
    )
    return text_type(json.dumps(payload))
def proxy(*children, **arguments):
    """Render the enclosing ``tag`` with *arguments* as attributes and
    *children* as its body, honoring the builder's dialect (html/xhtml)."""
    buffer = "<" + tag
    for key, value in iteritems(arguments):
        if value is None:
            # None means "omit this attribute entirely".
            continue
        if key[-1] == "_":
            # Trailing underscore lets callers pass reserved words
            # such as ``class_``.
            key = key[:-1]
        if key in self._boolean_attributes:
            if not value:
                continue
            if self._dialect == "xhtml":
                # XHTML requires a value; repeat the attribute name.
                value = '="' + key + '"'
            else:
                value = ""
        else:
            value = '="' + escape(value) + '"'
        buffer += " " + key + value
    if not children and tag in self._empty_elements:
        # Void element with no children: self-close per dialect.
        if self._dialect == "xhtml":
            buffer += " />"
        else:
            buffer += ">"
        return buffer
    buffer += ">"
    children_as_string = "".join([text_type(x) for x in children if x is not None])
    if children_as_string:
        if tag in self._plaintext_elements:
            children_as_string = escape(children_as_string)
        elif tag in self._c_like_cdata and self._dialect == "xhtml":
            # Hide CDATA markers inside C-style comments for
            # script/style content.
            children_as_string = "/*<![CDATA[*/" + children_as_string + "/*]]>*/"
    buffer += children_as_string + "</" + tag + ">"
    return buffer
def get_body(self, environ=None):
    """JSON error body; non-ASCII characters are emitted verbatim."""
    body = dict(
        msg=self.error,
        code=self.error_code,
        request=request.method + ' ' + self.get_url_no_param(),
    )
    # ensure_ascii=False keeps unicode characters unescaped in the output.
    return text_type(json.dumps(body, ensure_ascii=False))
def test_exception_repr():
    """The exception text includes the description; repr stays constant."""
    cases = [
        (exceptions.NotFound(),
         "404 Not Found: The requested URL was not found on the server."
         " If you entered the URL manually please check your spelling"
         " and try again.",
         "<NotFound '404: Not Found'>"),
        (exceptions.NotFound("Not There"),
         "404 Not Found: Not There",
         "<NotFound '404: Not Found'>"),
        (exceptions.HTTPException("An error message"),
         "??? Unknown Error: An error message",
         "<HTTPException '???: Unknown Error'>"),
    ]
    for exc, expected_text, expected_repr in cases:
        assert text_type(exc) == expected_text
        assert repr(exc) == expected_repr
def get_body(self, environ=None):
    """OAuth-style JSON error body with error and description."""
    payload = dict(
        error=self.error,
        error_description=self.description,
    )
    return text_type(json.dumps(payload))
def get_body(self, environ=None):
    """Serialize message, code and request line as a JSON body."""
    return text_type(json.dumps(dict(
        message=self.message,
        code=self.message_code,
        request=request.method + " " + self.get_url_no_param(),
    )))
def get_body(self, environ=None):
    """JSON body for a permission-denied error."""
    payload = {
        'errorCode': self.error_code,
        'message': self.message,
        'errors': 'permission denied',
        'permissions': self.permissions,
    }
    return text_type(json.dumps(payload))
def get_body(self, environ=None):
    """Get the HTML body."""
    template = (
        u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
        u'<title>%(code)s %(name)s</title>\n'
        u'<h1>%(name)s</h1>\n'
        u'%(description)s\n'
    )
    values = {
        'code': self.code,
        'name': escape(self.name),
        'description': self.get_description(environ),
    }
    return text_type(template % values)
def get_body(self, environ=None):
    """Get the HTML body."""
    context = {
        "code": self.code,
        "name": escape(self.name),
        "description": self.get_description(environ),
    }
    page = (
        u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
        u"<title>%(code)s %(name)s</title>\n"
        u"<h1>%(name)s</h1>\n"
        u"%(description)s\n"
    )
    return text_type(page % context)
def get_body(self, environ=None):
    """Get the XML body."""
    template = (
        u'<?xml version="1.0" encoding="UTF-8"?>\n'
        u'<ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/ows/1.1 ../../../ows/1.1.0/owsExceptionReport.xsd" version="1.0.0">'
        u'<ows:Exception exceptionCode="%(name)s">'
        u'%(description)s'
        u'</ows:Exception>'
        u'</ows:ExceptionReport>'
    )
    values = {
        'name': escape(self.name),
        'description': self.get_description(environ),
    }
    return text_type(template % values)
def get_body(self, environ=None):
    """Get the HTML body."""
    # Escape the status name once; it is interpolated twice below.
    name = escape(self.name)
    return text_type(
        u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
        u'<title>%(code)s %(name)s</title>\n'
        u'<h1>%(name)s</h1>\n'
        u'%(description)s\n'
        % {
            'code': self.code,
            'name': name,
            'description': self.get_description(environ),
        })
def safe_str_cmp(a, b):
    """This function compares strings in somewhat constant time.  This
    requires that the length of at least one string is known in advance.

    Returns `True` if the two strings are equal, or `False` if they are not.

    .. versionadded:: 0.7
    """
    # Prefer the C implementation (hmac.compare_digest) when available.
    if _builtin_safe_str_cmp is not None:
        return _builtin_safe_str_cmp(text_type(a), text_type(b))

    # Python2's version of that code dies when one is Unicode and the other is not
    if len(a) != len(b):
        return False

    diff = 0
    if not PY2 and isinstance(a, bytes) and isinstance(b, bytes):
        # Python 3 bytes iterate as ints already.
        for x, y in izip(a, b):
            diff |= x ^ y
    else:
        for x, y in izip(a, b):
            diff |= ord(x) ^ ord(y)
    return diff == 0
def get_body(self, environ=None):
    """Get the XML body."""
    values = {
        'name': escape(self.name),
        'description': self.get_description(environ),
    }
    report = (
        u'<?xml version="1.0" encoding="UTF-8"?>\n'
        u'<ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/ows/1.1 ../../../ows/1.1.0/owsExceptionReport.xsd" version="1.0.0">'
        u'<ows:Exception exceptionCode="%(name)s">'
        u'%(description)s'
        u'</ows:Exception>'
        u'</ows:ExceptionReport>'
    ) % values
    return text_type(report)
def escape(s, quote=None):
    """Replace special characters "&", "<", ">" and (") to HTML-safe
    sequences.

    There is a special handling for `None` which escapes to an empty
    string.

    .. versionchanged:: 0.9
       `quote` is now implicitly on.

    :param s: the string to escape.
    :param quote: ignored.
    """
    if s is None:
        return ""
    elif hasattr(s, "__html__"):
        # The object provides its own pre-escaped HTML representation.
        return text_type(s.__html__())
    elif not isinstance(s, string_types):
        s = text_type(s)
    if quote is not None:
        from warnings import warn

        warn(DeprecationWarning("quote parameter is implicit now"), stacklevel=2)
    # BUG FIX: the replacements previously mapped each character to
    # itself (a no-op).  They must map to the HTML entity references,
    # and "&" must be replaced first so later entities are not
    # double-escaped.
    s = s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace('"', "&quot;")
    return s
def test_sorted_url_encode():
    """Sorting with a custom key orders the encoded pairs deterministically."""
    got = urls.url_encode(
        {u"a": 42, u"b": 23, 1: 1, 2: 2},
        sort=True,
        key=lambda i: text_type(i[0]),
    )
    strict_eq(got, "1=1&2=2&a=42&b=23")

    got = urls.url_encode(
        {u"A": 1, u"a": 2, u"B": 3, "b": 4},
        sort=True,
        key=lambda x: x[0].lower() + x[0],
    )
    strict_eq(got, "A=1&a=2&B=3&b=4")
def test_etag_response_mixin_freezing():
    """freeze() sets an etag only when the mixin precedes BaseResponse."""
    class WithFreeze(wrappers.ETagResponseMixin, wrappers.BaseResponse):
        pass

    class WithoutFreeze(wrappers.BaseResponse, wrappers.ETagResponseMixin):
        pass

    frozen = WithFreeze("Hello World")
    frozen.freeze()
    expected = (text_type(generate_etag(b"Hello World")), False)
    strict_eq(frozen.get_etag(), expected)

    unfrozen = WithoutFreeze("Hello World")
    unfrozen.freeze()
    assert unfrozen.get_etag() == (None, None)

    plain = wrappers.Response("Hello World")
    plain.freeze()
    assert plain.get_etag() == (None, None)
def get_body(self, environ=None):
    """Get the XML body."""
    values = {
        'version': __version__,
        'code': self.code,
        'locator': escape(self.locator),
        'name': escape(self.name),
        'description': self.get_description(environ),
    }
    report = (
        u'<?xml version="1.0" encoding="UTF-8"?>\n'
        u'<!-- PyWPS %(version)s -->\n'
        u'<ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/ows/1.1 http://schemas.opengis.net/ows/1.1.0/owsExceptionReport.xsd" version="1.0.0">\n'
        u' <ows:Exception exceptionCode="%(name)s" locator="%(locator)s" >\n'
        u' %(description)s\n'
        u' </ows:Exception>\n'
        u'</ows:ExceptionReport>'
    )
    return text_type(report % values)
def get_body(self, environ=None):
    """JSON error body with optional per-field validation errors."""
    error_body = {'errorCode': self.error_code, 'message': self.message}
    if self.field:
        # Single-field error: report the message under that field name.
        error_body['errors'] = {self.field: self.message}
    if self.errors:
        for key, value in list(self.errors.items()):
            if isinstance(value, list) and value:
                new_message = value[0]
                # Delete the period in the marshmallow standard error
                if new_message.endswith('.'):
                    new_message = new_message[:-1]
                self.errors[key] = new_message
        error_body['errors'] = self.errors
    return text_type(json.dumps(error_body))
def test_etag_response_mixin_freezing():
    """freeze() sets an etag only when the mixin precedes BaseResponse."""
    class WithFreeze(wrappers.ETagResponseMixin, wrappers.BaseResponse):
        pass

    class WithoutFreeze(wrappers.BaseResponse, wrappers.ETagResponseMixin):
        pass

    response = WithFreeze("Hello World")
    response.freeze()
    etag = text_type(wrappers.generate_etag(b"Hello World"))
    strict_eq(response.get_etag(), (etag, False))

    # Neither variant below freezes an etag.
    for response in (WithoutFreeze("Hello World"),
                     wrappers.Response("Hello World")):
        response.freeze()
        assert response.get_etag() == (None, None)
def test_etag_response_mixin_freezing(self):
    """freeze() sets an etag only when the mixin precedes BaseResponse."""
    class WithFreeze(wrappers.ETagResponseMixin, wrappers.BaseResponse):
        pass

    class WithoutFreeze(wrappers.BaseResponse, wrappers.ETagResponseMixin):
        pass

    frozen = WithFreeze('Hello World')
    frozen.freeze()
    self.assert_strict_equal(
        frozen.get_etag(),
        (text_type(wrappers.generate_etag(b'Hello World')), False))

    unfrozen = WithoutFreeze('Hello World')
    unfrozen.freeze()
    self.assert_equal(unfrozen.get_etag(), (None, None))

    plain = wrappers.Response('Hello World')
    plain.freeze()
    self.assert_equal(plain.get_etag(), (None, None))
def get_response(self, environ=None):
    """Build the text/xml OWS ExceptionReport response for this error."""
    values = {
        'version': __version__,
        'code': self.code,
        'locator': escape(self.locator),
        'name': escape(self.name),
        'description': self.get_description(environ),
    }
    template = (
        u'<?xml version="1.0" encoding="UTF-8"?>\n'
        u'<!-- PyWPS %(version)s -->\n'
        u'<ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/ows/1.1 http://schemas.opengis.net/ows/1.1.0/owsExceptionReport.xsd" version="1.0.0">\n'  # noqa
        u' <ows:Exception exceptionCode="%(name)s" locator="%(locator)s" >\n'
        u' %(description)s\n'
        u' </ows:Exception>\n'
        u'</ows:ExceptionReport>'
    )
    doc = text_type(template % values)
    return Response(doc, self.code, mimetype='text/xml')
def get_response(self, environ=None):
    """Build the text/xml OWS ExceptionReport response for this error."""
    template = (
        u'<?xml version="1.0" encoding="UTF-8"?>\n'
        u'<!-- PyWPS {version} -->\n'
        u'<ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/ows/1.1 http://schemas.opengis.net/ows/1.1.0/owsExceptionReport.xsd" version="1.0.0">\n'  # noqa
        u' <ows:Exception exceptionCode="{name}" locator="{locator}" >\n'
        u' {description}\n'
        u' </ows:Exception>\n'
        u'</ows:ExceptionReport>'
    )
    doc = text_type(template.format(
        version=__version__,
        code=self.code,
        locator=escape(self.locator),
        name=escape(self.name),
        description=self.get_description(environ),
    ))
    return Response(doc, self.code, mimetype='text/xml')
def get_body(self, environ=None):
    """Get the XML body."""
    values = {
        "code": self.code,
        "locator": escape(self.locator),
        "name": escape(self.name),
        "description": self.get_description(environ),
    }
    report = (
        u'<?xml version="1.0" encoding="UTF-8"?>\n'
        u'<ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/ows/1.1 ../../../ows/1.1.0/owsExceptionReport.xsd" version="1.0.0">'
        u'<ows:Exception exceptionCode="%(name)s" locator="%(locator)s" >'
        u"%(description)s"
        u"</ows:Exception>"
        u"</ows:ExceptionReport>"
    ) % values
    return text_type(report)
def test_sorted_url_encode():
    """Sorting with a custom key orders the encoded pairs deterministically."""
    data = {u"a": 42, u"b": 23, 1: 1, 2: 2}
    strict_eq(
        urls.url_encode(data, sort=True, key=lambda i: text_type(i[0])),
        '1=1&2=2&a=42&b=23')

    data = {u'A': 1, u'a': 2, u'B': 3, 'b': 4}
    strict_eq(
        urls.url_encode(data, sort=True, key=lambda x: x[0].lower() + x[0]),
        'A=1&a=2&B=3&b=4')
def url_quote(string, charset='utf-8', errors='strict', safe='/:'):
    """URL encode a single string with a given encoding.

    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    """
    # Coerce arbitrary objects to text, then text to bytes.
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    # Bytes in this set pass through unquoted.
    allowed = frozenset(bytearray(safe) + _always_safe)
    quoted = bytearray()
    for byte in bytearray(string):
        if byte in allowed:
            quoted.append(byte)
        else:
            # Percent-encode as %XX (uppercase hex).
            quoted.extend(('%{0:02X}'.format(byte)).encode('ascii'))
    return to_native(bytes(quoted))
def urlize(text, trim_url_limit=None, nofollow=False):
    """Converts any URLs in text into clickable links. Works on http://,
    https:// and www. links. Links can have trailing punctuation (periods,
    commas, close-parens) and leading punctuation (opening parens) and
    it'll still do the right thing. If trim_url_limit is not None, the URLs
    in link text will be limited to trim_url_limit characters. If nofollow
    is True, the URLs in link text will get a rel="nofollow" attribute.
    """
    nofollow_attr = ' rel="nofollow"' if nofollow else ''
    # Escape first, then split into candidate words.
    words = _word_split_re.split(text_type(escape(text)))
    for i, word in enumerate(words):
        replacement = _urlize_parse(word, nofollow_attr, trim_url_limit)
        if replacement:
            words[i] = replacement
    return u''.join(words)
def _url_quote(string, charset='utf-8', errors='strict', safe='/:', unsafe=''):
    """URL encode a single string with a given encoding.

    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.

    .. versionadded:: 0.9.2
       The `unsafe` parameter was added.
    """
    # Coerce arbitrary objects to text, then text to bytes.
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    safe_bytes = _get_stringy_set_impl(safe, charset, errors)
    unsafe_bytes = _get_stringy_set_impl(unsafe, charset, errors)
    # Allowed = always-safe plus *safe*, minus anything marked unsafe.
    allowed = frozenset(safe_bytes + _always_safe) - frozenset(unsafe_bytes)
    return to_native(bytes(_transform_impl(string, allowed)))
def proxy(*children, **arguments):
    """Render the enclosing ``tag`` with *arguments* as attributes and
    *children* as its body, honoring the builder's dialect (html/xhtml)."""
    buffer = '<' + tag
    for key, value in iteritems(arguments):
        if value is None:
            # None means "omit this attribute entirely".
            continue
        if key[-1] == '_':
            # Trailing underscore lets callers pass reserved words
            # such as ``class_``.
            key = key[:-1]
        if key in self._boolean_attributes:
            if not value:
                continue
            if self._dialect == 'xhtml':
                # XHTML requires a value; repeat the attribute name.
                value = '="' + key + '"'
            else:
                value = ''
        else:
            value = '="' + escape(value) + '"'
        buffer += ' ' + key + value
    if not children and tag in self._empty_elements:
        # Void element with no children: self-close per dialect.
        if self._dialect == 'xhtml':
            buffer += ' />'
        else:
            buffer += '>'
        return buffer
    buffer += '>'
    children_as_string = ''.join([text_type(x) for x in children
                                  if x is not None])
    if children_as_string:
        if tag in self._plaintext_elements:
            children_as_string = escape(children_as_string)
        elif tag in self._c_like_cdata and self._dialect == 'xhtml':
            # Hide CDATA markers inside C-style comments for
            # script/style content.
            children_as_string = '/*<![CDATA[*/' + \
                children_as_string + '/*]]>*/'
    buffer += children_as_string + '</' + tag + '>'
    return buffer
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=False, httponly=False, charset='utf-8',
                sync_expires=True, max_size=4093, samesite=None):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.

    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string.  In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee.  If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.

    The return value is not ASCII safe if the key contains unicode
    characters.  This is technically against the specification but
    happens in the wild.  It's strongly recommended to not use
    non-ASCII values for the keys.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc.  Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    :param max_size: Warn if the final header value exceeds this size. The
                     default, 4093, should be safely `supported by most
                     browsers <cookie_>`_.  Set to 0 to disable this check.
    :param samesite: Limits the scope of the cookie such that it will only
                     be attached to requests if those requests are
                     "same-site".

    .. _`cookie`: http://browsercookielimits.squawky.net/
    """
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)

    if path is not None:
        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        # Normalize timedelta to whole seconds.
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        # Derive Expires from Max-Age for clients that ignore Max-Age.
        expires = to_bytes(cookie_date(time() + max_age))

    samesite = samesite.title() if samesite else None
    if samesite not in ('Strict', 'Lax', None):
        raise ValueError("invalid SameSite value; must be 'Strict', 'Lax' or None")

    buf = [key + b'=' + _cookie_quote(value)]

    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted. Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in ((b'Domain', domain, True),
                    (b'Expires', expires, False,),
                    (b'Max-Age', max_age, False),
                    (b'Secure', secure, None),
                    (b'HttpOnly', httponly, None),
                    (b'Path', path, False),
                    (b'SameSite', samesite, False)):
        if q is None:
            # Boolean flag attribute: emit the bare name when truthy.
            if v:
                buf.append(k)
            continue
        if v is None:
            continue
        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b'=' + v
        buf.append(bytes(tmp))

    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b'; '.join(buf)
    if not PY2:
        rv = rv.decode('latin1')

    # Warn if the final value of the cookie is less than the limit. If the
    # cookie is too large, then it may be silently ignored, which can be quite
    # hard to debug.
    cookie_size = len(rv)
    if max_size and cookie_size > max_size:
        value_size = len(value)
        warnings.warn(
            'The "{key}" cookie is too large: the value was {value_size} bytes'
            ' but the header required {extra_size} extra bytes. The final size'
            ' was {cookie_size} bytes but the limit is {max_size} bytes.'
            ' Browsers may silently ignore cookies larger than this.'.format(
                key=key,
                value_size=value_size,
                extra_size=cookie_size - value_size,
                cookie_size=cookie_size,
                max_size=max_size
            ),
            stacklevel=2
        )

    return rv
def get_json(self):
    """Return the error (message, code, request URI) as a JSON string."""
    payload = dict(
        msg=self.msg,
        code=self.error_code,
        request=self.uri,
    )
    return text_type(json.dumps(payload))
def test_sorted_url_encode(self):
    """Sorting with a custom key orders the encoded pairs deterministically."""
    result = urls.url_encode(
        {u"a": 42, u"b": 23, 1: 1, 2: 2},
        sort=True, key=lambda i: text_type(i[0]))
    self.assert_strict_equal(result, '1=1&2=2&a=42&b=23')

    result = urls.url_encode(
        {u'A': 1, u'a': 2, u'B': 3, 'b': 4},
        sort=True, key=lambda x: x[0].lower() + x[0])
    self.assert_strict_equal(result, 'A=1&a=2&B=3&b=4')
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=False, httponly=False,
                charset='utf-8', sync_expires=True):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.

    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string.  In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee.  If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.

    The return value is not ASCII safe if the key contains unicode
    characters.  This is technically against the specification but
    happens in the wild.  It's strongly recommended to not use
    non-ASCII values for the keys.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc.  Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    """
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)

    if path is not None:
        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        # Normalize timedelta to whole seconds.
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        # Derive Expires from Max-Age for clients that ignore Max-Age.
        expires = to_bytes(cookie_date(time() + max_age))

    buf = [key + b'=' + _cookie_quote(value)]

    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted. Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in ((b'Domain', domain, True),
                    (b'Expires', expires, False,),
                    (b'Max-Age', max_age, False),
                    (b'Secure', secure, None),
                    (b'HttpOnly', httponly, None),
                    (b'Path', path, False)):
        if q is None:
            # Boolean flag attribute: emit the bare name when truthy.
            if v:
                buf.append(k)
            continue
        if v is None:
            continue
        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b'=' + v
        buf.append(bytes(tmp))

    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b'; '.join(buf)
    if not PY2:
        rv = rv.decode('latin1')
    return rv
def _urandom():
    """Return 30 bytes of randomness, preferring the OS entropy source.

    Falls back to the ASCII text of ``random()`` on platforms without
    ``os.urandom``.
    """
    if not hasattr(os, 'urandom'):
        return text_type(random()).encode('ascii')
    return os.urandom(30)
def _urandom():
    """Return 30 bytes of randomness, preferring the OS entropy source.

    Falls back to the ASCII text of ``random()`` on platforms without
    ``os.urandom``.
    """
    if not hasattr(os, "urandom"):
        return text_type(random()).encode("ascii")
    return os.urandom(30)
def __html__(self):
    # Expose this object to HTML-aware consumers (e.g. an ``escape``
    # helper that checks for ``__html__``) as its text form.
    return text_type(self)