Example 1
def pbkdf2_bin(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None, hashfunc=None):
    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
    with the given `salt`. It iterates `iterations` times and produces a
    key of `keylen` bytes. By default, SHA-256 is used as hash function;
    a different hashlib `hashfunc` can be provided.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key.  If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use.  This can either be the
                     string name of a known hash function or a function
                     from the hashlib module.  Defaults to sha256.
    """
    if not hashfunc:
        hashfunc = 'sha256'

    data = to_bytes(data)
    salt = to_bytes(salt)

    if callable(hashfunc):
        _test_hash = hashfunc()
        hash_name = getattr(_test_hash, 'name', None)
    else:
        hash_name = hashfunc
    return hashlib.pbkdf2_hmac(hash_name, data, salt, iterations, keylen)
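A minimal usage sketch of the helper above, assuming `to_bytes` simply UTF-8-encodes text input; the standard-library call it delegates to is shown directly and produces the same derived key.

import hashlib

# Hypothetical inputs; the wrapper accepts text and encodes it itself.
password = b'secret-password'
salt = b'random-salt'

# pbkdf2_bin('secret-password', 'random-salt', 50000, 32) resolves the hash
# name to 'sha256' and ends up in this standard-library call:
key = hashlib.pbkdf2_hmac('sha256', password, salt, 50000, 32)
print(key.hex())  # 32-byte derived key, hex-encoded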
Example 2
def pbkdf2_bin(data,
               salt,
               iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None,
               hashfunc=None):
    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
    with the given `salt`. It iterates `iterations` times and produces a
    key of `keylen` bytes. By default, SHA-256 is used as hash function;
    a different hashlib `hashfunc` can be provided.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key.  If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use.  This can either be the
                     string name of a known hash function or a function
                     from the hashlib module.  Defaults to sha256.
    """
    if not hashfunc:
        hashfunc = 'sha256'

    data = to_bytes(data)
    salt = to_bytes(salt)

    if callable(hashfunc):
        _test_hash = hashfunc()
        hash_name = getattr(_test_hash, 'name', None)
    else:
        hash_name = hashfunc
    return hashlib.pbkdf2_hmac(hash_name, data, salt, iterations, keylen)
Example 3
def pbkdf2_bin(data,
               salt,
               iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None,
               hashfunc=None):
    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
    with the given `salt`. It iterates `iterations` times and produces a
    key of `keylen` bytes. By default, SHA-1 is used as hash function;
    a different hashlib `hashfunc` can be provided.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key.  If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use.  This can either be the
                     string name of a known hash function or a function
                     from the hashlib module.  Defaults to sha1.
    """
    if isinstance(hashfunc, string_types):
        hashfunc = _hash_funcs[hashfunc]
    elif not hashfunc:
        hashfunc = hashlib.sha1
    data = to_bytes(data)
    salt = to_bytes(salt)

    # If we're on Python with pbkdf2_hmac we can try to use it for
    # compatible digests.
    if _has_native_pbkdf2:
        _test_hash = hashfunc()
        if hasattr(_test_hash, 'name') and \
           _test_hash.name in hashlib.algorithms_available:
            return hashlib.pbkdf2_hmac(_test_hash.name, data, salt, iterations,
                                       keylen)

    mac = hmac.HMAC(data, None, hashfunc)
    if not keylen:
        keylen = mac.digest_size

    def _pseudorandom(x, mac=mac):
        h = mac.copy()
        h.update(x)
        return bytearray(h.digest())

    buf = bytearray()
    for block in range_type(1, -(-keylen // mac.digest_size) + 1):
        rv = u = _pseudorandom(salt + _pack_int(block))
        for i in range_type(iterations - 1):
            u = _pseudorandom(bytes(u))
            rv = bytearray(starmap(xor, izip(rv, u)))
        buf.extend(rv)
    return bytes(buf[:keylen])
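The `-(-keylen // mac.digest_size)` expression in the fallback loop is ceiling division: it computes how many HMAC-sized blocks are needed to cover `keylen` bytes. A stand-alone illustration with hypothetical inputs, standard library only:

import hashlib
import hmac

digest_size = hashlib.sha1().digest_size      # 20 bytes for SHA-1
keylen = 32
blocks = -(-keylen // digest_size)            # ceiling division: ceil(32 / 20) == 2
print(blocks)

# Two iterations of the inner loop for block number 1, mirroring
# _pseudorandom(salt + _pack_int(block)) and the running xor above.
u1 = hmac.new(b'data', b'salt' + b'\x00\x00\x00\x01', hashlib.sha1).digest()
u2 = hmac.new(b'data', u1, hashlib.sha1).digest()
rv = bytes(a ^ b for a, b in zip(u1, u2))
print(len(rv) == digest_size)                 # True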
Example 4
def pbkdf2_bin(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None, hashfunc=None):
    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
    with the given `salt`. It iterates `iterations` times and produces a
    key of `keylen` bytes. By default, SHA-1 is used as hash function;
    a different hashlib `hashfunc` can be provided.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key.  If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use.  This can either be the
                     string name of a known hash function or a function
                     from the hashlib module.  Defaults to sha1.
    """
    if isinstance(hashfunc, string_types):
        hashfunc = _hash_funcs[hashfunc]
    elif not hashfunc:
        hashfunc = hashlib.sha1
    data = to_bytes(data)
    salt = to_bytes(salt)

    # If we're on Python with pbkdf2_hmac we can try to use it for
    # compatible digests.
    if _has_native_pbkdf2:
        _test_hash = hashfunc()
        if hasattr(_test_hash, 'name') and \
                        _test_hash.name in _hash_funcs:
            return hashlib.pbkdf2_hmac(_test_hash.name,
                                       data, salt, iterations,
                                       keylen)

    mac = hmac.HMAC(data, None, hashfunc)
    if not keylen:
        keylen = mac.digest_size

    def _pseudorandom(x, mac=mac):
        h = mac.copy()
        h.update(x)
        return bytearray(h.digest())

    buf = bytearray()
    for block in range_type(1, -(-keylen // mac.digest_size) + 1):
        rv = u = _pseudorandom(salt + _pack_int(block))
        for i in range_type(iterations - 1):
            u = _pseudorandom(bytes(u))
            rv = bytearray(starmap(xor, izip(rv, u)))
        buf.extend(rv)
    return bytes(buf[:keylen])
Example 5
def tag(template, name):
    '''
    :param template:
        the template file; this argument is passed in automatically.
    :param name:
        the tag name; non-ASCII names usually arrive URL-encoded.
    '''
    # If `name` is non-ASCII it usually arrives URL-encoded.
    # If `name` is URL-encoded it has to be decoded back to Unicode.
    # Detection: quoting an already URL-encoded value turns every escape into `%25`.
    _name = to_bytes(name, 'utf-8')
    if urllib.quote(_name).count('%25') > 0:
        name = urllib.unquote(_name)

    tag = Tag.query.filter_by(name=name).first_or_404()
    
    page = int(request.args.get('page', 1))
    
    _url = PageURL(url_for('main.tag', name=name), {"page": page})
    _query = Article.query.public().filter(Article.tags.any(id=tag.id))
    pagination = Page(_query, page=page, items_per_page=Article.PER_PAGE, url=_url)

    articles = pagination.items

    _template = template % (tag.template or 'tag.html')
    return render_template(_template,
                           tag=tag, 
                           pagination=pagination,
                           articles=articles)
Example 6
    def compile_item(self, item):
        item.module_name = None
        code = item.server_code
        item.module_name = item.get_module_name()
        item_module = type(sys)(item.module_name)
        item_module.__dict__['this'] = item
        sys.modules[item.module_name] = item_module

        item.task.modules.append(item.module_name)
        if item.owner:
            sys.modules[item.owner.get_module_name()].__dict__[item.module_name] = item_module
        if code:
            try:
                code = to_bytes(code, 'utf-8')
            except Exception as e:
                self.log.exception(error_message(e))
            comp_code = compile(code, item.module_name, "exec")
            exec_(comp_code, item_module.__dict__)

            item_module.__dict__['__loader__'] = item._loader
            funcs = inspect.getmembers(item_module, inspect.isfunction)
            item._events = []
            for func_name, func in funcs:
                item._events.append((func_name, func))
                setattr(item, func_name, func)
        del code
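The `type(sys)(item.module_name)` call above creates an empty module object without importing `types`. A self-contained sketch of the same pattern with hypothetical names, standard library only:

import sys
import inspect

module_name = 'dynamic_item_module'        # hypothetical module name
mod = type(sys)(module_name)               # equivalent to types.ModuleType(module_name)
sys.modules[module_name] = mod

source = "def on_apply(item):\n    return 'applied'\n"
exec(compile(source, module_name, 'exec'), mod.__dict__)

# Collect the functions the executed code defined, as compile_item does.
funcs = inspect.getmembers(mod, inspect.isfunction)
print([name for name, _ in funcs])         # ['on_apply']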
Example 7
def tag(template, name, page=1):
    """
    :param template:
        the template file; this argument is passed in automatically.
    :param name:
        the tag name; non-ASCII names usually arrive URL-encoded.
    """
    # If `name` is non-ASCII it usually arrives URL-encoded.
    # If `name` is URL-encoded it has to be decoded back to Unicode.
    # Detection: quoting an already URL-encoded value turns every escape into `%25`.
    _name = to_bytes(name, 'utf-8')
    if urllib.quote(_name).count('%25') > 0:
        name = urllib.unquote(_name)

    tag = Tag.query.filter_by(name=name).first_or_404()

    _url = page_url
    _query = Article.query.public().filter(Article.tags.any(id=tag.id))
    pagination = Page(_query,
                      page=page,
                      items_per_page=Article.PER_PAGE,
                      url=_url)

    articles = pagination.items

    _template = template % (tag.template or 'tag.html')
    return render_template(_template,
                           tag=tag,
                           pagination=pagination,
                           articles=articles)
Example 8
def test_dispatchermiddleware():
    def null_application(environ, start_response):
        start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
        yield b'NOT FOUND'

    def dummy_application(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        yield to_bytes(environ['SCRIPT_NAME'])

    app = wsgi.DispatcherMiddleware(null_application, {
        '/test1': dummy_application,
        '/test2/very': dummy_application,
    })
    tests = {
        '/test1': ('/test1', '/test1/asfd', '/test1/very'),
        '/test2/very':
        ('/test2/very', '/test2/very/long/path/after/script/name')
    }
    for name, urls in tests.items():
        for p in urls:
            environ = create_environ(p)
            app_iter, status, headers = run_wsgi_app(app, environ)
            assert status == '200 OK'
            assert b''.join(app_iter).strip() == to_bytes(name)

    app_iter, status, headers = run_wsgi_app(app, create_environ('/missing'))
    assert status == '404 NOT FOUND'
    assert b''.join(app_iter).strip() == b'NOT FOUND'
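For context, a sketch of how DispatcherMiddleware is typically wired up outside of a test; the import path is the one used by newer Werkzeug releases (older versions expose it from werkzeug.wsgi, as the test above does):

from werkzeug.wrappers import Request, Response
from werkzeug.middleware.dispatcher import DispatcherMiddleware

@Request.application
def frontend(request):
    return Response('frontend')

@Request.application
def backend(request):
    # SCRIPT_NAME is '/backend' here; PATH_INFO holds the remainder.
    return Response('backend mounted at ' + request.environ['SCRIPT_NAME'])

application = DispatcherMiddleware(frontend, {'/backend': backend})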
Example 9
def test_dispatchermiddleware():
    def null_application(environ, start_response):
        start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
        yield b'NOT FOUND'

    def dummy_application(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        yield to_bytes(environ['SCRIPT_NAME'])

    app = wsgi.DispatcherMiddleware(null_application, {
        '/test1': dummy_application,
        '/test2/very': dummy_application,
    })
    tests = {
        '/test1': ('/test1', '/test1/asfd', '/test1/very'),
        '/test2/very': ('/test2/very', '/test2/very/long/path/after/script/name')
    }
    for name, urls in tests.items():
        for p in urls:
            environ = create_environ(p)
            app_iter, status, headers = run_wsgi_app(app, environ)
            assert status == '200 OK'
            assert b''.join(app_iter).strip() == to_bytes(name)

    app_iter, status, headers = run_wsgi_app(
        app, create_environ('/missing'))
    assert status == '404 NOT FOUND'
    assert b''.join(app_iter).strip() == b'NOT FOUND'
Example 10
    def test_proxy_fix(self, environ, assumed_addr, assumed_host):
        @Request.application
        def app(request):
            return Response('%s|%s' % (
                request.remote_addr,
                # do not use request.host as this fixes too :)
                request.environ['wsgi.url_scheme'] + '://' +
                get_host(request.environ)))

        app = fixers.ProxyFix(app, num_proxies=2)
        has_host = 'HTTP_HOST' in environ
        environ = dict(create_environ(), **environ)
        if not has_host:
            # create_environ() defaults to 'localhost'
            del environ['HTTP_HOST']

        response = Response.from_app(app, environ)

        assert response.get_data() == to_bytes('{}|{}'.format(
            assumed_addr, assumed_host))

        # And we must check that if it is a redirection it is
        # correctly done:

        redirect_app = redirect('/foo/bar.hml')
        response = Response.from_app(redirect_app, environ)

        wsgi_headers = response.get_wsgi_headers(environ)
        assert wsgi_headers['Location'] == '{}/foo/bar.hml'.format(
            assumed_host)
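A sketch of the production wiring this test exercises, assuming an older Werkzeug where ProxyFix lives in werkzeug.contrib.fixers and takes num_proxies (newer releases use x_for/x_host counts instead):

from werkzeug.wrappers import Request, Response
from werkzeug.contrib.fixers import ProxyFix

@Request.application
def app(request):
    # With ProxyFix applied, remote_addr reflects X-Forwarded-For.
    return Response(request.remote_addr)

app = ProxyFix(app, num_proxies=2)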
Example 11
    def now_handler(event, context):
        payload = json.loads(event['body'])

        headers = Headers(payload.get('headers', {}))

        body = payload.get('body', '')
        if body != '':
            if payload.get('encoding') == 'base64':
                body = base64.b64decode(body)
        if isinstance(body, string_types):
            body = to_bytes(body, charset='utf-8')

        path = unquote(payload['path'])
        query = urlparse(path).query

        environ = {
            'CONTENT_LENGTH': str(len(body)),
            'CONTENT_TYPE': headers.get('content-type', ''),
            'PATH_INFO': path,
            'QUERY_STRING': query,
            'REMOTE_ADDR': headers.get(
                'x-forwarded-for', headers.get(
                    'x-real-ip', payload.get(
                        'true-client-ip', ''))),
            'REQUEST_METHOD': payload['method'],
            'SERVER_NAME': headers.get('host', 'lambda'),
            'SERVER_PORT': headers.get('x-forwarded-port', '80'),
            'SERVER_PROTOCOL': 'HTTP/1.1',
            'event': event,
            'context': context,
            'wsgi.errors': sys.stderr,
            'wsgi.input': BytesIO(body),
            'wsgi.multiprocess': False,
            'wsgi.multithread': False,
            'wsgi.run_once': False,
            'wsgi.url_scheme': headers.get('x-forwarded-proto', 'http'),
            'wsgi.version': (1, 0),
        }

        for key, value in environ.items():
            if isinstance(value, string_types) and key != 'QUERY_STRING':
                environ[key] = wsgi_encoding_dance(value)

        for key, value in headers.items():
            key = 'HTTP_' + key.upper().replace('-', '_')
            if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
                environ[key] = value

        response = Response.from_app(__NOW_HANDLER_FILENAME.app, environ)

        return_dict = {
            'statusCode': response.status_code,
            'headers': dict(response.headers)
        }

        if response.data:
            return_dict['body'] = base64.b64encode(response.data).decode('utf-8')
            return_dict['encoding'] = 'base64'

        return return_dict
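The body handling above distinguishes base64-encoded payloads from plain text. A stand-alone sketch of that round trip with a hypothetical payload, standard library only:

import base64

payload = {'body': base64.b64encode(b'{"hello": "world"}').decode('ascii'),
           'encoding': 'base64'}

body = payload.get('body', '')
if body and payload.get('encoding') == 'base64':
    body = base64.b64decode(body)
if isinstance(body, str):
    body = body.encode('utf-8')
print(body)  # b'{"hello": "world"}'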
Example 12
 def __init__(self, data=None, secret_key=None, new=True):
     ModificationTrackingDict.__init__(self, data or ())
     # explicitly convert it into a bytestring because python 2.6
     # no longer performs an implicit string conversion on hmac
     if secret_key is not None:
         secret_key = to_bytes(secret_key, 'utf-8')
     self.secret_key = secret_key
     self.new = new
Example 13
 def quote(cls, value):
     if cls.serialization_method is not None:
         value = cls.serialization_method.dumps(value)
         ### Added line
         value = to_bytes(value, 'utf-8')
     if cls.quote_base64:
         value = b''.join(base64.b64encode(value).splitlines()).strip()
     return value
 def __init__(self, data=None, secret_key=None, new=True):
     ModificationTrackingDict.__init__(self, data or ())
     # explicitly convert it into a bytestring because python 2.6
     # no longer performs an implicit string conversion on hmac
     if secret_key is not None:
         secret_key = to_bytes(secret_key, 'utf-8')
     self.secret_key = secret_key
     self.new = new
Example 15
 def quote(cls, value):
     if cls.serialization_method is not None:
         value = cls.serialization_method.dumps(value)
         ### Added line
         value = to_bytes(value, 'utf-8')
     if cls.quote_base64:
         value = b''.join(base64.b64encode(value).splitlines()).strip()
     return value
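What the quote path produces for a JSON-serialized value, sketched with the standard library only (the added to_bytes step is approximated here by str.encode):

import base64
import json

value = json.dumps({'user_id': 42})        # serialization_method.dumps(value)
value = value.encode('utf-8')              # the added to_bytes(value, 'utf-8') step
quoted = b''.join(base64.b64encode(value).splitlines()).strip()
print(quoted)                              # b'eyJ1c2VyX2lkIjog...', a single-line base64 token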
Example 16
 def get_session(self, task):
     if not hasattr(self, '_cookie') and task:
         secret_key = to_bytes('', 'utf-8')
         key = self.session_key(task)
         self._cookie = JamSecureCookie.load_cookie(self, key=key, secret_key=secret_key)
         expires = self._cookie.get('session_expires')
         if expires and time.time() > expires:
             self._cookie = {}
     return self._cookie
Example 17
def process_sql_params(params, cursor):
    result = []
    for p in params:
        if type(p) == tuple:
            value, data_type = p
            if data_type in [LONGTEXT, KEYS]:
                if type(value) == text_type:
                    value = to_bytes(value, 'utf-8')
        else:
            value = p
        result.append(value)
    return result
Example 18
def load_interface(item):
    item._view_list = []
    item._edit_list = []
    item._order_list = []
    item._reports_list = []
    if item.f_info.value:
        lists = pickle.loads(to_bytes(item.f_info.value, 'utf-8'))
        item._view_list = lists['view']
        item._edit_list = lists['edit']
        item._order_list = lists['order']
        if lists.get('reports'):
            item._reports_list = lists['reports']
Example 19
def process_sql_params(params, cursor):
    result = []
    for p in params:
        if type(p) == tuple:
            value, data_type = p
            if data_type in [BLOB, KEYS]:
                if type(value) == text_type:
                    value = to_bytes(value, 'utf-8')
        else:
            value = p
        result.append(value)
    return result
Example 20
 def test_multiple_cookies(self):
     @Request.application
     def test_app(request):
         response = Response(repr(sorted(request.cookies.items())))
         response.set_cookie(u'test1', b'foo')
         response.set_cookie(u'test2', b'bar')
         return response
     client = Client(test_app, Response)
     resp = client.get('/')
     self.assert_strict_equal(resp.data, b'[]')
     resp = client.get('/')
     self.assert_strict_equal(resp.data,
                       to_bytes(repr([('test1', u'foo'), ('test2', u'bar')]), 'ascii'))
Example 21
def test_multiple_cookies():
    @Request.application
    def test_app(request):
        response = Response(repr(sorted(request.cookies.items())))
        response.set_cookie(u'test1', b'foo')
        response.set_cookie(u'test2', b'bar')
        return response
    client = Client(test_app, Response)
    resp = client.get('/')
    strict_eq(resp.data, b'[]')
    resp = client.get('/')
    strict_eq(resp.data,
              to_bytes(repr([('test1', u'foo'), ('test2', u'bar')]), 'ascii'))
Example 22
def test_multiple_cookies():
    @Request.application
    def test_app(request):
        response = Response(repr(sorted(request.cookies.items())))
        response.set_cookie(u"test1", b"foo")
        response.set_cookie(u"test2", b"bar")
        return response

    client = Client(test_app, Response)
    resp = client.get("/")
    strict_eq(resp.data, b"[]")
    resp = client.get("/")
    strict_eq(resp.data, to_bytes(repr([("test1", u"foo"), ("test2", u"bar")]), "ascii"))
Example 23
    def quote(cls, value):
        """Quote the value for the cookie.  This can be any object supported
        by :attr:`serialization_method`.

        :param value: the value to quote.
        """
        if cls.serialization_method is not None:
            value = cls.serialization_method.dumps(value)
        if cls.quote_base64:
            value = b''.join(
                base64.b64encode(to_bytes(value, "utf8")).splitlines()
            ).strip()
        return value
Example 24
    def quote(cls, value):
        """Quote the value for the cookie.  This can be any object supported
        by :attr:`serialization_method`.

        :param value: the value to quote.
        """
        if cls.serialization_method is not None:
            value = cls.serialization_method.dumps(value)
        if cls.quote_base64:
            value = b''.join(
                base64.b64encode(to_bytes(value, "utf8")).splitlines()
            ).strip()
        return value
Example 25
    def __init__(self, servers=None, default_timeout=300, key_prefix=None):
        BaseCache.__init__(self, default_timeout)
        if servers is None or isinstance(servers, (list, tuple)):
            if servers is None:
                servers = ['127.0.0.1:11211']
            self._client = self.import_preferred_memcache_lib(servers)
            if self._client is None:
                raise RuntimeError('no memcache module found')
        else:
            # NOTE: servers is actually an already initialized memcache
            # client.
            self._client = servers

        self.key_prefix = to_bytes(key_prefix)
Example 26
            def make_cache_key(*args, **kwargs):
                if callable(key_prefix):
                    cache_key = key_prefix()
                elif '%s' in key_prefix:
                    # Convert to a UTF-8 byte string here, otherwise a type error is raised.
                    _path = to_bytes(request.path, 'utf-8')
                    # Non-ASCII URLs need to be URL-encoded.
                    if quote(_path).count('%25') <= 0:
                        _path = quote(_path)
                    cache_key = key_prefix % _path
                else:
                    cache_key = key_prefix

                return cache_key
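A sketch of the '%25' heuristic used above: quoting a value that is already URL-encoded escapes each '%' into '%25'. Shown with Python 3's urllib.parse; the code above relies on the Python 2 style quote import:

from urllib.parse import quote

raw = '/标签/python'                  # a non-ASCII request path
encoded = quote(raw)                  # '/%E6%A0%87%E7%AD%BE/python'
print(quote(encoded).count('%25'))    # > 0: the value was already URL-encoded
print(quote(raw).count('%25'))        # 0: the raw path still needs quoting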
Example 27
def test_multiple_cookies():
    @Request.application
    def test_app(request):
        response = Response(repr(sorted(request.cookies.items())))
        response.set_cookie(u"test1", b"foo")
        response.set_cookie(u"test2", b"bar")
        return response

    client = Client(test_app, Response)
    resp = client.get("/")
    strict_eq(resp.data, b"[]")
    resp = client.get("/")
    strict_eq(resp.data,
              to_bytes(repr([("test1", u"foo"), ("test2", u"bar")]), "ascii"))
Example 28
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterable to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    first_item = next(_iter, '')
    if not first_item:
        return

    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join

    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        for item in chain(buffer, chunks):
            if item == separator:
                yield _join(new_buf)
                new_buf = []
            else:
                new_buf.append(item)
        buffer = new_buf
    if buffer:
        yield _join(buffer)
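The core of the loop is re.split with a capturing group, so the separator itself comes back as a list item and signals when to flush the buffer. A compact illustration with a hypothetical separator:

import re

separator = b'--boundary'
_split = re.compile(b'(' + re.escape(separator) + b')').split

data = b'first chunk--boundarysecond chunk--boundarytail'
parts = _split(data)
print(parts)
# [b'first chunk', b'--boundary', b'second chunk', b'--boundary', b'tail']

chunks = [p for p in parts if p and p != separator]
print(chunks)  # [b'first chunk', b'second chunk', b'tail']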
Example 29
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterable to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    first_item = next(_iter, '')
    if not first_item:
        return

    _iter = chain((first_item, ), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join

    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        for item in chain(buffer, chunks):
            if item == separator:
                yield _join(new_buf)
                new_buf = []
            else:
                new_buf.append(item)
        buffer = new_buf
    if buffer:
        yield _join(buffer)
Example 30
    def __init__(self, servers=None, default_timeout=300, key_prefix=None):
        BaseCache.__init__(self, default_timeout)
        if servers is None or isinstance(servers, (list, tuple)):
            if servers is None:
                servers = ['127.0.0.1:11211']
            self._client = self.import_preferred_memcache_lib(servers)
            if self._client is None:
                raise RuntimeError('no memcache module found')
        else:
            # NOTE: servers is actually an already initialized memcache
            # client.
            self._client = servers

        self.key_prefix = to_bytes(key_prefix)
Example 31
 def copy_rows(item, db_module, con, sql, rows):
     error = None
     for i, r in enumerate(rows):
         j = 0
         for field in item.fields:
             if not field.master_field:
                 if not r[j] is None:
                     if field.data_type == consts.INTEGER:
                         r[j] = int(r[j])
                     elif field.data_type in (consts.FLOAT, consts.CURRENCY):
                         r[j] = float(r[j])
                     elif field.data_type == consts.BOOLEAN:
                         if r[j]:
                             r[j] = 1
                         else:
                             r[j] = 0
                     elif field.data_type == consts.DATE and type(r[j]) == text_type:
                         r[j] = consts.convert_date(r[j])
                     elif field.data_type == consts.DATETIME and type(r[j]) == text_type:
                         r[j] = consts.convert_date_time(r[j])
                     elif field.data_type in [consts.LONGTEXT, consts.KEYS]:
                         if db_module.DATABASE == 'FIREBIRD':
                             # Firebird expects LONGTEXT/KEYS parameters as bytes
                             if type(r[j]) == text_type:
                                 r[j] = to_bytes(r[j], 'utf-8')
                         elif type(r[j]) == bytes:
                             r[j] = to_unicode(r[j], 'utf-8')
                 j += 1
     cursor = con.cursor()
     try:
         if hasattr(db_module, 'set_identity_insert'):
             if item._primary_key:
                 cursor.execute(db_module.set_identity_insert(item.table_name, True))
             new_rows = []
             for r in rows:
                 new_rows.append(tuple(r))
             rows = new_rows
         if hasattr(cursor, 'executemany'):
             cursor.executemany(sql, rows)
         else:
             for r in rows:
                 cursor.execute(sql, r)
         con.commit()
         if hasattr(db_module, 'set_identity_insert'):
             if item._primary_key:
                 cursor.execute(db_module.set_identity_insert(item.table_name, False))
     except Exception as e:
         # capture the error so it is returned to the caller
         error = error_message(e)
         con.rollback()
     return error
Example 32
def process_sql_result(rows):
    result = []
    for row in rows:
        fields = []
        for field in row:
            if PY2:
                if type(field) == buffer:
                    field = str(field)
            else:
                if type(field) == memoryview:
                    field = to_unicode(to_bytes(field, 'utf-8'), 'utf-8')
            fields.append(field)
        result.append(fields)
    return result
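On Python 3 a database driver may return memoryview objects for BLOB columns, which the code above turns back into text. The equivalent conversion with the standard library only:

field = memoryview('résumé'.encode('utf-8'))   # what a driver might hand back
if isinstance(field, memoryview):
    field = bytes(field).decode('utf-8')
print(field)                                   # 'résumé'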
Example 33
    def __init__(self, data=None, secret_key=None, new=True):
        ModificationTrackingDict.__init__(self, data or ())

        if secret_key is not None:
            secret_key = to_bytes(secret_key, "utf-8")

        self.secret_key = secret_key
        self.new = new

        if self.serialization_method is pickle:
            warnings.warn("The default SecureCookie.serialization_method will"
                          " change from pickle to json in 1.0. To upgrade"
                          " existing tokens, override unquote to try pickle if"
                          " json fails.")
Example 34
            def make_cache_key(*args, **kwargs):
                if callable(key_prefix):
                    cache_key = key_prefix()
                elif "%s" in key_prefix:
                    # Convert to a UTF-8 byte string here, otherwise a type error is raised.
                    _path = to_bytes(request.path, "utf-8")
                    # Non-ASCII URLs need to be URL-encoded.
                    if quote(_path).count("%25") <= 0:
                        _path = quote(_path)
                    cache_key = key_prefix % _path
                else:
                    cache_key = key_prefix

                return cache_key
Example 35
def process_sql_result(rows):
    result = []
    for row in rows:
        fields = []
        for field in row:
            if PY2:
                if type(field) == buffer:
                    field = str(field)
            else:
                if type(field) == memoryview:
                    field = to_unicode(to_bytes(field, 'utf-8'), 'utf-8')
            fields.append(field)
        result.append(fields)
    return result
Example 36
    def __init__(self, data=None, secret_key=None, new=True):
        ModificationTrackingDict.__init__(self, data or ())
        # explicitly convert it into a bytestring because python 2.6
        # no longer performs an implicit string conversion on hmac
        if secret_key is not None:
            secret_key = to_bytes(secret_key, 'utf-8')
        self.secret_key = secret_key
        self.new = new

        if self.serialization_method is pickle:
            warnings.warn(
                'The default SecureCookie.serialization_method will change from pickle'
                ' to json in 1.0. To upgrade existing tokens, override unquote to try'
                ' pickle if json fails.'
            )
Example 37
def process_sql_params(params, cursor):
    result = []
    for i, p in enumerate(params):
        if type(p) == tuple:
            value, data_type = p
            if data_type in [BLOB, KEYS]:
                if type(value) == text_type:
                    value = to_bytes(value, 'utf-8')
                blob = cursor.var(cx_Oracle.BLOB)
                blob.setvalue(0, value)
                value = blob
        else:
            value = p
        result.append(value)
    return result
Example 38
 def load_interface(self):
     self._view_list = []
     self._edit_list = []
     self._order_list = []
     self._reports_list = []
     value = self.f_info.value
     if value:
         if len(value) >= 4 and value[0:4] == 'json':
             lists = json.loads(value[4:])
         else:
             lists = pickle.loads(to_bytes(value, 'utf-8'))
         self._view_list = lists['view']
         self._edit_list = lists['edit']
         self._order_list = lists['order']
         if lists.get('reports'):
             self._reports_list = lists['reports']
Example 39
def load_interface(item):
    item._view_list = []
    item._edit_list = []
    item._order_list = []
    item._reports_list = []
    value = item.f_info.value
    if value:
        if len(value) >= 4 and value[0:4] == 'json':
            lists = json.loads(value[4:])
        else:
            lists = pickle.loads(to_bytes(value, 'utf-8'))
        item._view_list = lists['view']
        item._edit_list = lists['edit']
        item._order_list = lists['order']
        if lists.get('reports'):
            item._reports_list = lists['reports']
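The 'json' prefix check above distinguishes new-style JSON payloads from legacy pickled ones. A stand-alone sketch of the branch with a hypothetical stored value:

import json
import pickle

stored = 'json' + json.dumps({'view': [], 'edit': [], 'order': [], 'reports': []})

if len(stored) >= 4 and stored[0:4] == 'json':
    lists = json.loads(stored[4:])
else:
    lists = pickle.loads(stored.encode('utf-8'))   # legacy pickled payloads

print(sorted(lists))   # ['edit', 'order', 'reports', 'view']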
Example 40
    def __init__(self, data=None, secret_key=None, new=True):
        ModificationTrackingDict.__init__(self, data or ())
        # explicitly convert it into a bytestring because python 2.6
        # no longer performs an implicit string conversion on hmac
        if secret_key is not None:
            secret_key = to_bytes(secret_key, 'utf-8')
        self.secret_key = secret_key
        self.new = new

        if self.serialization_method is pickle:
            warnings.warn(
                "The default 'SecureCookie.serialization_method' will"
                " change from pickle to json in version 1.0. To upgrade"
                " existing tokens, override 'unquote' to try pickle if"
                " json fails.",
                stacklevel=2,
            )
Example 41
    def decode_netloc(self):
        """Decodes the netloc part into a string."""
        rv = self.host or ''
        try:
            rv = to_bytes(rv, 'utf-8').decode('idna')
        except (AttributeError, TypeError, UnicodeError):
            pass

        if ':' in rv:
            rv = '[%s]' % rv
        port = self.port
        if port is not None:
            rv = '%s:%d' % (rv, port)
        auth = ':'.join(filter(None, [
            _url_unquote_legacy(self.raw_username or '', '/:%@'),
            _url_unquote_legacy(self.raw_password or '', '/:%@'),
        ]))
        if auth:
            rv = '%s@%s' % (auth, rv)
        return rv
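The try/except around the 'idna' codec is what turns punycode hostnames back into Unicode; anything that fails to decode is left untouched. For example:

print(b'xn--bcher-kva.example'.decode('idna'))   # 'bücher.example'
print(b'example.com'.decode('idna'))             # plain ASCII passes through unchanged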
Example 42
    def decode_netloc(self):
        """Decodes the netloc part into a string."""
        rv = self.host or ''
        try:
            rv = to_bytes(rv, 'utf-8').decode('idna')
        except (AttributeError, TypeError, UnicodeError):
            pass

        if ':' in rv:
            rv = '[%s]' % rv
        port = self.port
        if port is not None:
            rv = '%s:%d' % (rv, port)
        auth = ':'.join(
            filter(None, [
                _url_unquote_legacy(self.raw_username or '', '/:%@'),
                _url_unquote_legacy(self.raw_password or '', '/:%@'),
            ]))
        if auth:
            rv = '%s@%s' % (auth, rv)
        return rv
Example 43
def tag(template, name, page=1):
    """
    :param template:
        the template file; this argument is passed in automatically.
    :param name:
        the tag name; non-ASCII names usually arrive URL-encoded.
    """
    # If `name` is non-ASCII it usually arrives URL-encoded.
    # If `name` is URL-encoded it has to be decoded back to Unicode.
    # Detection: quoting an already URL-encoded value turns every escape into `%25`.
    _name = to_bytes(name, "utf-8")
    if urllib.quote(_name).count("%25") > 0:
        name = urllib.unquote(_name)

    tag = Tag.query.filter_by(name=name).first_or_404()

    _url = page_url
    _query = Article.query.public().filter(Article.tags.any(id=tag.id))
    pagination = Page(_query, page=page, items_per_page=Article.PER_PAGE, url=_url)

    articles = pagination.items

    _template = template % (tag.template or "tag.html")
    return render_template(_template, tag=tag, pagination=pagination, articles=articles)
Example 44
def test_dispatchermiddleware():
    def null_application(environ, start_response):
        start_response("404 NOT FOUND", [("Content-Type", "text/plain")])
        yield b"NOT FOUND"

    def dummy_application(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        yield to_bytes(environ["SCRIPT_NAME"])

    app = wsgi.DispatcherMiddleware(null_application, {"/test1": dummy_application, "/test2/very": dummy_application})
    tests = {
        "/test1": ("/test1", "/test1/asfd", "/test1/very"),
        "/test2/very": ("/test2/very", "/test2/very/long/path/after/script/name"),
    }
    for name, urls in tests.items():
        for p in urls:
            environ = create_environ(p)
            app_iter, status, headers = run_wsgi_app(app, environ)
            assert status == "200 OK"
            assert b"".join(app_iter).strip() == to_bytes(name)

    app_iter, status, headers = run_wsgi_app(app, create_environ("/missing"))
    assert status == "404 NOT FOUND"
    assert b"".join(app_iter).strip() == b"NOT FOUND"
Example 45
def stream_encode_multipart(values,
                            use_tempfile=True,
                            threshold=1024 * 500,
                            boundary=None,
                            charset='utf-8'):
    """Encode a dict of values (either strings or file descriptors or
    :class:`FileStorage` objects.) into a multipart encoded string stored
    in a file descriptor.
    """
    if boundary is None:
        boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
    _closure = [BytesIO(), 0, False]

    if use_tempfile:

        def write_binary(string):
            stream, total_length, on_disk = _closure
            if on_disk:
                stream.write(string)
            else:
                length = len(string)
                if length + _closure[1] <= threshold:
                    stream.write(string)
                else:
                    new_stream = TemporaryFile('wb+')
                    new_stream.write(stream.getvalue())
                    new_stream.write(string)
                    _closure[0] = new_stream
                    _closure[2] = True
                _closure[1] = total_length + length
    else:
        write_binary = _closure[0].write

    def write(string):
        write_binary(string.encode(charset))

    if not isinstance(values, MultiDict):
        values = MultiDict(values)

    for key, values in iterlists(values):
        for value in values:
            write('--%s\r\nContent-Disposition: form-data; name="%s"' %
                  (boundary, key))
            reader = getattr(value, 'read', None)
            if reader is not None:
                filename = getattr(value, 'filename',
                                   getattr(value, 'name', None))
                content_type = getattr(value, 'content_type', None)
                if content_type is None:
                    content_type = filename and \
                        mimetypes.guess_type(filename)[0] or \
                        'application/octet-stream'
                if filename is not None:
                    write('; filename="%s"\r\n' % filename)
                else:
                    write('\r\n')
                write('Content-Type: %s\r\n\r\n' % content_type)
                while 1:
                    chunk = reader(16384)
                    if not chunk:
                        break
                    write_binary(chunk)
            else:
                if not isinstance(value, string_types):
                    value = str(value)
                value = to_bytes(value, charset)
                write('\r\n\r\n')
                write_binary(value)
            write('\r\n')
    write('--%s--\r\n' % boundary)

    length = int(_closure[0].tell())
    _closure[0].seek(0)
    return _closure[0], length, boundary
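A usage sketch, assuming the helper is importable from werkzeug.test as in the version shown here:

from werkzeug.test import stream_encode_multipart

stream, length, boundary = stream_encode_multipart({'field': 'value'})
body = stream.read()
print(boundary in body.decode('utf-8'))   # True: the boundary frames each part
print(length == len(body))                # True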
Example 46
def handle_request(application, event, context):

    if u"multiValueHeaders" in event:
        headers = Headers(event["multiValueHeaders"])
    else:
        headers = Headers(event["headers"])

    strip_stage_path = os.environ.get("STRIP_STAGE_PATH", "").lower().strip() in [
        "yes", "y", "true", "t", "1",
    ]
    if u"apigw.tencentcs.com" in headers.get(u"Host", u"") and not strip_stage_path:
        script_name = "/{}".format(event["requestContext"].get(u"stage", ""))
    else:
        script_name = ""

    path_info = event["path"]
    base_path = os.environ.get("API_GATEWAY_BASE_PATH")
    if base_path:
        script_name = "/" + base_path

        if path_info.startswith(script_name):
            path_info = path_info[len(script_name):] or "/"

    if u"body" in event:
        body = event[u"body"] or ""
    else:
        body = ""

    if event.get("isBase64Encoded", False):
        body = base64.b64decode(body)
    if isinstance(body, string_types):
        body = to_bytes(body, charset="utf-8")

    environ = {
        "CONTENT_LENGTH": str(len(body)),
        "CONTENT_TYPE": headers.get(u"Content-Type", ""),
        "PATH_INFO": url_unquote(path_info),
        "QUERY_STRING": encode_query_string(event),
        "REMOTE_ADDR": event["requestContext"].get(u"identity", {}).get(u"sourceIp", ""),
        "REMOTE_USER": event["requestContext"].get(u"authorizer", {}).get(u"principalId", ""),
        "REQUEST_METHOD": event["httpMethod"],
        "SCRIPT_NAME": script_name,
        "SERVER_NAME": headers.get(u"Host", "lambda"),
        "SERVER_PORT": headers.get(u"X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": "HTTP/1.1",
        "wsgi.errors": sys.stderr,
        "wsgi.input": BytesIO(body),
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
        "wsgi.url_scheme": headers.get(u"X-Forwarded-Proto", "http"),
        "wsgi.version": (1, 0),
        "serverless.authorizer": event["requestContext"].get(u"authorizer"),
        "serverless.event": event,
        "serverless.context": context,
        # TODO: Deprecate the following entries, as they do not comply with the WSGI
        # spec. For custom variables, the spec says:
        #
        #   Finally, the environ dictionary may also contain server-defined variables.
        #   These variables should be named using only lower-case letters, numbers, dots,
        #   and underscores, and should be prefixed with a name that is unique to the
        #   defining server or gateway.
        "API_GATEWAY_AUTHORIZER": event["requestContext"].get(u"authorizer"),
        "event": event,
        "context": context,
    }

    for key, value in environ.items():
        if isinstance(value, string_types):
            environ[key] = wsgi_encoding_dance(value)

    for key, value in headers.items():
        key = "HTTP_" + key.upper().replace("-", "_")
        if key not in ("HTTP_CONTENT_TYPE", "HTTP_CONTENT_LENGTH"):
            environ[key] = value

    response = Response.from_app(application, environ)

    returndict = {u"statusCode": response.status_code}

    if u"multiValueHeaders" in event:
        returndict["multiValueHeaders"] = group_headers(response.headers)
    else:
        returndict["headers"] = split_headers(response.headers)

    if event.get("requestContext").get("elb"):
        # If the request comes from ALB we need to add a status description
        returndict["statusDescription"] = u"%d %s" % (
            response.status_code,
            HTTP_STATUS_CODES[response.status_code],
        )

    if response.data:
        mimetype = response.mimetype or "text/plain"
        if (mimetype.startswith("text/")
                or mimetype in TEXT_MIME_TYPES) and not response.headers.get(
                    "Content-Encoding", ""):
            returndict["body"] = response.get_data(as_text=True)
            returndict["isBase64Encoded"] = False
        else:
            returndict["body"] = base64.b64encode(
                response.data).decode("utf-8")
            returndict["isBase64Encoded"] = True

    return returndict
Example 47
def file_write(filename, data):
    with open(filename, 'wb') as f:
        f.write(to_bytes(data, 'utf-8', errors='ignore'))
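A stdlib-only sketch of the same behaviour with a hypothetical path: errors='ignore' drops anything UTF-8 cannot encode, such as lone surrogates.

def file_write(filename, data):
    # Encode text to UTF-8, silently dropping unencodable characters.
    with open(filename, 'wb') as f:
        f.write(data.encode('utf-8', errors='ignore'))

file_write('/tmp/example.txt', 'héllo \ud800 world')   # the lone surrogate is dropped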
Example 48
def dump_cookie(key,
                value='',
                max_age=None,
                expires=None,
                path='/',
                domain=None,
                secure=False,
                httponly=False,
                charset='utf-8',
                sync_expires=True,
                max_size=4093,
                samesite=None):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.

    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string.  In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee.  If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.

    The return value is not ASCII safe if the key contains unicode
    characters.  This is technically against the specification but
    happens in the wild.  It's strongly recommended to not use
    non-ASCII values for the keys.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    :param max_size: Warn if the final header value exceeds this size. The
        default, 4093, should be safely `supported by most browsers
        <cookie_>`_. Set to 0 to disable this check.
    :param samesite: Limits the scope of the cookie such that it will only
                     be attached to requests if those requests are "same-site".

    .. _`cookie`: http://browsercookielimits.squawky.net/
    """
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)

    if path is not None:
        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        expires = to_bytes(cookie_date(time() + max_age))

    samesite = samesite.title() if samesite else None
    if samesite not in ('Strict', 'Lax', None):
        raise ValueError(
            "invalid SameSite value; must be 'Strict', 'Lax' or None")

    buf = [key + b'=' + _cookie_quote(value)]

    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted.  Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in ((b'Domain', domain, True),
                    (b'Expires', expires, False),
                    (b'Max-Age', max_age, False),
                    (b'Secure', secure, None),
                    (b'HttpOnly', httponly, None),
                    (b'Path', path, False),
                    (b'SameSite', samesite, False)):
        if q is None:
            if v:
                buf.append(k)
            continue

        if v is None:
            continue

        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b'=' + v
        buf.append(bytes(tmp))

    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b'; '.join(buf)
    if not PY2:
        rv = rv.decode('latin1')

    # Warn if the final value of the cookie is less than the limit. If the
    # cookie is too large, then it may be silently ignored, which can be quite
    # hard to debug.
    cookie_size = len(rv)

    if max_size and cookie_size > max_size:
        value_size = len(value)
        warnings.warn(
            'The "{key}" cookie is too large: the value was {value_size} bytes'
            ' but the header required {extra_size} extra bytes. The final size'
            ' was {cookie_size} bytes but the limit is {max_size} bytes.'
            ' Browsers may silently ignore cookies larger than this.'.format(
                key=key,
                value_size=value_size,
                extra_size=cookie_size - value_size,
                cookie_size=cookie_size,
                max_size=max_size),
            stacklevel=2)

    return rv
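A usage sketch, assuming a Werkzeug release whose dump_cookie signature matches the variant above (the older variants further below have no samesite or max_size parameters):

from werkzeug.http import dump_cookie

header = dump_cookie('session', 'abc123', max_age=3600,
                     path='/', secure=True, httponly=True, samesite='Lax')
print(header)
# session=abc123; Expires=...; Max-Age=3600; Secure; HttpOnly; Path=/; SameSite=Lax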
Example 49
 def inner(environ, start_response):
     if environ['PATH_INFO'] == '/_getpid':
         start_response('200 OK', [('Content-Type', 'text/plain')])
         return [to_bytes(str(os.getpid()))]
     return f(environ, start_response)
Example 50
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=False, httponly=False,
                charset='utf-8', sync_expires=True):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.

    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string.  In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee.  If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.

    The return value is not ASCII safe if the key contains unicode
    characters.  This is technically against the specification but
    happens in the wild.  It's strongly recommended to not use
    non-ASCII values for the keys.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    """
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)

    if path is not None:
        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        expires = to_bytes(cookie_date(time() + max_age))

    buf = [key + b'=' + _cookie_quote(value)]

    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted.  Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in ((b'Domain', domain, True),
                    (b'Expires', expires, False,),
                    (b'Max-Age', max_age, False),
                    (b'Secure', secure, None),
                    (b'HttpOnly', httponly, None),
                    (b'Path', path, False)):
        if q is None:
            if v:
                buf.append(k)
            continue

        if v is None:
            continue

        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b'=' + v
        buf.append(bytes(tmp))

    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b'; '.join(buf)
    if not PY2:
        rv = rv.decode('latin1')
    return rv
Example 51
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=False, httponly=False,
                charset='utf-8', sync_expires=True):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.

    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string.  In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee.  If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.

    The return value is not ASCII safe if the key contains unicode
    characters.  This is technically against the specification but
    happens in the wild.  It's strongly recommended to not use
    non-ASCII values for the keys.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    """
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)

    if path is not None:
        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        expires = to_bytes(cookie_date(time() + max_age))

    buf = [key + b'=' + _cookie_quote(value)]

    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted.  Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in ((b'Domain', domain, True),
                    (b'Expires', expires, False,),
                    (b'Max-Age', max_age, False),
                    (b'Secure', secure, None),
                    (b'HttpOnly', httponly, None),
                    (b'Path', path, False)):
        if q is None:
            if v:
                buf.append(k)
            continue

        if v is None:
            continue

        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b'=' + v
        buf.append(bytes(tmp))

    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b'; '.join(buf)
    if not PY2:
        rv = rv.decode('latin1')
    return rv
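A minimal usage sketch for the function above; the ``werkzeug.http`` import path is an assumption about where this version of ``dump_cookie`` lives, and the argument values are purely illustrative:

# Hedged usage sketch for dump_cookie; import path is assumed.
from datetime import timedelta
from werkzeug.http import dump_cookie

header = dump_cookie('session', 'abc123',
                     max_age=timedelta(hours=1),   # converted to 3600 seconds
                     secure=True, httponly=True)
# `header` is a single value suitable for a ('Set-Cookie', header) pair; with
# sync_expires=True an Expires attribute derived from max_age is added too.
print(header)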
Example n. 52
0
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024,
                    cap_at_buffer=False):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    .. versionadded:: 0.11.10
       added support for the `cap_at_buffer` parameter.

    :param stream: the stream or iterable to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set, chunks are split if they are longer
                          than the buffer size.  Internally this is implemented
                          in a way that may exceed the buffer size by up to a
                          factor of two.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    first_item = next(_iter, '')
    if not first_item:
        return

    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join

    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        buf_size = 0
        for item in chain(buffer, chunks):
            if item == separator:
                yield _join(new_buf)
                new_buf = []
                buf_size = 0
            else:
                buf_size += len(item)
                new_buf.append(item)

                if cap_at_buffer and buf_size >= buffer_size:
                    rv = _join(new_buf)
                    while len(rv) >= buffer_size:
                        yield rv[:buffer_size]
                        rv = rv[buffer_size:]
                    new_buf = [rv]
                    buf_size = len(rv)

        buffer = new_buf
    if buffer:
        yield _join(buffer)
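A short sketch of driving this generator, assuming ``make_chunk_iter`` is importable from ``werkzeug.wsgi`` (its historical home); the stream and the separator must both be bytes or both be text:

# Usage sketch; the import location is an assumption.
from io import BytesIO
from werkzeug.wsgi import make_chunk_iter

stream = BytesIO(b'alpha;beta;gamma')
# Splits on b';' and yields b'alpha', b'beta', b'gamma'; the separator itself
# never appears in the yielded chunks.
for chunk in make_chunk_iter(stream, b';'):
    print(chunk)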
Example n. 53
0
def inner(environ, start_response):
    if environ["PATH_INFO"] == "/_getpid":
        start_response("200 OK", [("Content-Type", "text/plain")])
        pid_logger.info("pid=%s", os.getpid())
        return [to_bytes(str(os.getpid()))]
    return f(environ, start_response)
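The fragment above is the inner function of what looks like a WSGI middleware; a hedged reconstruction of the surrounding wrapper, with ``wrap_pid_middleware``, ``pid_logger`` and the ``to_bytes`` import location all being assumptions, might look like this:

# Hedged reconstruction; wrapper name, logger setup and import path are assumed.
import logging
import os

from werkzeug._compat import to_bytes  # assumed location in older Werkzeug

pid_logger = logging.getLogger('pid')

def wrap_pid_middleware(f):
    def inner(environ, start_response):
        if environ["PATH_INFO"] == "/_getpid":
            start_response("200 OK", [("Content-Type", "text/plain")])
            pid_logger.info("pid=%s", os.getpid())
            return [to_bytes(str(os.getpid()))]
        return f(environ, start_response)
    return inner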
Example n. 54
0
def dummy_application(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    yield to_bytes(environ["SCRIPT_NAME"])
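A quick sketch of exercising this app with Werkzeug's test helpers; the base URL is an illustrative choice, and the expected output assumes ``create_environ`` maps the base URL path to SCRIPT_NAME:

# Sketch only; assumes create_environ/run_wsgi_app from werkzeug.test.
from werkzeug.test import create_environ, run_wsgi_app

environ = create_environ('/index', 'http://example.org/app')  # SCRIPT_NAME should be '/app'
app_iter, status, headers = run_wsgi_app(dummy_application, environ)
print(b''.join(app_iter))  # expected: b'/app'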
Example n. 55
0
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
                            boundary=None, charset='utf-8'):
    """Encode a dict of values (either strings or file descriptors or
    :class:`FileStorage` objects.) into a multipart encoded string stored
    in a file descriptor.
    """
    if boundary is None:
        boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
    _closure = [BytesIO(), 0, False]

    if use_tempfile:
        def write_binary(string):
            stream, total_length, on_disk = _closure
            if on_disk:
                stream.write(string)
            else:
                length = len(string)
                if length + _closure[1] <= threshold:
                    stream.write(string)
                else:
                    new_stream = TemporaryFile('wb+')
                    new_stream.write(stream.getvalue())
                    new_stream.write(string)
                    _closure[0] = new_stream
                    _closure[2] = True
                _closure[1] = total_length + length
    else:
        write_binary = _closure[0].write

    def write(string):
        write_binary(string.encode(charset))

    if not isinstance(values, MultiDict):
        values = MultiDict(values)

    for key, values in iterlists(values):
        for value in values:
            write('--%s\r\nContent-Disposition: form-data; name="%s"' %
                  (boundary, key))
            reader = getattr(value, 'read', None)
            if reader is not None:
                filename = getattr(value, 'filename',
                                   getattr(value, 'name', None))
                content_type = getattr(value, 'content_type', None)
                if content_type is None:
                    content_type = filename and \
                        mimetypes.guess_type(filename)[0] or \
                        'application/octet-stream'
                if filename is not None:
                    write('; filename="%s"\r\n' % filename)
                else:
                    write('\r\n')
                write('Content-Type: %s\r\n\r\n' % content_type)
                while 1:
                    chunk = reader(16384)
                    if not chunk:
                        break
                    write_binary(chunk)
            else:
                if not isinstance(value, string_types):
                    value = str(value)
                # always encode, so write_binary receives bytes on Python 3 too
                value = to_bytes(value, charset)
                write('\r\n\r\n')
                write_binary(value)
            write('\r\n')
    write('--%s--\r\n' % boundary)

    length = int(_closure[0].tell())
    _closure[0].seek(0)
    return _closure[0], length, boundary
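A hedged usage sketch; ``stream_encode_multipart`` is assumed to be the helper from ``werkzeug.test`` shown above, and the form field is illustrative:

# Usage sketch; import path is an assumption.
from werkzeug.test import stream_encode_multipart

stream, length, boundary = stream_encode_multipart(
    {'name': 'value'}, use_tempfile=False)
body = stream.read()
content_type = 'multipart/form-data; boundary="%s"' % boundary
# `body` holds the encoded form data, `length` its size in bytes, and
# `content_type` the header a client would send alongside it.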
Example n. 56
0
def dummy_application(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    yield to_bytes(environ['SCRIPT_NAME'])
Example n. 57
0
def handle_request(app, event, context):
    if event.get("source") in ["aws.events", "serverless-plugin-warmup"]:
        return {}

    if u"multiValueHeaders" in event:
        headers = Headers(event[u"multiValueHeaders"])
    else:
        headers = Headers(event[u"headers"])

    if u"amazonaws.com" in headers.get(u"Host", u""):
        script_name = "/{}".format(event[u"requestContext"].get(u"stage", ""))
    else:
        script_name = ""

    # If a user is using a custom domain on API Gateway, they may have a base
    # path in their URL. This allows us to strip it out via an optional
    # environment variable.
    path_info = event[u"path"]
    base_path = os.environ.get("API_GATEWAY_BASE_PATH", "")
    if base_path:
        script_name = "/" + base_path

        if path_info.startswith(script_name):
            path_info = path_info[len(script_name):]

    body = event[u"body"] or ""
    if event.get("isBase64Encoded", False):
        body = base64.b64decode(body)
    if isinstance(body, string_types):
        body = to_bytes(body, charset="utf-8")

    environ = {
        "CONTENT_LENGTH": str(len(body)),
        "CONTENT_TYPE": headers.get(u"Content-Type", ""),
        "PATH_INFO": path_info,
        "QUERY_STRING": encode_query_string(event),
        "REMOTE_ADDR": event[u"requestContext"].get(u"identity", {}).get(u"sourceIp", ""),
        "REMOTE_USER": event[u"requestContext"].get(u"authorizer", {}).get(u"principalId", ""),
        "REQUEST_METHOD": event[u"httpMethod"],
        "SCRIPT_NAME": script_name,
        "SERVER_NAME": headers.get(u"Host", "lambda"),
        "SERVER_PORT": headers.get(u"X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": "HTTP/1.1",
        "wsgi.errors": sys.stderr,
        "wsgi.input": BytesIO(body),
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
        "wsgi.url_scheme": headers.get(u"X-Forwarded-Proto", "http"),
        "wsgi.version": (1, 0),
        "API_GATEWAY_AUTHORIZER": event[u"requestContext"].get(u"authorizer"),
        "event": event,
        "context": context,
    }

    for key, value in environ.items():
        if isinstance(value, string_types):
            environ[key] = wsgi_encoding_dance(value)

    for key, value in headers.items():
        key = "HTTP_" + key.upper().replace("-", "_")
        if key not in ("HTTP_CONTENT_TYPE", "HTTP_CONTENT_LENGTH"):
            environ[key] = value

    response = Response.from_app(app, environ)

    returndict = {u"statusCode": response.status_code}

    if u"multiValueHeaders" in event:
        returndict[u"multiValueHeaders"] = group_headers(response.headers)
    else:
        returndict[u"headers"] = split_headers(response.headers)

    if event.get("requestContext").get("elb"):
        # If the request comes from ALB we need to add a status description
        returndict["statusDescription"] = u"%d %s" % (
            response.status_code,
            HTTP_STATUS_CODES[response.status_code],
        )

    if response.data:
        mimetype = response.mimetype or "text/plain"
        if (mimetype.startswith("text/")
                or mimetype in TEXT_MIME_TYPES) and not response.headers.get(
                    "Content-Encoding", ""):
            returndict["body"] = response.get_data(as_text=True)
            returndict["isBase64Encoded"] = False
        else:
            returndict["body"] = base64.b64encode(
                response.data).decode("utf-8")
            returndict["isBase64Encoded"] = True

    return returndict
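A hedged sketch of the Lambda entry point that would delegate to ``handle_request``; the ``myapp`` module and the ``app`` WSGI object are hypothetical names, not part of the example:

# Hypothetical entry point; module and application names are assumptions.
from myapp import app  # any WSGI application

def lambda_handler(event, context):
    # API Gateway / ALB invoke this; handle_request translates the event into
    # a WSGI environ, runs `app`, and returns the response dictionary.
    return handle_request(app, event, context)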
Example n. 58
0
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=False, httponly=False,
                charset='utf-8', sync_expires=True, max_size=4093,
                samesite=None):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.

    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string.  In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee.  If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.

    The return value is not ASCII safe if the key contains unicode
    characters.  This is technically against the specification but
    happens in the wild.  It's strongly recommended to not use
    non-ASCII values for the keys.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    :param max_size: Warn if the final header value exceeds this size. The
        default, 4093, should be safely `supported by most browsers
        <cookie_>`_. Set to 0 to disable this check.
    :param samesite: Limits the scope of the cookie such that it will only
                     be attached to requests if those requests are "same-site".

    .. _`cookie`: http://browsercookielimits.squawky.net/
    """
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)

    if path is not None:
        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        expires = to_bytes(cookie_date(time() + max_age))

    samesite = samesite.title() if samesite else None
    if samesite not in ('Strict', 'Lax', None):
        raise ValueError("invalid SameSite value; must be 'Strict', 'Lax' or None")

    buf = [key + b'=' + _cookie_quote(value)]

    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted.  Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in ((b'Domain', domain, True),
                    (b'Expires', expires, False,),
                    (b'Max-Age', max_age, False),
                    (b'Secure', secure, None),
                    (b'HttpOnly', httponly, None),
                    (b'Path', path, False),
                    (b'SameSite', samesite, False)):
        if q is None:
            if v:
                buf.append(k)
            continue

        if v is None:
            continue

        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b'=' + v
        buf.append(bytes(tmp))

    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b'; '.join(buf)
    if not PY2:
        rv = rv.decode('latin1')

    # Warn if the final value of the cookie is larger than the limit. If the
    # cookie is too large, then it may be silently ignored, which can be quite
    # hard to debug.
    cookie_size = len(rv)

    if max_size and cookie_size > max_size:
        value_size = len(value)
        warnings.warn(
            'The "{key}" cookie is too large: the value was {value_size} bytes'
            ' but the header required {extra_size} extra bytes. The final size'
            ' was {cookie_size} bytes but the limit is {max_size} bytes.'
            ' Browsers may silently ignore cookies larger than this.'.format(
                key=key,
                value_size=value_size,
                extra_size=cookie_size - value_size,
                cookie_size=cookie_size,
                max_size=max_size
            ),
            stacklevel=2
        )

    return rv
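A short sketch exercising the SameSite handling and the size warning in this version; the ``werkzeug.http`` import path is an assumption:

# Hedged sketch; import path assumed, values illustrative.
import warnings
from werkzeug.http import dump_cookie

header = dump_cookie('prefs', 'dark', samesite='lax')
# samesite is title-cased, so the result ends with '; SameSite=Lax'.

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    dump_cookie('big', 'x' * 5000)   # well past the default max_size of 4093
    assert caught and 'too large' in str(caught[0].message)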