Example #1
 def __call__(self, environ, start_response):
     stream = LimitedStream(environ['wsgi.input'], int(environ.get('CONTENT_LENGTH') or 0))
     environ['wsgi.input'] = stream
     app_iter = self.app(environ, start_response)
     try:
         stream.exhaust()
         for event in app_iter:
             yield event
     finally:
         if hasattr(app_iter, 'close'):
             app_iter.close()
Example #2
 def __call__(self, environ, start_response):
     stream = LimitedStream(environ['wsgi.input'], 512 * 1024 * 1024)
     environ['wsgi.input'] = stream
     app_iter = self.app(environ, start_response)
     try:
         stream.exhaust()
         for event in app_iter:
             yield event
     finally:
         if hasattr(app_iter, 'close'):
             app_iter.close()
Example #3
 def __call__(self, environ, start_response):
     stream = LimitedStream(environ["wsgi.input"], int(environ["CONTENT_LENGTH"] or 0))
     environ["wsgi.input"] = stream
     app_iter = self.app(environ, start_response)
     try:
         stream.exhaust()
         for event in app_iter:
             yield event
     finally:
         if hasattr(app_iter, "close"):
             app_iter.close()
Example #4
 def __call__(self, environ, start_response):
     stream = LimitedStream(environ['wsgi.input'],
                            int(environ.get('CONTENT_LENGTH', 0) or 0))
     environ['wsgi.input'] = stream
     app_iter = self.app(environ, start_response)
     try:
         stream.exhaust()
         for event in app_iter:
             yield event
     finally:
         if hasattr(app_iter, 'close'):
             app_iter.close()
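
The first four examples differ mainly in how they normalize CONTENT_LENGTH, which a WSGI server may omit entirely or pass through as an empty string. A minimal standalone illustration of why the "or 0" fallback covers both cases (the values below are made up):

for raw in (None, '', '42'):      # missing header, empty header, normal header
    print(int(raw or 0))          # -> 0, 0, 42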
Example #5
    def __call__(self, environ, start_response):
        content_length = environ.get('CONTENT_LENGTH', 0)
        content_length = 0 if content_length == '' else content_length

        stream = LimitedStream(environ.get('wsgi.input'),
                               int(content_length))
        environ['wsgi.input'] = stream
        app_iter = self.app(environ, start_response)
        try:
            stream.exhaust()
            for event in app_iter:
                yield event
        finally:
            if hasattr(app_iter, 'close'):
                app_iter.close()
Example #6
    def __call__(self, environ, start_response):
        length = 0
        if 'CONTENT_LENGTH' in environ:
            try:
                length = int(environ['CONTENT_LENGTH'])
            except ValueError:
                pass
        stream = LimitedStream(environ['wsgi.input'], length)

        environ['wsgi.input'] = stream
        app_iter = self.app(environ, start_response)
        try:
            stream.exhaust()
            for event in app_iter:
                yield event
        finally:
            if hasattr(app_iter, 'close'):
                app_iter.close()
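
Each __call__ above belongs to a small WSGI middleware that keeps the wrapped application on self.app. A minimal sketch of how such a middleware could be assembled and applied, assuming LimitedStream can be imported from werkzeug.wsgi (the import location has moved between Werkzeug versions) and using a hypothetical class name:

from werkzeug.wsgi import LimitedStream  # import path may differ by Werkzeug version


class LimitRequestBody:                  # hypothetical name, not from the examples
    def __init__(self, app):
        self.app = app                   # the wrapped WSGI application

    def __call__(self, environ, start_response):
        # cap wsgi.input at the declared Content-Length, as in the examples above
        stream = LimitedStream(environ['wsgi.input'],
                               int(environ.get('CONTENT_LENGTH') or 0))
        environ['wsgi.input'] = stream
        app_iter = self.app(environ, start_response)
        try:
            stream.exhaust()             # drain whatever the application left unread
            for event in app_iter:
                yield event
        finally:
            if hasattr(app_iter, 'close'):
                app_iter.close()


# usage: wrap any existing WSGI application
# application = LimitRequestBody(application)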
Example #7
def parse_multipart(
    file,
    boundary,
    content_length,
    stream_factory=None,
    charset="utf-8",
    errors="ignore",
    buffer_size=10 * 1024,
    max_form_memory_size=None,
):
    """Parse a multipart/form-data stream.  This is invoked by
    :func:`utils.parse_form_data` if the content type matches.  Currently it
    exists for internal usage only, but could be exposed as a separate
    function if it turns out to be useful and if we consider the API stable.
    """
    # XXX: this function does not support multipart/mixed.  I don't know of
    #      any browser that supports this, but it should be implemented
    #      nonetheless.

    # make sure the buffer size is divisible by four so that we can base64
    # decode chunk by chunk
    assert buffer_size % 4 == 0, "buffer size has to be divisible by 4"
    # also the buffer size has to be at least 1024 bytes long or long headers
    # will freak out the system
    assert buffer_size >= 1024, "buffer size has to be at least 1KB"

    if stream_factory is None:
        stream_factory = default_stream_factory

    if not boundary:
        raise ValueError("Missing boundary")
    if not is_valid_multipart_boundary(boundary):
        raise ValueError("Invalid boundary: %s" % boundary)
    if len(boundary) > buffer_size:  # pragma: no cover
        # this should never happen because we check for a minimum size
        # of 1024 and boundaries may not be longer than 200.  The only
        # situation when this happens is for non-debug builds where
        # the assert is skipped.
        raise ValueError("Boundary longer than buffer size")

    total_content_length = content_length
    next_part = "--" + boundary
    last_part = next_part + "--"

    form = []
    files = []
    in_memory = 0

    # convert the file into a limited stream with iteration capabilities
    file = LimitedStream(file, content_length)
    iterator = chain(make_line_iter(file, buffer_size=buffer_size), _empty_string_iter)

    try:
        terminator = _find_terminator(iterator)
        if terminator != next_part:
            raise ValueError("Expected boundary at start of multipart data")

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)
            disposition = headers.get("content-disposition")
            if disposition is None:
                raise ValueError("Missing Content-Disposition header")
            disposition, extra = parse_options_header(disposition)
            name = extra.get("name")

            transfer_encoding = headers.get("content-transfer-encoding")
            try_decode = transfer_encoding is not None and transfer_encoding in _supported_multipart_encodings

            filename = extra.get("filename")

            # if no content type is given we stream into memory.  A list is
            # used as a temporary container.
            if filename is None:
                is_file = False
                container = []
                _write = container.append
                guard_memory = max_form_memory_size is not None

            # otherwise we parse the rest of the headers and ask the stream
            # factory for something we can write in.
            else:
                content_type = headers.get("content-type")
                content_type = parse_options_header(content_type)[0] or "text/plain"
                is_file = True
                guard_memory = False
                if filename is not None:
                    filename = _fix_ie_filename(_decode_unicode(filename, charset, errors))
                try:
                    content_length = int(headers["content-length"])
                except (KeyError, ValueError):
                    content_length = 0
                container = stream_factory(total_content_length, content_type, filename, content_length)
                _write = container.write

            buf = ""
            for line in iterator:
                if not line:
                    raise ValueError("unexpected end of stream")

                if line[:2] == "--":
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if try_decode:
                    try:
                        line = line.decode(transfer_encoding)
                    except:
                        raise ValueError("could not decode transfer " "encoded chunk")

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    _write(buf)
                    buf = ""

                # If the line ends with windows CRLF we write everything except
                # the last two bytes.  In all other cases however we write
                # everything except the last byte.  If it was a newline, that's
                # fine, otherwise it does not matter because we will write it
                # the next iteration.  this ensures we do not write the
                # final newline into the stream.  That way we do not have to
                # truncate the stream.
                if line[-2:] == "\r\n":
                    buf = "\r\n"
                    cutoff = -2
                else:
                    buf = line[-1]
                    cutoff = -1
                _write(line[:cutoff])

                # if we write into memory and there is a memory size limit we
                # count the number of bytes in memory and raise an exception if
                # there is too much data in memory.
                if guard_memory:
                    in_memory += len(line)
                    if in_memory > max_form_memory_size:
                        from werkzeug.exceptions import RequestEntityTooLarge

                        raise RequestEntityTooLarge()
            else:  # pragma: no cover
                raise ValueError("unexpected end of part")

            if is_file:
                container.seek(0)
                files.append((name, FileStorage(container, filename, name, content_type, content_length, headers)))
            else:
                form.append((name, _decode_unicode("".join(container), charset, errors)))
    finally:
        # make sure the whole input stream is read
        file.exhaust()

    return form, files
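
The buffer_size asserts near the top of this example exist because a base64 transfer encoding can only be decoded chunk by chunk when each chunk's length is a multiple of four. A small self-contained illustration with a made-up payload:

import base64

payload = base64.b64encode(b'abcdefghij')                        # 16 base64 characters
chunks = [payload[i:i + 8] for i in range(0, len(payload), 8)]   # 8 is divisible by 4
print(b''.join(base64.b64decode(chunk) for chunk in chunks))     # -> b'abcdefghij'
# a chunk size that is not a multiple of 4 would raise binascii.Error here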
Example #8
def parse_multipart(file,
                    boundary,
                    content_length,
                    stream_factory=None,
                    charset='utf-8',
                    errors='ignore',
                    buffer_size=10240,
                    max_form_memory_size=None):
    if stream_factory is None:
        stream_factory = default_stream_factory
    if not boundary:
        raise ValueError('Missing boundary')
    if not is_valid_multipart_boundary(boundary):
        raise ValueError('Invalid boundary: %s' % boundary)
    if len(boundary) > buffer_size:
        raise ValueError('Boundary longer than buffer size')
    total_content_length = content_length
    next_part = '--' + boundary
    last_part = next_part + '--'
    form = []
    files = []
    in_memory = 0
    file = LimitedStream(file, content_length)
    iterator = chain(make_line_iter(file, buffer_size=buffer_size),
                     _empty_string_iter)
    try:
        terminator = _find_terminator(iterator)
        if terminator != next_part:
            raise ValueError('Expected boundary at start of multipart data')
        while terminator != last_part:
            headers = parse_multipart_headers(iterator)
            disposition = headers.get('content-disposition')
            if disposition is None:
                raise ValueError('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            name = extra.get('name')
            transfer_encoding = headers.get('content-transfer-encoding')
            try_decode = transfer_encoding is not None and transfer_encoding in _supported_multipart_encodings
            filename = extra.get('filename')
            if filename is None:
                is_file = False
                container = []
                _write = container.append
                guard_memory = max_form_memory_size is not None
            else:
                content_type = headers.get('content-type')
                content_type = parse_options_header(
                    content_type)[0] or 'text/plain'
                is_file = True
                guard_memory = False
                if filename is not None:
                    filename = _fix_ie_filename(
                        _decode_unicode(filename, charset, errors))
                try:
                    content_length = int(headers['content-length'])
                except (KeyError, ValueError):
                    content_length = 0

                container = stream_factory(total_content_length, content_type,
                                           filename, content_length)
                _write = container.write
            buf = ''
            for line in iterator:
                if not line:
                    raise ValueError('unexpected end of stream')
                if line[:2] == '--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break
                if try_decode:
                    try:
                        line = line.decode(transfer_encoding)
                    except:
                        raise ValueError(
                            'could not decode transfer encoded chunk')

                if buf:
                    _write(buf)
                    buf = ''
                if line[-2:] == '\r\n':
                    buf = '\r\n'
                    cutoff = -2
                else:
                    buf = line[-1]
                    cutoff = -1
                _write(line[:cutoff])
                if guard_memory:
                    in_memory += len(line)
                    if in_memory > max_form_memory_size:
                        from werkzeug.exceptions import RequestEntityTooLarge
                        raise RequestEntityTooLarge()
            else:
                raise ValueError('unexpected end of part')

            if is_file:
                container.seek(0)
                files.append(
                    (name,
                     FileStorage(container, filename, name, content_type,
                                 content_length, headers)))
            else:
                form.append(
                    (name, _decode_unicode(''.join(container), charset,
                                           errors)))

    finally:
        file.exhaust()

    return (form, files)
Example #9
def parse_multipart(file, boundary, content_length, stream_factory = None, charset = 'utf-8', errors = 'ignore', buffer_size = 10240, max_form_memory_size = None):
    if stream_factory is None:
        stream_factory = default_stream_factory
    if not boundary:
        raise ValueError('Missing boundary')
    if not is_valid_multipart_boundary(boundary):
        raise ValueError('Invalid boundary: %s' % boundary)
    if len(boundary) > buffer_size:
        raise ValueError('Boundary longer than buffer size')
    total_content_length = content_length
    next_part = '--' + boundary
    last_part = next_part + '--'
    form = []
    files = []
    in_memory = 0
    file = LimitedStream(file, content_length)
    iterator = chain(make_line_iter(file, buffer_size=buffer_size), _empty_string_iter)
    try:
        terminator = _find_terminator(iterator)
        if terminator != next_part:
            raise ValueError('Expected boundary at start of multipart data')
        while terminator != last_part:
            headers = parse_multipart_headers(iterator)
            disposition = headers.get('content-disposition')
            if disposition is None:
                raise ValueError('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            name = extra.get('name')
            transfer_encoding = headers.get('content-transfer-encoding')
            try_decode = transfer_encoding is not None and transfer_encoding in _supported_multipart_encodings
            filename = extra.get('filename')
            if filename is None:
                is_file = False
                container = []
                _write = container.append
                guard_memory = max_form_memory_size is not None
            else:
                content_type = headers.get('content-type')
                content_type = parse_options_header(content_type)[0] or 'text/plain'
                is_file = True
                guard_memory = False
                if filename is not None:
                    filename = _fix_ie_filename(_decode_unicode(filename, charset, errors))
                try:
                    content_length = int(headers['content-length'])
                except (KeyError, ValueError):
                    content_length = 0

                container = stream_factory(total_content_length, content_type, filename, content_length)
                _write = container.write
            buf = ''
            for line in iterator:
                if not line:
                    raise ValueError('unexpected end of stream')
                if line[:2] == '--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break
                if try_decode:
                    try:
                        line = line.decode(transfer_encoding)
                    except:
                        raise ValueError('could not decode transfer encoded chunk')

                if buf:
                    _write(buf)
                    buf = ''
                if line[-2:] == '\r\n':
                    buf = '\r\n'
                    cutoff = -2
                else:
                    buf = line[-1]
                    cutoff = -1
                _write(line[:cutoff])
                if guard_memory:
                    in_memory += len(line)
                    if in_memory > max_form_memory_size:
                        from werkzeug.exceptions import RequestEntityTooLarge
                        raise RequestEntityTooLarge()
            else:
                raise ValueError('unexpected end of part')

            if is_file:
                container.seek(0)
                files.append((name, FileStorage(container, filename, name, content_type, content_length, headers)))
            else:
                form.append((name, _decode_unicode(''.join(container), charset, errors)))

    finally:
        file.exhaust()

    return (form, files)
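
The buf/cutoff handling inside the loop above holds back each line's trailing newline and only flushes it when another data line follows, so the line break that precedes the closing boundary never ends up in the part's data. A tiny standalone illustration of that trick with made-up lines:

lines = ['hello\r\n', 'world\r\n']   # data lines of one part; the boundary comes next
out, buf = [], ''
for line in lines:
    if buf:                          # flush the delimiter held back last iteration
        out.append(buf)
    if line[-2:] == '\r\n':
        buf, cutoff = '\r\n', -2
    else:
        buf, cutoff = line[-1], -1
    out.append(line[:cutoff])
print(repr(''.join(out)))            # 'hello\r\nworld' -- no trailing newline written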
Example #10
def parse_multipart(file,
                    boundary,
                    content_length,
                    stream_factory=None,
                    charset='utf-8',
                    errors='ignore',
                    buffer_size=10 * 1024,
                    max_form_memory_size=None):
    """Parse a multipart/form-data stream.  This is invoked by
    :func:`utils.parse_form_data` if the content type matches.  Currently it
    exists for internal usage only, but could be exposed as a separate
    function if it turns out to be useful and if we consider the API stable.
    """
    # XXX: this function does not support multipart/mixed.  I don't know of
    #      any browser that supports this, but it should be implemented
    #      nonetheless.

    # make sure the buffer size is divisible by four so that we can base64
    # decode chunk by chunk
    assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
    # also the buffer size has to be at least 1024 bytes long or long headers
    # will freak out the system
    assert buffer_size >= 1024, 'buffer size has to be at least 1KB'

    if stream_factory is None:
        stream_factory = default_stream_factory

    if not boundary:
        raise ValueError('Missing boundary')
    if not is_valid_multipart_boundary(boundary):
        raise ValueError('Invalid boundary: %s' % boundary)
    if len(boundary) > buffer_size:
        raise ValueError('Boundary longer than buffer size')

    total_content_length = content_length
    next_part = '--' + boundary
    last_part = next_part + '--'

    form = []
    files = []
    in_memory = 0

    # convert the file into a limited stream with iteration capabilities
    file = LimitedStream(file, content_length)
    iterator = chain(make_line_iter(file, buffer_size=buffer_size),
                     _empty_string_iter)

    try:
        terminator = _find_terminator(iterator)
        if terminator != next_part:
            raise ValueError('Expected boundary at start of multipart data')

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)
            disposition = headers.get('content-disposition')
            if disposition is None:
                raise ValueError('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            name = extra.get('name')

            transfer_encoding = headers.get('content-transfer-encoding')
            try_decode = transfer_encoding is not None and \
                         transfer_encoding in _supported_multipart_encodings

            filename = extra.get('filename')

            # if no content type is given we stream into memory.  A list is
            # used as a temporary container.
            if filename is None:
                is_file = False
                container = []
                _write = container.append
                guard_memory = max_form_memory_size is not None

            # otherwise we parse the rest of the headers and ask the stream
            # factory for something we can write in.
            else:
                content_type = headers.get('content-type')
                content_type = parse_options_header(content_type)[0] \
                    or 'text/plain'
                is_file = True
                guard_memory = False
                if filename is not None:
                    filename = _fix_ie_filename(
                        _decode_unicode(filename, charset, errors))
                try:
                    content_length = int(headers['content-length'])
                except (KeyError, ValueError):
                    content_length = 0
                container = stream_factory(total_content_length, content_type,
                                           filename, content_length)
                _write = container.write

            buf = ''
            for line in iterator:
                if not line:
                    raise ValueError('unexpected end of stream')

                if line[:2] == '--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if try_decode:
                    try:
                        line = line.decode(transfer_encoding)
                    except:
                        raise ValueError('could not decode transfer '
                                         'encoded chunk')

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    _write(buf)
                    buf = ''

                # If the line ends with windows CRLF we write everything except
                # the last two bytes.  In all other cases however we write
                # everything except the last byte.  If it was a newline, that's
                # fine, otherwise it does not matter because we will write it
                # the next iteration.  this ensures we do not write the
                # final newline into the stream.  That way we do not have to
                # truncate the stream.
                if line[-2:] == '\r\n':
                    buf = '\r\n'
                    cutoff = -2
                else:
                    buf = line[-1]
                    cutoff = -1
                _write(line[:cutoff])

                # if we write into memory and there is a memory size limit we
                # count the number of bytes in memory and raise an exception if
                # there is too much data in memory.
                if guard_memory:
                    in_memory += len(line)
                    if in_memory > max_form_memory_size:
                        from werkzeug.exceptions import RequestEntityTooLarge
                        raise RequestEntityTooLarge()
            else:
                raise ValueError('unexpected end of part')

            if is_file:
                container.seek(0)
                files.append(
                    (name,
                     FileStorage(container, filename, name, content_type,
                                 content_length, headers)))
            else:
                form.append(
                    (name, _decode_unicode(''.join(container), charset,
                                           errors)))
    finally:
        # make sure the whole input stream is read
        file.exhaust()

    return form, files
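
As the docstring notes, this parser is normally reached through Werkzeug's public form-parsing entry point rather than called directly. A minimal sketch of that path, assuming parse_form_data lives in werkzeug.formparser (older releases exposed it elsewhere) and that the installed version accepts a bytes body; the request below is purely illustrative:

from io import BytesIO
from werkzeug.formparser import parse_form_data   # module path varies across versions

body = (b'--frontier\r\n'
        b'Content-Disposition: form-data; name="field"\r\n'
        b'\r\n'
        b'value\r\n'
        b'--frontier--\r\n')
environ = {
    'REQUEST_METHOD': 'POST',
    'CONTENT_TYPE': 'multipart/form-data; boundary=frontier',
    'CONTENT_LENGTH': str(len(body)),
    'wsgi.input': BytesIO(body),
}
stream, form, files = parse_form_data(environ)     # returns (stream, form, files)
print(form['field'])                               # -> 'value'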