Example #1
def test_multi_part_line_breaks():
    test_stream = StringIO('abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK')
    lines = list(make_line_iter(test_stream, limit=1024, buffer_size=16))
    assert lines == ['abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK']

    test_stream = StringIO('abc\r\nThis line is broken by the buffer length.\r\nFoo bar baz')
    lines = list(make_line_iter(test_stream, limit=1024, buffer_size=24))
    assert lines == ['abc\r\n', 'This line is broken by the buffer length.\r\n', 'Foo bar baz']
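
A note on what these tests exercise: make_line_iter reassembles lines that straddle buffer boundaries while preserving the original terminators. As a rough mental model (a minimal sketch, not Werkzeug's actual implementation), the behaviour resembles reading fixed-size chunks and re-splitting them with keepends semantics:

from io import StringIO

def naive_line_iter(stream, buffer_size=16):
    # Sketch: read fixed-size chunks, keep the possibly incomplete tail in
    # a carry buffer, and emit a line only once its terminator is complete.
    carry = ''
    while True:
        chunk = stream.read(buffer_size)
        if not chunk:
            break
        pieces = (carry + chunk).splitlines(True)
        carry = pieces.pop()  # may still be missing its terminator
        for line in pieces:
            yield line
        if carry.endswith('\n'):
            yield carry
            carry = ''
    if carry:
        yield carry  # trailing data without a final newline

assert list(naive_line_iter(StringIO('abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'))) == [
    'abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK']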
Example #2
    def test_multi_part_line_breaks(self):
        data = 'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
        test_stream = StringIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
        self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])

        data = 'abc\r\nThis line is broken by the buffer length.\r\nFoo bar baz'
        test_stream = StringIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
        self.assert_equal(lines, ['abc\r\n', 'This line is broken by the buffer length.\r\n', 'Foo bar baz'])
Example #3
    def test_multi_part_line_breaks_bytes(self):
        data = b"abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK"
        test_stream = BytesIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
        self.assert_equal(lines, [b"abcdef\r\n", b"ghijkl\r\n", b"mnopqrstuvwxyz\r\n", b"ABCDEFGHIJK"])

        data = b"abc\r\nThis line is broken by the buffer length." b"\r\nFoo bar baz"
        test_stream = BytesIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
        self.assert_equal(lines, [b"abc\r\n", b"This line is broken by the " b"buffer length.\r\n", b"Foo bar baz"])
Example #4
def test_multi_part_line_breaks():
    data = "abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK"
    test_stream = NativeStringIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
    assert lines == ["abcdef\r\n", "ghijkl\r\n", "mnopqrstuvwxyz\r\n", "ABCDEFGHIJK"]

    data = "abc\r\nThis line is broken by the buffer length." "\r\nFoo bar baz"
    test_stream = NativeStringIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
    assert lines == ["abc\r\n", "This line is broken by the buffer " "length.\r\n", "Foo bar baz"]
Example #5
def test_multi_part_line_breaks():
    data = "abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK"
    test_stream = NativeStringIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
    assert lines == ["abcdef\r\n", "ghijkl\r\n", "mnopqrstuvwxyz\r\n", "ABCDEFGHIJK"]

    data = "abc\r\nThis line is broken by the buffer length.\r\nFoo bar baz"
    test_stream = NativeStringIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
    assert lines == [
        "abc\r\n",
        "This line is broken by the buffer length.\r\n",
        "Foo bar baz",
    ]
Example #6
def test_multi_part_line_breaks():
    test_stream = StringIO('abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK')
    lines = list(make_line_iter(test_stream, limit=1024, buffer_size=16))
    assert lines == [
        'abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'
    ]

    test_stream = StringIO(
        'abc\r\nThis line is broken by the buffer length.\r\nFoo bar baz')
    lines = list(make_line_iter(test_stream, limit=1024, buffer_size=24))
    assert lines == [
        'abc\r\n', 'This line is broken by the buffer length.\r\n',
        'Foo bar baz'
    ]
Example #7
def test_multi_part_line_breaks_bytes():
    data = b'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
    test_stream = BytesIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                     buffer_size=16))
    assert lines == [b'abcdef\r\n', b'ghijkl\r\n', b'mnopqrstuvwxyz\r\n',
                     b'ABCDEFGHIJK']

    data = b'abc\r\nThis line is broken by the buffer length.' \
        b'\r\nFoo bar baz'
    test_stream = BytesIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                     buffer_size=24))
    assert lines == [b'abc\r\n', b'This line is broken by the buffer '
                     b'length.\r\n', b'Foo bar baz']
Example #8
def test_multi_part_line_breaks_problematic():
    data = 'abc\rdef\r\nghi'
    for _ in range(1, 10):
        test_stream = NativeStringIO(data)
        lines = list(
            wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=4))
        assert lines == ['abc\r', 'def\r\n', 'ghi']
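
For reference, the expected output above matches Python's own keepends splitting, in which a lone '\r' also terminates a line:

assert 'abc\rdef\r\nghi'.splitlines(True) == ['abc\r', 'def\r\n', 'ghi']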
Example #9
def test_multi_part_line_breaks_problematic():
    data = "abc\rdef\r\nghi"
    for _ in range(1, 10):
        test_stream = io.StringIO(data)
        lines = list(
            wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=4))
        assert lines == ["abc\r", "def\r\n", "ghi"]
Example #10
    def test_multi_part_line_breaks_problematic(self):
        data = 'abc\rdef\r\nghi'
        for _ in range(1, 10):
            test_stream = NativeStringIO(data)
            lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                             buffer_size=4))
            self.assert_equal(lines, ['abc\r', 'def\r\n', 'ghi'])
Example #11
def test_lines_longer_buffer_size():
    data = "1234567890\n1234567890\n"
    for bufsize in range(1, 15):
        lines = list(
            wsgi.make_line_iter(io.StringIO(data),
                                limit=len(data),
                                buffer_size=bufsize))
        assert lines == ["1234567890\n", "1234567890\n"]
Example #12
def test_lines_longer_buffer_size():
    data = '1234567890\n1234567890\n'
    for bufsize in range(1, 15):
        lines = list(
            wsgi.make_line_iter(NativeStringIO(data),
                                limit=len(data),
                                buffer_size=bufsize))
        assert lines == ['1234567890\n', '1234567890\n']
Example #13
    def test_lines_longer_buffer_size(self):
        data = '1234567890\n1234567890\n'
        for bufsize in xrange(1, 15):
            lines = list(
                wsgi.make_line_iter(NativeStringIO(data),
                                    limit=len(data),
                                    buffer_size=bufsize))
            self.assert_equal(lines, ['1234567890\n', '1234567890\n'])
Example #14
def insert_data(environ, start_response):
    with eliot.start_action(action_type='api:insert-data') as action:
        instream = make_line_iter(
            get_input_stream(environ, safe_fallback=False))
        lines = (json.loads(line.decode('utf-8')) for line in instream)
        keys = db.insert(lines)
        action.add_success_fields(inserted_count=len(keys))
        return {'message': 'Inserted OK', 'keys': keys}
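
The handler above implies a newline-delimited JSON (NDJSON) request body. A sketch of building such a payload on the client side (the record contents here are made up for illustration):

import json

records = [{"id": 1}, {"id": 2}]
# one JSON document per line; each decoded line parses independently
body = "\n".join(json.dumps(r) for r in records).encode("utf-8")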
Example #15
def test_lines_longer_buffer_size():
    data = "1234567890\n1234567890\n"
    for bufsize in range(1, 15):
        lines = list(
            wsgi.make_line_iter(
                NativeStringIO(data), limit=len(data), buffer_size=bufsize
            )
        )
        assert lines == ["1234567890\n", "1234567890\n"]
Example #16
def test_multi_part_line_breaks_bytes():
    data = b"abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK"
    test_stream = BytesIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
    assert lines == [
        b"abcdef\r\n",
        b"ghijkl\r\n",
        b"mnopqrstuvwxyz\r\n",
        b"ABCDEFGHIJK",
    ]

    data = b"abc\r\nThis line is broken by the buffer length." b"\r\nFoo bar baz"
    test_stream = BytesIO(data)
    lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
    assert lines == [
        b"abc\r\n",
        b"This line is broken by the buffer " b"length.\r\n",
        b"Foo bar baz",
    ]
Example #17
def test_lines_longer_buffer_size_cap():
    data = "1234567890\n1234567890\n"
    for bufsize in range(1, 15):
        lines = list(
            wsgi.make_line_iter(
                io.StringIO(data),
                limit=len(data),
                buffer_size=bufsize,
                cap_at_buffer=True,
            ))
        assert len(lines[0]) == bufsize or lines[0].endswith("\n")
Example #18
def test_lines_longer_buffer_size_cap():
    data = "1234567890\n1234567890\n"
    for bufsize in range(1, 15):
        lines = list(
            wsgi.make_line_iter(
                NativeStringIO(data),
                limit=len(data),
                buffer_size=bufsize,
                cap_at_buffer=True,
            )
        )
        assert len(lines[0]) == bufsize or lines[0].endswith("\n")
Example #19
    def parse(self, file, boundary, content_length):
        next_part = '--' + boundary
        last_part = next_part + '--'

        form = []
        files = []
        in_memory = 0

        iterator = chain(make_line_iter(file, limit=content_length,
                                        buffer_size=self.buffer_size),
                         _empty_string_iter)

        terminator = self._find_terminator(iterator)
        if terminator != next_part:
            self.fail('Expected boundary at start of multipart data')

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)

            disposition = headers.get('content-disposition')
            if disposition is None:
                self.fail('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            transfer_encoding = self.get_part_encoding(headers)
            name = extra.get('name')
            filename = extra.get('filename')
            part_charset = self.get_part_charset(headers)

            # if no filename is given we stream the value into memory.  A
            # list is used as a temporary container.
            if filename is None:
                is_file = False
                container = []
                _write = container.append
                guard_memory = self.max_form_memory_size is not None

            # otherwise we parse the rest of the headers and ask the stream
            # factory for something we can write in.
            else:
                is_file = True
                guard_memory = False
                filename, container = self.start_file_streaming(
                    filename, headers, content_length)
                _write = container.write

            buf = ''
            for line in iterator:
                if not line:
                    self.fail('unexpected end of stream')

                if line[:2] == '--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if transfer_encoding is not None:
                    try:
                        line = line.decode(transfer_encoding)
                    except Exception:
                        self.fail('could not decode transfer encoded chunk')

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    _write(buf)
                    buf = ''

                # If the line ends with a Windows CRLF we write everything
                # except the last two bytes.  In all other cases we write
                # everything except the last byte.  If that byte was a
                # newline, that's fine; otherwise it will be written on the
                # next iteration.  This ensures we never write the final
                # newline into the stream, so we do not have to truncate
                # it.  We do, however, have to make sure that anything
                # other than a newline left in the buffer is written out.
                if line[-2:] == '\r\n':
                    buf = '\r\n'
                    cutoff = -2
                else:
                    buf = line[-1]
                    cutoff = -1
                _write(line[:cutoff])

                # if we write into memory and there is a memory size limit we
                # count the number of bytes in memory and raise an exception if
                # there is too much data in memory.
                if guard_memory:
                    in_memory += len(line)
                    if in_memory > self.max_form_memory_size:
                        self.in_memory_threshold_reached(in_memory)
            else:  # pragma: no cover
                raise ValueError('unexpected end of part')

            # if we have a leftover in the buffer that is not a newline
            # character we have to flush it, otherwise we will chop off
            # certain values.
            if buf not in ('', '\r', '\n', '\r\n'):
                _write(buf)

            if is_file:
                container.seek(0)
                files.append((name, FileStorage(container, filename, name,
                                                headers=headers)))
            else:
                form.append((name, _decode_unicode(''.join(container),
                                                   part_charset, self.errors)))

        return self.cls(form), self.cls(files)
Example #20
    def parse_lines(self, file, boundary, content_length):
        """Generate parts of
        ``('begin_form', (headers, name))``
        ``('begin_file', (headers, name, filename))``
        ``('cont', bytestring)``
        ``('end', None)``

        Always obeys the grammar
        parts = ( begin_form cont* end |
                  begin_file cont* end )*
        """
        next_part = b'--' + boundary
        last_part = next_part + b'--'

        iterator = chain(make_line_iter(file, limit=content_length,
                                        buffer_size=self.buffer_size),
                         _empty_string_iter)

        terminator = self._find_terminator(iterator)

        if terminator == last_part:
            return
        elif terminator != next_part:
            self.fail('Expected boundary at start of multipart data')

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)

            disposition = headers.get('content-disposition')
            if disposition is None:
                self.fail('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            transfer_encoding = self.get_part_encoding(headers)
            name = extra.get('name')
            filename = extra.get('filename')

            # if no filename is given the part is a plain form field.
            if filename is None:
                yield _begin_form, (headers, name)

            # otherwise the part is a file upload.
            else:
                yield _begin_file, (headers, name, filename)

            buf = b''
            for line in iterator:
                if not line:
                    self.fail('unexpected end of stream')

                if line[:2] == b'--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if transfer_encoding is not None:
                    if transfer_encoding == 'base64':
                        transfer_encoding = 'base64_codec'
                    try:
                        line = codecs.decode(line, transfer_encoding)
                    except Exception:
                        self.fail('could not decode transfer encoded chunk')

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    yield _cont, buf
                    buf = b''

                # If the line ends with a Windows CRLF we write everything
                # except the last two bytes.  In all other cases we write
                # everything except the last byte.  If that byte was a
                # newline, that's fine; otherwise it will be written on the
                # next iteration.  This ensures we never write the final
                # newline into the stream, so we do not have to truncate
                # it.  We do, however, have to make sure that anything
                # other than a newline left in the buffer is written out.
                if line[-2:] == b'\r\n':
                    buf = b'\r\n'
                    cutoff = -2
                else:
                    buf = line[-1:]
                    cutoff = -1
                yield _cont, line[:cutoff]

            else:  # pragma: no cover
                raise ValueError('unexpected end of part')

            # if we have a leftover in the buffer that is not a newline
            # character we have to flush it, otherwise we will chop off
            # certain values.
            if buf not in (b'', b'\r', b'\n', b'\r\n'):
                yield _cont, buf

            yield _end, None
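
Given the grammar in the docstring, a consumer of this generator can dispatch on the event tag. A minimal sketch (assuming the event tags compare equal to the strings shown in the docstring, as the documented tuples suggest):

def collect_parts(events):
    # Fold a ('begin_*', ...) cont* ('end', None) event stream into
    # (metadata, payload) pairs.
    parts = []
    current = None
    for event, payload in events:
        if event in ('begin_form', 'begin_file'):
            current = (payload, [])
        elif event == 'cont':
            current[1].append(payload)
        elif event == 'end':
            parts.append((current[0], b''.join(current[1])))
            current = None
    return parts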
Example #21
    def parse_lines(self, file, boundary, content_length):
        """Generate parts of
        ``('begin_form', (headers, name))``
        ``('begin_file', (headers, name, filename))``
        ``('cont', bytestring)``
        ``('end', None)``

        Always obeys the grammar
        parts = ( begin_form cont* end |
                  begin_file cont* end )*
        """
        next_part = '--' + boundary
        last_part = next_part + '--'

        iterator = chain(
            make_line_iter(file,
                           limit=content_length,
                           buffer_size=self.buffer_size), _empty_string_iter)

        terminator = self._find_terminator(iterator)
        if terminator != next_part:
            self.fail('Expected boundary at start of multipart data')

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)

            disposition = headers.get('content-disposition')
            if disposition is None:
                self.fail('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            transfer_encoding = self.get_part_encoding(headers)
            name = extra.get('name')
            filename = extra.get('filename')

            # if no filename is given the part is a plain form field.
            if filename is None:
                yield _begin_form, (headers, name)

            # otherwise the part is a file upload.
            else:
                yield _begin_file, (headers, name, filename)

            buf = ''
            for line in iterator:
                if not line:
                    self.fail('unexpected end of stream')

                if line[:2] == '--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if transfer_encoding is not None:
                    try:
                        line = line.decode(transfer_encoding)
                    except Exception:
                        self.fail('could not decode transfer encoded chunk')

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    yield _cont, buf
                    buf = ''

                # If the line ends with a Windows CRLF we write everything
                # except the last two bytes.  In all other cases we write
                # everything except the last byte.  If that byte was a
                # newline, that's fine; otherwise it will be written on the
                # next iteration.  This ensures we never write the final
                # newline into the stream, so we do not have to truncate
                # it.  We do, however, have to make sure that anything
                # other than a newline left in the buffer is written out.
                if line[-2:] == '\r\n':
                    buf = '\r\n'
                    cutoff = -2
                else:
                    buf = line[-1]
                    cutoff = -1
                yield _cont, line[:cutoff]

            else:  # pragma: no cover
                raise ValueError('unexpected end of part')

            # if we have a leftover in the buffer that is not a newline
            # character we have to flush it, otherwise we will chop off
            # certain values.
            if buf not in ('', '\r', '\n', '\r\n'):
                yield _cont, buf

            yield _end, None
Example #22
def test_iter_functions_support_iterators():
    data = ['abcdef\r\nghi', 'jkl\r\nmnopqrstuvwxyz\r', '\nABCDEFGHIJK']
    lines = list(wsgi.make_line_iter(data))
    assert lines == [
        'abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'
    ]
Example #23
def test_multi_part_line_breaks_problematic():
    data = "abc\rdef\r\nghi"
    for _ in range(1, 10):
        test_stream = NativeStringIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=4))
        assert lines == ["abc\r", "def\r\n", "ghi"]
Example #24
    def test_lines_longer_buffer_size(self):
        data = '1234567890\n1234567890\n'
        for bufsize in xrange(1, 15):
            lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
                                             buffer_size=bufsize))
            self.assert_equal(lines, ['1234567890\n', '1234567890\n'])
Example #25
    def test_iter_functions_support_iterators(self):
        data = ['abcdef\r\nghi', 'jkl\r\nmnopqrstuvwxyz\r', '\nABCDEFGHIJK']
        lines = list(wsgi.make_line_iter(data))
        self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
                                  'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
Example #26
def test_lines_longer_buffer_size_cap():
    data = '1234567890\n1234567890\n'
    for bufsize in range(1, 15):
        lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
                                         buffer_size=4, cap_at_buffer=True))
        assert lines == ['1234', '5678', '90\n', '1234', '5678', '90\n']
Example #27
def parse_multipart(file,
                    boundary,
                    content_length,
                    stream_factory=None,
                    charset='utf-8',
                    errors='ignore',
                    buffer_size=10 * 1024,
                    max_form_memory_size=None):
    """Parse a multipart/form-data stream.  This is invoked by
    :func:`utils.parse_form_data` if the content type matches.  Currently it
    exists for internal usage only, but could be exposed as a separate
    function if it turns out to be useful and if we consider the API stable.
    """
    # XXX: this function does not support multipart/mixed.  I don't know of
    #      any browser that supports this, but it should be implemented
    #      nonetheless.

    # make sure the buffer size is divisible by four so that we can base64
    # decode chunk by chunk
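    # (base64 maps every 3 input bytes to 4 output characters, so any
    # prefix whose length is a multiple of 4 decodes cleanly on its own)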
    assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
    # also the buffer size has to be at least 1024 bytes long or long headers
    # will freak out the system
    assert buffer_size >= 1024, 'buffer size has to be at least 1KB'

    if stream_factory is None:
        stream_factory = default_stream_factory

    if not boundary:
        raise ValueError('Missing boundary')
    if not is_valid_multipart_boundary(boundary):
        raise ValueError('Invalid boundary: %s' % boundary)
    if len(boundary) > buffer_size:
        raise ValueError('Boundary longer than buffer size')

    total_content_length = content_length
    next_part = '--' + boundary
    last_part = next_part + '--'

    form = []
    files = []
    in_memory = 0

    # convert the file into a limited stream with iteration capabilities
    file = LimitedStream(file, content_length)
    iterator = chain(make_line_iter(file, buffer_size=buffer_size),
                     _empty_string_iter)

    try:
        terminator = _find_terminator(iterator)
        if terminator != next_part:
            raise ValueError('Expected boundary at start of multipart data')

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)
            disposition = headers.get('content-disposition')
            if disposition is None:
                raise ValueError('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            name = extra.get('name')

            transfer_encoding = headers.get('content-transfer-encoding')
            try_decode = transfer_encoding is not None and \
                         transfer_encoding in _supported_multipart_encodings

            filename = extra.get('filename')

            # if no filename is given we stream the value into memory.  A
            # list is used as a temporary container.
            if filename is None:
                is_file = False
                container = []
                _write = container.append
                guard_memory = max_form_memory_size is not None

            # otherwise we parse the rest of the headers and ask the stream
            # factory for something we can write in.
            else:
                content_type = headers.get('content-type')
                content_type = parse_options_header(content_type)[0] \
                    or 'text/plain'
                is_file = True
                guard_memory = False
                if filename is not None:
                    filename = _fix_ie_filename(
                        _decode_unicode(filename, charset, errors))
                try:
                    content_length = int(headers['content-length'])
                except (KeyError, ValueError):
                    content_length = 0
                container = stream_factory(total_content_length, content_type,
                                           filename, content_length)
                _write = container.write

            buf = ''
            for line in iterator:
                if not line:
                    raise ValueError('unexpected end of stream')

                if line[:2] == '--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if try_decode:
                    try:
                        line = line.decode(transfer_encoding)
                    except Exception:
                        raise ValueError('could not decode transfer '
                                         'encoded chunk')

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    _write(buf)
                    buf = ''

                # If the line ends with a Windows CRLF we write everything
                # except the last two bytes.  In all other cases we write
                # everything except the last byte.  If that byte was a
                # newline, that's fine; otherwise it will be written on the
                # next iteration.  This ensures we never write the final
                # newline into the stream, so we do not have to truncate it.
                if line[-2:] == '\r\n':
                    buf = '\r\n'
                    cutoff = -2
                else:
                    buf = line[-1]
                    cutoff = -1
                _write(line[:cutoff])

                # if we write into memory and there is a memory size limit we
                # count the number of bytes in memory and raise an exception if
                # there is too much data in memory.
                if guard_memory:
                    in_memory += len(line)
                    if in_memory > max_form_memory_size:
                        from werkzeug.exceptions import RequestEntityTooLarge
                        raise RequestEntityTooLarge()
            else:
                raise ValueError('unexpected end of part')

            if is_file:
                container.seek(0)
                files.append(
                    (name,
                     FileStorage(container, filename, name, content_type,
                                 content_length, headers)))
            else:
                form.append(
                    (name, _decode_unicode(''.join(container), charset,
                                           errors)))
    finally:
        # make sure the whole input stream is read
        file.exhaust()

    return form, files
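
For orientation, a rough sketch of how this internal helper is driven (illustrative only; the public entry point is utils.parse_form_data, and this assumes the Python 2-era native-string stream semantics of the code above):

from StringIO import StringIO

body = ('--frontier\r\n'
        'Content-Disposition: form-data; name="field"\r\n'
        '\r\n'
        'value\r\n'
        '--frontier--\r\n')
form, files = parse_multipart(StringIO(body), 'frontier', len(body))
# expected: form == [('field', u'value')] and files == []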
Example #28
def parse_multipart(file, boundary, content_length, stream_factory=None,
                    charset='utf-8', errors='ignore', buffer_size=10240,
                    max_form_memory_size=None):
    if stream_factory is None:
        stream_factory = default_stream_factory
    if not boundary:
        raise ValueError('Missing boundary')
    if not is_valid_multipart_boundary(boundary):
        raise ValueError('Invalid boundary: %s' % boundary)
    if len(boundary) > buffer_size:
        raise ValueError('Boundary longer than buffer size')
    total_content_length = content_length
    next_part = '--' + boundary
    last_part = next_part + '--'
    form = []
    files = []
    in_memory = 0
    file = LimitedStream(file, content_length)
    iterator = chain(make_line_iter(file, buffer_size=buffer_size), _empty_string_iter)
    try:
        terminator = _find_terminator(iterator)
        if terminator != next_part:
            raise ValueError('Expected boundary at start of multipart data')
        while terminator != last_part:
            headers = parse_multipart_headers(iterator)
            disposition = headers.get('content-disposition')
            if disposition is None:
                raise ValueError('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            name = extra.get('name')
            transfer_encoding = headers.get('content-transfer-encoding')
            try_decode = transfer_encoding is not None and transfer_encoding in _supported_multipart_encodings
            filename = extra.get('filename')
            if filename is None:
                is_file = False
                container = []
                _write = container.append
                guard_memory = max_form_memory_size is not None
            else:
                content_type = headers.get('content-type')
                content_type = parse_options_header(content_type)[0] or 'text/plain'
                is_file = True
                guard_memory = False
                if filename is not None:
                    filename = _fix_ie_filename(_decode_unicode(filename, charset, errors))
                try:
                    content_length = int(headers['content-length'])
                except (KeyError, ValueError):
                    content_length = 0

                container = stream_factory(total_content_length, content_type, filename, content_length)
                _write = container.write
            buf = ''
            for line in iterator:
                if not line:
                    raise ValueError('unexpected end of stream')
                if line[:2] == '--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break
                if try_decode:
                    try:
                        line = line.decode(transfer_encoding)
                    except Exception:
                        raise ValueError('could not decode transfer encoded chunk')

                if buf:
                    _write(buf)
                    buf = ''
                if line[-2:] == '\r\n':
                    buf = '\r\n'
                    cutoff = -2
                else:
                    buf = line[-1]
                    cutoff = -1
                _write(line[:cutoff])
                if guard_memory:
                    in_memory += len(line)
                    if in_memory > max_form_memory_size:
                        from werkzeug.exceptions import RequestEntityTooLarge
                        raise RequestEntityTooLarge()
            else:
                raise ValueError('unexpected end of part')

            if is_file:
                container.seek(0)
                files.append((name, FileStorage(container, filename, name, content_type, content_length, headers)))
            else:
                form.append((name, _decode_unicode(''.join(container), charset, errors)))

    finally:
        file.exhaust()

    return (form, files)
Example #29
    def test_iter_functions_support_iterators(self):
        data = ["abcdef\r\nghi", "jkl\r\nmnopqrstuvwxyz\r", "\nABCDEFGHIJK"]
        lines = list(wsgi.make_line_iter(data))
        self.assert_equal(lines, ["abcdef\r\n", "ghijkl\r\n", "mnopqrstuvwxyz\r\n", "ABCDEFGHIJK"])
Example #30
def test_iter_functions_support_iterators():
    data = ["abcdef\r\nghi", "jkl\r\nmnopqrstuvwxyz\r", "\nABCDEFGHIJK"]
    lines = list(wsgi.make_line_iter(data))
    assert lines == [
        "abcdef\r\n", "ghijkl\r\n", "mnopqrstuvwxyz\r\n", "ABCDEFGHIJK"
    ]
Example #31
def parse_multipart(
    file,
    boundary,
    content_length,
    stream_factory=None,
    charset="utf-8",
    errors="ignore",
    buffer_size=10 * 1024,
    max_form_memory_size=None,
):
    """Parse a multipart/form-data stream.  This is invoked by
    :func:`utils.parse_form_data` if the content type matches.  Currently it
    exists for internal usage only, but could be exposed as a separate
    function if it turns out to be useful and if we consider the API stable.
    """
    # XXX: this function does not support multipart/mixed.  I don't know of
    #      any browser that supports this, but it should be implemented
    #      nonetheless.

    # make sure the buffer size is divisible by four so that we can base64
    # decode chunk by chunk
    assert buffer_size % 4 == 0, "buffer size has to be divisible by 4"
    # also the buffer size has to be at least 1024 bytes long or long headers
    # will freak out the system
    assert buffer_size >= 1024, "buffer size has to be at least 1KB"

    if stream_factory is None:
        stream_factory = default_stream_factory

    if not boundary:
        raise ValueError("Missing boundary")
    if not is_valid_multipart_boundary(boundary):
        raise ValueError("Invalid boundary: %s" % boundary)
    if len(boundary) > buffer_size:  # pragma: no cover
        # this should never happen because we check for a minimum size
        # of 1024 and boundaries may not be longer than 200.  The only
        # situation where this can happen is in non-debug builds, where
        # the assert is skipped.
        raise ValueError("Boundary longer than buffer size")

    total_content_length = content_length
    next_part = "--" + boundary
    last_part = next_part + "--"

    form = []
    files = []
    in_memory = 0

    # convert the file into a limited stream with iteration capabilities
    file = LimitedStream(file, content_length)
    iterator = chain(make_line_iter(file, buffer_size=buffer_size), _empty_string_iter)

    try:
        terminator = _find_terminator(iterator)
        if terminator != next_part:
            raise ValueError("Expected boundary at start of multipart data")

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)
            disposition = headers.get("content-disposition")
            if disposition is None:
                raise ValueError("Missing Content-Disposition header")
            disposition, extra = parse_options_header(disposition)
            name = extra.get("name")

            transfer_encoding = headers.get("content-transfer-encoding")
            try_decode = (
                transfer_encoding is not None
                and transfer_encoding in _supported_multipart_encodings
            )

            filename = extra.get("filename")

            # if no filename is given we stream the value into memory.  A
            # list is used as a temporary container.
            if filename is None:
                is_file = False
                container = []
                _write = container.append
                guard_memory = max_form_memory_size is not None

            # otherwise we parse the rest of the headers and ask the stream
            # factory for something we can write in.
            else:
                content_type = headers.get("content-type")
                content_type = parse_options_header(content_type)[0] or "text/plain"
                is_file = True
                guard_memory = False
                if filename is not None:
                    filename = _fix_ie_filename(_decode_unicode(filename, charset, errors))
                try:
                    content_length = int(headers["content-length"])
                except (KeyError, ValueError):
                    content_length = 0
                container = stream_factory(total_content_length, content_type, filename, content_length)
                _write = container.write

            buf = ""
            for line in iterator:
                if not line:
                    raise ValueError("unexpected end of stream")

                if line[:2] == "--":
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if try_decode:
                    try:
                        line = line.decode(transfer_encoding)
                    except Exception:
                        raise ValueError("could not decode transfer encoded chunk")

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    _write(buf)
                    buf = ""

                # If the line ends with a Windows CRLF we write everything
                # except the last two bytes.  In all other cases we write
                # everything except the last byte.  If that byte was a
                # newline, that's fine; otherwise it will be written on the
                # next iteration.  This ensures we never write the final
                # newline into the stream, so we do not have to truncate it.
                if line[-2:] == "\r\n":
                    buf = "\r\n"
                    cutoff = -2
                else:
                    buf = line[-1]
                    cutoff = -1
                _write(line[:cutoff])

                # if we write into memory and there is a memory size limit we
                # count the number of bytes in memory and raise an exception if
                # there is too much data in memory.
                if guard_memory:
                    in_memory += len(line)
                    if in_memory > max_form_memory_size:
                        from werkzeug.exceptions import RequestEntityTooLarge

                        raise RequestEntityTooLarge()
            else:  # pragma: no cover
                raise ValueError("unexpected end of part")

            if is_file:
                container.seek(0)
                files.append((name, FileStorage(container, filename, name, content_type, content_length, headers)))
            else:
                form.append((name, _decode_unicode("".join(container), charset, errors)))
    finally:
        # make sure the whole input stream is read
        file.exhaust()

    return form, files
Example #32
    def parse_lines(self, file, boundary, content_length, cap_at_buffer=True):
        """Generate parts of
        ``('begin_form', (headers, name))``
        ``('begin_file', (headers, name, filename))``
        ``('cont', bytestring)``
        ``('end', None)``

        Always obeys the grammar
        parts = ( begin_form cont* end |
                  begin_file cont* end )*
        """
        next_part = b"--" + boundary
        last_part = next_part + b"--"

        iterator = chain(
            make_line_iter(
                file,
                limit=content_length,
                buffer_size=self.buffer_size,
                cap_at_buffer=cap_at_buffer,
            ),
            _empty_string_iter,
        )

        terminator = self._find_terminator(iterator)

        if terminator == last_part:
            return
        elif terminator != next_part:
            self.fail("Expected boundary at start of multipart data")

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)

            disposition = headers.get("content-disposition")
            if disposition is None:
                self.fail("Missing Content-Disposition header")
            disposition, extra = parse_options_header(disposition)
            transfer_encoding = self.get_part_encoding(headers)
            name = extra.get("name")

            # Accept filename* to support non-ASCII filenames as per RFC 2231
            filename = extra.get("filename") or extra.get("filename*")

            # if no filename is given the part is a plain form field.
            if filename is None:
                yield _begin_form, (headers, name)

            # otherwise the part is a file upload.
            else:
                yield _begin_file, (headers, name, filename)

            buf = b""
            for line in iterator:
                if not line:
                    self.fail("unexpected end of stream")

                if line[:2] == b"--":
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if transfer_encoding is not None:
                    if transfer_encoding == "base64":
                        transfer_encoding = "base64_codec"
                    try:
                        line = codecs.decode(line, transfer_encoding)
                    except Exception:
                        self.fail("could not decode transfer encoded chunk")

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    yield _cont, buf
                    buf = b""

                # If the line ends with a Windows CRLF we write everything
                # except the last two bytes.  In all other cases we write
                # everything except the last byte.  If that byte was a
                # newline, that's fine; otherwise it will be written on the
                # next iteration.  This ensures we never write the final
                # newline into the stream, so we do not have to truncate
                # it.  We do, however, have to make sure that anything
                # other than a newline left in the buffer is written out.
                if line[-2:] == b"\r\n":
                    buf = b"\r\n"
                    cutoff = -2
                else:
                    buf = line[-1:]
                    cutoff = -1
                yield _cont, line[:cutoff]

            else:  # pragma: no cover
                raise ValueError("unexpected end of part")

            # if we have a leftover in the buffer that is not a newline
            # character we have to flush it, otherwise we will chop off
            # certain values.
            if buf not in (b"", b"\r", b"\n", b"\r\n"):
                yield _cont, buf

            yield _end, None
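
The filename* fallback above follows RFC 2231, where the parameter value carries a charset label and percent-encoded bytes. On the wire such a disposition looks roughly like this (illustrative value, not from the source):

header = "form-data; name=\"upload\"; filename*=UTF-8''na%C3%AFve.txt"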
Example #33
def parse_multipart(file,
                    boundary,
                    content_length,
                    stream_factory=None,
                    charset='utf-8',
                    errors='ignore',
                    buffer_size=10240,
                    max_form_memory_size=None):
    if stream_factory is None:
        stream_factory = default_stream_factory
    if not boundary:
        raise ValueError('Missing boundary')
    if not is_valid_multipart_boundary(boundary):
        raise ValueError('Invalid boundary: %s' % boundary)
    if len(boundary) > buffer_size:
        raise ValueError('Boundary longer than buffer size')
    total_content_length = content_length
    next_part = '--' + boundary
    last_part = next_part + '--'
    form = []
    files = []
    in_memory = 0
    file = LimitedStream(file, content_length)
    iterator = chain(make_line_iter(file, buffer_size=buffer_size),
                     _empty_string_iter)
    try:
        terminator = _find_terminator(iterator)
        if terminator != next_part:
            raise ValueError('Expected boundary at start of multipart data')
        while terminator != last_part:
            headers = parse_multipart_headers(iterator)
            disposition = headers.get('content-disposition')
            if disposition is None:
                raise ValueError('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            name = extra.get('name')
            transfer_encoding = headers.get('content-transfer-encoding')
            try_decode = transfer_encoding is not None and transfer_encoding in _supported_multipart_encodings
            filename = extra.get('filename')
            if filename is None:
                is_file = False
                container = []
                _write = container.append
                guard_memory = max_form_memory_size is not None
            else:
                content_type = headers.get('content-type')
                content_type = parse_options_header(
                    content_type)[0] or 'text/plain'
                is_file = True
                guard_memory = False
                if filename is not None:
                    filename = _fix_ie_filename(
                        _decode_unicode(filename, charset, errors))
                try:
                    content_length = int(headers['content-length'])
                except (KeyError, ValueError):
                    content_length = 0

                container = stream_factory(total_content_length, content_type,
                                           filename, content_length)
                _write = container.write
            buf = ''
            for line in iterator:
                if not line:
                    raise ValueError('unexpected end of stream')
                if line[:2] == '--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break
                if try_decode:
                    try:
                        line = line.decode(transfer_encoding)
                    except Exception:
                        raise ValueError(
                            'could not decode transfer encoded chunk')

                if buf:
                    _write(buf)
                    buf = ''
                if line[-2:] == '\r\n':
                    buf = '\r\n'
                    cutoff = -2
                else:
                    buf = line[-1]
                    cutoff = -1
                _write(line[:cutoff])
                if guard_memory:
                    in_memory += len(line)
                    if in_memory > max_form_memory_size:
                        from werkzeug.exceptions import RequestEntityTooLarge
                        raise RequestEntityTooLarge()
            else:
                raise ValueError('unexpected end of part')

            if is_file:
                container.seek(0)
                files.append(
                    (name,
                     FileStorage(container, filename, name, content_type,
                                 content_length, headers)))
            else:
                form.append(
                    (name, _decode_unicode(''.join(container), charset,
                                           errors)))

    finally:
        file.exhaust()

    return (form, files)