def received(self, data):
    """
    Receives the HTTP stream for one request.  Returns the number of
    bytes consumed.  Sets the completed flag once both the header and
    the body have been received.
    """
    if self.completed:
        return 0  # Can't consume any more.
    datalen = len(data)
    br = self.body_rcv
    if br is None:
        # In header.
        s = self.header_plus + data
        index = find_double_newline(s)
        if index >= 0:
            # Header finished.
            header_plus = s[:index]
            consumed = len(data) - (len(s) - index)
            # Remove preceding blank lines.
            header_plus = header_plus.lstrip()
            if not header_plus:
                self.empty = True
                self.completed = True
            else:
                try:
                    self.parse_header(header_plus)
                except ParsingError as e:
                    self.error = BadRequest(e.args[0])
                    self.completed = True
                else:
                    if self.body_rcv is None:
                        # no content-length header and not a t-e: chunked
                        # request
                        self.completed = True
                    if self.content_length > 0:
                        max_body = self.adj.max_request_body_size
                        # we won't accept this request if the content-length
                        # is too large
                        if self.content_length >= max_body:
                            self.error = RequestEntityTooLarge(
                                'exceeds max_body of %s' % max_body)
                            self.completed = True
            self.headers_finished = True
            return consumed
        else:
            # Header not finished yet.
            self.header_bytes_received += datalen
            max_header = self.adj.max_request_header_size
            if self.header_bytes_received >= max_header:
                # malformed header, we need to construct some request
                # on our own. we disregard the incoming(?) requests HTTP
                # version and just use 1.0. IOW someone just sent garbage
                # over the wire
                self.parse_header(b'GET / HTTP/1.0\n')
                self.error = RequestHeaderFieldsTooLarge(
                    'exceeds max_header of %s' % max_header)
                self.completed = True
            self.header_plus = s
            return datalen
    else:
        # In body.
        consumed = br.received(data)
        self.body_bytes_received += consumed
        max_body = self.adj.max_request_body_size
        if self.body_bytes_received >= max_body:
            # this will only be raised during t-e: chunked requests
            self.error = RequestEntityTooLarge('exceeds max_body of %s' % max_body)
            self.completed = True
        elif br.error:
            # garbage in chunked encoding input probably
            self.error = br.error
            self.completed = True
        elif br.completed:
            # The request (with the body) is ready to use.
            self.completed = True
            if self.chunked:
                # We've converted the chunked transfer encoding request
                # body into a normal request body, so we know its content
                # length; set the header here.  We already popped the
                # TRANSFER_ENCODING header in parse_header, so this will
                # appear to the client to be an entirely non-chunked HTTP
                # request with a valid content-length.
                self.headers['CONTENT_LENGTH'] = str(br.__len__())
        return consumed

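# The method above appears to be waitress's HTTPRequestParser.received; a
# minimal driving sketch under that assumption (HTTPRequestParser and
# Adjustments come from the waitress package, not from this snippet):
from waitress.adjustments import Adjustments
from waitress.parser import HTTPRequestParser

parser = HTTPRequestParser(Adjustments())
data = b"GET /foo HTTP/1.1\r\nHost: example.com\r\n\r\n"
consumed = parser.received(data)
# With no Content-Length and no chunked Transfer-Encoding, the request is
# complete as soon as the blank line that ends the headers is seen.
assert consumed == len(data)
assert parser.headers_finished and parser.completed
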
def received(self, data):
    """
    Receives the HTTP stream for one request.  Returns the number of
    bytes consumed.  Sets the completed flag once both the header and
    the body have been received.
    """
    if self.completed:
        return 0  # Can't consume any more.

    datalen = len(data)
    br = self.body_rcv
    if br is None:
        # In header.
        max_header = self.adj.max_request_header_size
        s = self.header_plus + data
        index = find_double_newline(s)
        consumed = 0

        if index >= 0:
            # If the headers have ended, and we also have part of the body
            # message in data we still want to validate we aren't going
            # over our limit for received headers.
            self.header_bytes_received += index
            consumed = datalen - (len(s) - index)
        else:
            self.header_bytes_received += datalen
            consumed = datalen

        # If the first line + headers is over the max length, we return a
        # RequestHeaderFieldsTooLarge error rather than continuing to
        # attempt to parse the headers.
        if self.header_bytes_received >= max_header:
            self.parse_header(b"GET / HTTP/1.0\r\n")
            self.error = RequestHeaderFieldsTooLarge(
                "exceeds max_header of %s" % max_header)
            self.completed = True

            return consumed

        if index >= 0:
            # Header finished.
            header_plus = s[:index]

            # Remove preceding blank lines.  This is suggested by
            # https://tools.ietf.org/html/rfc7230#section-3.5 to support
            # clients sending an extra CR LF after another request when
            # using HTTP pipelining
            header_plus = header_plus.lstrip()

            if not header_plus:
                self.empty = True
                self.completed = True
            else:
                try:
                    self.parse_header(header_plus)
                except ParsingError as e:
                    self.error = BadRequest(e.args[0])
                    self.completed = True
                except TransferEncodingNotImplemented as e:
                    self.error = ServerNotImplemented(e.args[0])
                    self.completed = True
                else:
                    if self.body_rcv is None:
                        # no content-length header and not a t-e: chunked
                        # request
                        self.completed = True

                    if self.content_length > 0:
                        max_body = self.adj.max_request_body_size
                        # we won't accept this request if the content-length
                        # is too large
                        if self.content_length >= max_body:
                            self.error = RequestEntityTooLarge(
                                "exceeds max_body of %s" % max_body)
                            self.completed = True
            self.headers_finished = True

            return consumed

        # Header not finished yet.
        self.header_plus = s

        return datalen
    else:
        # In body.
        consumed = br.received(data)
        self.body_bytes_received += consumed
        max_body = self.adj.max_request_body_size

        if self.body_bytes_received >= max_body:
            # this will only be raised during t-e: chunked requests
            self.error = RequestEntityTooLarge("exceeds max_body of %s" % max_body)
            self.completed = True
        elif br.error:
            # garbage in chunked encoding input probably
            self.error = br.error
            self.completed = True
        elif br.completed:
            # The request (with the body) is ready to use.
            self.completed = True

            if self.chunked:
                # We've converted the chunked transfer encoding request
                # body into a normal request body, so we know its content
                # length; set the header here.  We already popped the
                # TRANSFER_ENCODING header in parse_header, so this will
                # appear to the client to be an entirely non-chunked HTTP
                # request with a valid content-length.
                self.headers["CONTENT_LENGTH"] = str(br.__len__())

        return consumed

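# Sketch of the tightened header-size accounting in this revision, assuming the
# same waitress HTTPRequestParser/Adjustments names as above: header bytes are
# counted even when the terminating blank line never arrives, so an overlong
# request line is rejected instead of being buffered indefinitely.
from waitress.adjustments import Adjustments
from waitress.parser import HTTPRequestParser

parser = HTTPRequestParser(Adjustments(max_request_header_size=1024))
parser.received(b"GET /" + b"a" * 2048 + b" HTTP/1.1\r\n")
assert parser.completed
assert parser.error is not None  # RequestHeaderFieldsTooLarge
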
def _makeOne(self):
    from waitress.utilities import BadRequest

    return BadRequest(1)

def received(self, s):
    # Returns the number of bytes consumed.
    if self.completed:
        return 0
    orig_size = len(s)
    while s:
        rm = self.chunk_remainder
        if rm > 0:
            # Receive the remainder of a chunk.
            to_write = s[:rm]
            self.buf.append(to_write)
            written = len(to_write)
            s = s[written:]

            self.chunk_remainder -= written

            if self.chunk_remainder == 0:
                self.validate_chunk_end = True
        elif self.validate_chunk_end:
            s = self.chunk_end + s

            pos = s.find(b"\r\n")

            if pos < 0 and len(s) < 2:
                self.chunk_end = s
                s = b""
            else:
                self.chunk_end = b""
                if pos == 0:
                    # Chop off the terminating CR LF from the chunk
                    s = s[2:]
                else:
                    self.error = BadRequest("Chunk not properly terminated")
                    self.all_chunks_received = True

                # Always exit this loop
                self.validate_chunk_end = False
        elif not self.all_chunks_received:
            # Receive a control line.
            s = self.control_line + s
            pos = s.find(b"\r\n")
            if pos < 0:
                # Control line not finished.
                self.control_line = s
                s = b""
            else:
                # Control line finished.
                line = s[:pos]
                s = s[pos + 2 :]
                self.control_line = b""
                line = line.strip()
                if line:
                    # Begin a new chunk.
                    semi = line.find(b";")
                    if semi >= 0:
                        # discard extension info.
                        line = line[:semi]
                    try:
                        sz = int(line.strip(), 16)  # hexadecimal
                    except ValueError:  # garbage in input
                        self.error = BadRequest("garbage in chunked encoding input")
                        sz = 0
                    if sz > 0:
                        # Start a new chunk.
                        self.chunk_remainder = sz
                    else:
                        # Finished chunks.
                        self.all_chunks_received = True
                # else expect a control line.
        else:
            # Receive the trailer.
            trailer = self.trailer + s
            if trailer.startswith(b"\r\n"):
                # No trailer.
                self.completed = True
                return orig_size - (len(trailer) - 2)
            pos = find_double_newline(trailer)
            if pos < 0:
                # Trailer not finished.
                self.trailer = trailer
                s = b""
            else:
                # Finished the trailer.
                self.completed = True
                self.trailer = trailer[:pos]
                return orig_size - (len(trailer) - pos)
    return orig_size

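# Sketch of the chunk-termination check introduced here, assuming this is
# waitress's ChunkedReceiver; a plain list stands in for the body buffer,
# since only .append() is exercised by received():
from waitress.receiver import ChunkedReceiver

rcv = ChunkedReceiver([])
# A 5-byte chunk whose payload is not followed by CR LF:
rcv.received(b"5\r\nhelloXX0\r\n\r\n")
assert rcv.error is not None  # BadRequest: "Chunk not properly terminated"
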
def received(self, s):
    # Returns the number of bytes consumed.
    if self.completed:
        return 0
    orig_size = len(s)
    while s:
        rm = self.chunk_remainder
        if rm > 0:
            # Receive the remainder of a chunk.
            to_write = s[:rm]
            self.buf.append(to_write)
            written = len(to_write)
            s = s[written:]
            self.chunk_remainder -= written
        elif not self.all_chunks_received:
            # Receive a control line.
            s = self.control_line + s
            pos = s.find(b'\n')
            if pos < 0:
                # Control line not finished.
                self.control_line = s
                s = b''
            else:
                # Control line finished.
                line = s[:pos]
                s = s[pos + 1:]
                self.control_line = b''
                line = line.strip()
                if line:
                    # Begin a new chunk.
                    semi = line.find(b';')
                    if semi >= 0:
                        # discard extension info.
                        line = line[:semi]
                    try:
                        sz = int(line.strip(), 16)  # hexadecimal
                    except ValueError:  # garbage in input
                        self.error = BadRequest(
                            'garbage in chunked encoding input')
                        sz = 0
                    if sz > 0:
                        # Start a new chunk.
                        self.chunk_remainder = sz
                    else:
                        # Finished chunks.
                        self.all_chunks_received = True
                # else expect a control line.
        else:
            # Receive the trailer.
            trailer = self.trailer + s
            if trailer.startswith(b'\r\n'):
                # No trailer.
                self.completed = True
                return orig_size - (len(trailer) - 2)
            elif trailer.startswith(b'\n'):
                # No trailer.
                self.completed = True
                return orig_size - (len(trailer) - 1)
            pos = find_double_newline(trailer)
            if pos < 0:
                # Trailer not finished.
                self.trailer = trailer
                s = b''
            else:
                # Finished the trailer.
                self.completed = True
                self.trailer = trailer[:pos]
                return orig_size - (len(trailer) - pos)
    return orig_size

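# Minimal decoding sketch for a well-formed chunked body, again assuming
# waitress's ChunkedReceiver with a plain list standing in for the buffer:
from waitress.receiver import ChunkedReceiver

buf = []
rcv = ChunkedReceiver(buf)
body = b"5\r\nhello\r\n0\r\n\r\n"
consumed = rcv.received(body)
assert b"".join(buf) == b"hello"
assert rcv.completed and consumed == len(body)
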
def received(self, data):
    """
    Receives the HTTP stream for one request.  Returns the number of
    bytes consumed.  Sets the completed flag once both the header and
    the body have been received.
    """
    if self.completed:
        return 0  # Can't consume any more.
    datalen = len(data)
    br = self.body_rcv
    if br is None:
        # In header.
        s = self.header_plus + data
        index = find_double_newline(s)
        if index >= 0:
            # Header finished.
            header_plus = s[:index]
            consumed = len(data) - (len(s) - index)
            # Remove preceding blank lines.
            header_plus = header_plus.lstrip()
            if not header_plus:
                self.empty = True
                self.completed = True
            else:
                try:
                    self.parse_header(header_plus)
                except ParsingError as e:
                    self.error = BadRequest(e.args[0])
                    self.completed = True
                else:
                    if self.body_rcv is None:
                        # no content-length header and not a t-e: chunked
                        # request
                        self.completed = True
                    if self.content_length > 0:
                        max_body = self.adj.max_request_body_size
                        # we won't accept this request if the content-length
                        # is too large
                        if self.content_length >= max_body:
                            self.error = RequestEntityTooLarge(
                                'exceeds max_body of %s' % max_body)
                            self.completed = True
            self.headers_finished = True
            return consumed
        else:
            # Header not finished yet.
            self.header_bytes_received += datalen
            max_header = self.adj.max_request_header_size
            if self.header_bytes_received >= max_header:
                self.parse_header(b'GET / HTTP/1.0\n')
                self.error = RequestHeaderFieldsTooLarge(
                    'exceeds max_header of %s' % max_header)
                self.completed = True
            self.header_plus = s
            return datalen
    else:
        # In body.
        consumed = br.received(data)
        self.body_bytes_received += consumed
        max_body = self.adj.max_request_body_size
        if self.body_bytes_received >= max_body:
            # this will only be raised during t-e: chunked requests
            self.error = RequestEntityTooLarge('exceeds max_body of %s' % max_body)
            self.completed = True
        elif br.error:
            # garbage in chunked encoding input probably
            self.error = br.error
            self.completed = True
        elif br.completed:
            self.completed = True
        return consumed

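# Sketch of the body path of the parser above (same waitress
# HTTPRequestParser/Adjustments assumption): only the header portion of the
# first packet is consumed, and the caller feeds the remainder back in.
from waitress.adjustments import Adjustments
from waitress.parser import HTTPRequestParser

parser = HTTPRequestParser(Adjustments())
request = b"POST / HTTP/1.1\r\nContent-Length: 5\r\n\r\nhello"
consumed = parser.received(request)
assert consumed == len(request) - 5  # header only
assert parser.headers_finished and not parser.completed
consumed += parser.received(request[consumed:])
assert consumed == len(request) and parser.completed
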
def received(self, s):
    # Returns the number of bytes consumed.
    if self.completed:
        return 0
    orig_size = len(s)
    while s:
        rm = self.chunk_remainder
        if rm > 0:
            # Receive the remainder of a chunk.
            to_write = s[:rm]
            self.buf.append(to_write)
            written = len(to_write)
            s = s[written:]

            self.chunk_remainder -= written

            if self.chunk_remainder == 0:
                self.validate_chunk_end = True
        elif self.validate_chunk_end:
            s = self.chunk_end + s

            pos = s.find(b"\r\n")

            if pos < 0 and len(s) < 2:
                self.chunk_end = s
                s = b""
            else:
                self.chunk_end = b""
                if pos == 0:
                    # Chop off the terminating CR LF from the chunk
                    s = s[2:]
                else:
                    self.error = BadRequest(
                        "Chunk not properly terminated")
                    self.all_chunks_received = True

                # Always exit this loop
                self.validate_chunk_end = False
        elif not self.all_chunks_received:
            # Receive a control line.
            s = self.control_line + s
            pos = s.find(b"\r\n")
            if pos < 0:
                # Control line not finished.
                self.control_line = s
                s = b""
            else:
                # Control line finished.
                line = s[:pos]
                s = s[pos + 2:]
                self.control_line = b""

                if line:
                    # Begin a new chunk.
                    semi = line.find(b";")
                    if semi >= 0:
                        extinfo = line[semi:]
                        valid_ext_info = CHUNK_EXT_RE.match(extinfo)

                        if not valid_ext_info:
                            self.error = BadRequest(
                                "Invalid chunk extension")
                            self.all_chunks_received = True

                            break

                        line = line[:semi]

                    if not ONLY_HEXDIG_RE.match(line):
                        self.error = BadRequest("Invalid chunk size")
                        self.all_chunks_received = True

                        break

                    # Can not fail due to matching against the regular
                    # expression above
                    sz = int(line, 16)  # hexadecimal
                    if sz > 0:
                        # Start a new chunk.
                        self.chunk_remainder = sz
                    else:
                        # Finished chunks.
                        self.all_chunks_received = True
                # else expect a control line.
        else:
            # Receive the trailer.
            trailer = self.trailer + s
            if trailer.startswith(b"\r\n"):
                # No trailer.
                self.completed = True
                return orig_size - (len(trailer) - 2)
            pos = find_double_newline(trailer)
            if pos < 0:
                # Trailer not finished.
                self.trailer = trailer
                s = b""
            else:
                # Finished the trailer.
                self.completed = True
                self.trailer = trailer[:pos]
                return orig_size - (len(trailer) - pos)
    return orig_size

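# Sketch of the stricter chunk-size validation in this variant, assuming
# waitress's ChunkedReceiver with module-level ONLY_HEXDIG_RE / CHUNK_EXT_RE
# patterns: a size line that is not a bare hexadecimal number is rejected
# before it ever reaches int(..., 16).
from waitress.receiver import ChunkedReceiver

rcv = ChunkedReceiver([])
rcv.received(b"0x5\r\nhello\r\n0\r\n\r\n")  # "0x5" would be accepted by int(..., 16)
assert rcv.error is not None  # BadRequest: "Invalid chunk size"
assert rcv.all_chunks_received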