async def _iter_content(self):
    """Iterate over the remaining request body, one chunk at a time.

    Yields:
        bytes: The next non-empty chunk of body data, truncated so that
        the total never exceeds the expected content length.

    Raises:
        OperationNotAllowed: The stream is closed, or iteration has
            already been started.
    """
    if self._closed:
        raise OperationNotAllowed(
            'This stream is closed; no further operations on it are permitted.'
        )

    if self.eof:
        return

    if self._iteration_started:
        raise OperationNotAllowed(
            'This stream is already being iterated over.')

    self._iteration_started = True

    # NOTE: Flush out any previously buffered data (e.g., left over
    #   from a partial read()) before pulling events from the source.
    if self._buffer:
        next_chunk = self._buffer
        self._buffer = b''

        self._pos += len(next_chunk)
        yield next_chunk

    while self._bytes_remaining > 0:
        event = await self._receive()

        # PERF(kgriffs): Use try...except because we normally expect the
        #   'body' key to be present.
        try:
            next_chunk = event['body']
        except KeyError:
            pass
        else:
            # NOTE(kgriffs): No need to yield empty body chunks.
            # NOTE: Do NOT `continue` on an empty chunk; the final event
            #   is commonly {'body': b'', 'more_body': False}, and
            #   skipping the more_body check below would force a
            #   spurious extra receive() — or never terminate, if the
            #   server keeps sending empty chunks (bug fix).
            if next_chunk:
                next_chunk_len = len(next_chunk)

                if next_chunk_len <= self._bytes_remaining:
                    self._bytes_remaining -= next_chunk_len
                    self._pos += next_chunk_len
                else:
                    # NOTE(kgriffs): We received more data than expected,
                    #   so truncate to the expected length.
                    next_chunk = next_chunk[:self._bytes_remaining]
                    self._pos += self._bytes_remaining
                    self._bytes_remaining = 0

                yield next_chunk

        # NOTE(kgriffs): Per the ASGI spec, more_body is optional
        #   and should be considered False if not present.
        # NOTE(kgriffs): This also handles the case of receiving
        #   the event: {'type': 'http.disconnect'}
        # PERF(kgriffs): event.get() is more elegant, but uses a
        #   few more CPU cycles.
        if not ('more_body' in event and event['more_body']):
            self._bytes_remaining = 0
def __aiter__(self):
    """Return an async iterator over the stream's content.

    Raises:
        OperationNotAllowed: Iteration has already been started.
    """
    if self._iteration_started:
        raise OperationNotAllowed(
            'This stream is already being iterated over.')

    self._iteration_started = True

    # Only wrap the underlying source when buffered bytes must be
    # drained first; the wrapper costs an extra async generator frame.
    has_buffered_data = self._buffer_len > self._buffer_pos
    return self._iter_with_buffer() if has_buffered_data else self._source
async def read(self, size=None):
    """Read some or all of the remaining bytes in the request body.

    Warning:
        A size should always be specified, unless you can be certain that
        you have enough free memory for the entire request body, and that
        you have configured your web server to limit request bodies to a
        reasonable size (to guard against malicious requests).

    Warning:
        Apps may not use both ``read()`` and the asynchronous iterator
        interface to consume the same request body; the only time that
        it is safe to do so is when one or the other method is used to
        completely read the entire body *before* the other method is
        even attempted.

        Therefore, it is important to always call
        :meth:`~.exhaust` or :meth:`~.close` if a body has only
        been partially read and the remaining data is to be ignored.

    Keyword Args:
        size (int): The maximum number of bytes to read. The actual
            amount of data that can be read will depend on how much is
            available, and may be smaller than the amount requested. If
            the size is -1 or not specified, all remaining data is read
            and returned.

    Returns:
        bytes: The request body data, or ``b''`` if the body is empty or
        has already been consumed.
    """
    if self._closed:
        raise OperationNotAllowed(
            'This stream is closed; no further operations on it are permitted.'
        )

    if self.eof:
        return b''

    if size is None or size == -1:
        return await self.readall()

    if size <= 0:
        return b''

    if self._buffer:
        num_bytes_available = len(self._buffer)
        chunks = [self._buffer]
    else:
        num_bytes_available = 0
        chunks = []

    while self._bytes_remaining > 0 and num_bytes_available < size:
        event = await self._receive()

        # PERF(kgriffs): Use try..except because we normally expect the
        #   'body' key to be present.
        try:
            next_chunk = event['body']
        except KeyError:
            pass
        else:
            next_chunk_len = len(next_chunk)

            if next_chunk_len <= self._bytes_remaining:
                chunks.append(next_chunk)
                self._bytes_remaining -= next_chunk_len
                num_bytes_available += next_chunk_len
            else:
                # NOTE(kgriffs): Do not read more data than we are
                #   expecting. This *should* never happen, but better
                #   safe than sorry.
                chunks.append(next_chunk[:self._bytes_remaining])

                # NOTE: Count the truncated bytes *before* zeroing the
                #   counter. The original code zeroed _bytes_remaining
                #   first and then added 0 to num_bytes_available, which
                #   made the size-based slicing below hand back more
                #   data than the caller requested (bug fix).
                num_bytes_available += self._bytes_remaining
                self._bytes_remaining = 0

        # NOTE(kgriffs): This also handles the case of receiving
        #   the event: {'type': 'http.disconnect'}
        if not ('more_body' in event and event['more_body']):
            self._bytes_remaining = 0

    self._buffer = chunks[0] if len(chunks) == 1 else b''.join(chunks)

    if num_bytes_available <= size:
        data = self._buffer
        self._buffer = b''
    else:
        data = self._buffer[:size]
        self._buffer = self._buffer[size:]

    self._pos += len(data)

    return data
async def readall(self):
    """Read and return all remaining data in the request body.

    Warning:
        Only use this method when you can be certain that you have
        enough free memory for the entire request body, and that you
        have configured your web server to limit request bodies to a
        reasonable size (to guard against malicious requests).

    Returns:
        bytes: The request body data, or ``b''`` if the body is empty or
        has already been consumed.
    """
    if self._closed:
        raise OperationNotAllowed(
            'This stream is closed; no further operations on it are permitted.'
        )

    if self.eof:
        return b''

    # Drain anything previously buffered before pulling from the source.
    chunks = []
    if self._buffer:
        chunks.append(self._buffer)
        self._buffer = b''

    while self._bytes_remaining > 0:
        event = await self._receive()

        # NOTE: The 'body' key is normally present; its absence (as with
        #   an {'type': 'http.disconnect'} event) simply yields no data.
        if 'body' in event:
            received = event['body']
            received_len = len(received)

            if received_len > self._bytes_remaining:
                # Guard against a server handing over more data than the
                # content length promised; clip the excess.
                chunks.append(received[:self._bytes_remaining])
                self._bytes_remaining = 0
            else:
                chunks.append(received)
                self._bytes_remaining -= received_len

        # A missing or falsy more_body flag ends the stream (per the
        # ASGI spec, more_body defaults to False when absent).
        if not event.get('more_body'):
            self._bytes_remaining = 0

    data = b''.join(chunks) if len(chunks) != 1 else chunks[0]
    self._pos += len(data)

    return data