Example #1
 def write_soon(self, data):
     if not self.connected:
         # if the socket is closed then interrupt the task so that it
         # can cleanup possibly before the app_iter is exhausted
         raise ClientDisconnected
     if data:
         # the async mainloop might be popping data off outbuf; we can
         # block here waiting for it because we're in a task thread
         with self.outbuf_lock:
             self._flush_outbufs_below_high_watermark()
             if not self.connected:
                 raise ClientDisconnected
             num_bytes = len(data)
             if data.__class__ is ReadOnlyFileBasedBuffer:
                 # they used wsgi.file_wrapper
                 self.outbufs.append(data)
                 nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                 self.outbufs.append(nextbuf)
                 self.current_outbuf_count = 0
             else:
                 if self.current_outbuf_count > self.adj.outbuf_high_watermark:
                     # rotate to a new buffer if the current buffer has hit
                     # the watermark to avoid it growing unbounded
                     nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                     self.outbufs.append(nextbuf)
                     self.current_outbuf_count = 0
                 self.outbufs[-1].append(data)
                 self.current_outbuf_count += num_bytes
             self.total_outbufs_len += num_bytes
             if self.total_outbufs_len >= self.adj.send_bytes:
                 self.server.pull_trigger()
         return num_bytes
     return 0
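
The rotation above keeps any single outbuf from growing without bound once it crosses the high watermark. A minimal standalone sketch of the same idea, using plain bytearray buffers and an assumed watermark value instead of waitress's OverflowableBuffer and Adjustments:

    # Hypothetical, simplified illustration of watermark-based buffer rotation;
    # not waitress's actual implementation.
    OUTBUF_HIGH_WATERMARK = 16 * 1024  # assumed value

    class RotatingOutput:
        def __init__(self):
            self.outbufs = [bytearray()]
            self.current_outbuf_count = 0
            self.total_outbufs_len = 0

        def write_soon(self, data):
            if not data:
                return 0
            if self.current_outbuf_count > OUTBUF_HIGH_WATERMARK:
                # rotate to a fresh buffer so the current one stops growing
                self.outbufs.append(bytearray())
                self.current_outbuf_count = 0
            self.outbufs[-1] += data
            self.current_outbuf_count += len(data)
            self.total_outbufs_len += len(data)
            return len(data)
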
Example #2
 def write_soon(self, data):
     if not self.connected:
         # if the socket is closed then interrupt the task so that it
         # can cleanup possibly before the app_iter is exhausted
         raise ClientDisconnected
     if data:
         # the async mainloop might be popping data off outbuf; we can
         # block here waiting for it because we're in a task thread
         with self.outbuf_lock:
             # check again after acquiring the lock to ensure the
             # outbufs are not closed
             if not self.connected:  # pragma: no cover
                 raise ClientDisconnected
             if data.__class__ is ReadOnlyFileBasedBuffer:
                 # they used wsgi.file_wrapper
                 self.outbufs.append(data)
                 nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                 self.outbufs.append(nextbuf)
             else:
                 self.outbufs[-1].append(data)
             num_bytes = len(data)
             self.total_outbufs_len += num_bytes
         # XXX We might eventually need to pull the trigger here (to
         # instruct select to stop blocking), but it slows things down so
         # much that I'll hold off for now; "server push" on otherwise
         # unbusy systems may suffer.
         return num_bytes
     return 0
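
The second connected check inside the lock guards against the main loop closing the channel between the unlocked check and lock acquisition. A toy illustration of that re-check-under-lock pattern, using hypothetical module-level names rather than the channel attributes above:

    import threading

    # Hypothetical sketch of re-checking shared state after taking the lock.
    lock = threading.Lock()
    connected = True

    class ClientDisconnected(Exception):
        pass

    def write_soon(data):
        if not connected:  # cheap unlocked check
            raise ClientDisconnected
        with lock:
            if not connected:  # re-check: the flag may have flipped before we got the lock
                raise ClientDisconnected
            # ... append data to the outbufs here ...
            return len(data)
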
Example #3
 def __init__(
     self,
     server,
     sock,
     addr,
     adj,
     map=None,
 ):
     self.server = server
     self.addr = addr
     self.adj = adj
     self.outbuf = OverflowableBuffer(adj.outbuf_overflow)
     self.creation_time = self.last_activity = time.time()
     asyncore.dispatcher.__init__(self, sock, map=map)
Example #4
    def __init__(self, server, sock, addr, adj, map=None):
        self.server = server
        self.adj = adj
        self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)]
        self.creation_time = self.last_activity = time.time()
        self.sendbuf_len = sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)

        # task_lock used to push/pop requests
        self.task_lock = threading.Lock()
        # outbuf_lock used to access any outbuf (expected to use an RLock)
        self.outbuf_lock = threading.Condition()

        wasyncore.dispatcher.__init__(self, sock, map=map)

        # Don't let wasyncore.dispatcher throttle self.addr on us.
        self.addr = addr
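
Here outbuf_lock is a threading.Condition, which lets a task thread block in write_soon until the main loop has drained the outbufs below the high watermark (the _flush_outbufs_below_high_watermark call in Example #1). A rough sketch of that wait/notify handshake with assumed names and values; waitress's real implementation differs in detail:

    import threading

    # Hypothetical sketch of a watermark wait/notify handshake, not waitress's code.
    HIGH_WATERMARK = 16 * 1024  # assumed value
    cond = threading.Condition()
    total_outbufs_len = 0

    def task_thread_waits():
        # called from a task thread before appending more data
        with cond:
            while total_outbufs_len > HIGH_WATERMARK:
                cond.wait()  # lock released while waiting, reacquired on wake-up

    def mainloop_drained(nbytes):
        # called from the async main loop after writing nbytes to the socket
        global total_outbufs_len
        with cond:
            total_outbufs_len -= nbytes
            if total_outbufs_len <= HIGH_WATERMARK:
                cond.notify_all()  # wake any blocked task threads
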
Example #5
 def write_soon(self, data):
     if data:
         # the async mainloop might be popping data off outbuf; we can
         # block here waiting for it because we're in a task thread
         with self.outbuf_lock:
             if data.__class__ is ReadOnlyFileBasedBuffer:
                 # they used wsgi.file_wrapper
                 self.outbufs.append(data)
                 nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                 self.outbufs.append(nextbuf)
             else:
                 self.outbufs[-1].append(data)
         # XXX We might eventually need to pull the trigger here (to
         # instruct select to stop blocking), but it slows things down so
         # much that I'll hold off for now; "server push" on otherwise
         # unbusy systems may suffer.
         return len(data)
     return 0
Example #6
    def __init__(
        self,
        server,
        sock,
        addr,
        adj,
        map=None,
    ):
        self.server = server
        self.addr = addr
        self.adj = adj
        self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)]
        self.creation_time = self.last_activity = time.time()

        # task_lock used to push/pop requests
        self.task_lock = thread.allocate_lock()
        # outbuf_lock used to access any outbuf
        self.outbuf_lock = thread.allocate_lock()

        asyncore.dispatcher.__init__(self, sock, map=map)
Example #7
    def __init__(
        self,
        server,
        sock,
        addr,
        adj,
        map=None,
    ):
        self.server = server
        self.adj = adj
        self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)]
        self.total_outbufs_len = 0
        self.creation_time = self.last_activity = time.time()

        # task_lock used to push/pop requests
        self.task_lock = threading.Lock()
        # outbuf_lock used to access any outbuf
        self.outbuf_lock = threading.RLock()

        wasyncore.dispatcher.__init__(self, sock, map=map)

        # Don't let wasyncore.dispatcher throttle self.addr on us.
        self.addr = addr
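
This variant makes outbuf_lock a threading.RLock, so the thread that already holds it can take it again, for example if a helper that also locks the outbufs were called from inside write_soon. A tiny standalone demonstration of that reentrancy with made-up helper names:

    import threading

    # Hypothetical illustration of reentrant locking; not waitress code.
    outbuf_lock = threading.RLock()

    def _rotate_outbuf():
        with outbuf_lock:  # same thread re-acquires; a plain Lock would deadlock here
            pass  # rotate buffers here

    def write_soon(data):
        with outbuf_lock:
            _rotate_outbuf()
            return len(data)

    print(write_soon(b"hello"))  # 5
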
Example #8
    def parse_header(self, header_plus):
        """
        Parses the header_plus block of text (the headers plus the
        first line of the request).
        """
        index = header_plus.find(b"\r\n")
        if index >= 0:
            first_line = header_plus[:index].rstrip()
            header = header_plus[index + 2:]
        else:
            raise ParsingError("HTTP message header invalid")

        if b"\r" in first_line or b"\n" in first_line:
            raise ParsingError("Bare CR or LF found in HTTP message")

        self.first_line = first_line  # for testing

        lines = get_header_lines(header)

        headers = self.headers
        for line in lines:
            header = HEADER_FIELD.match(line)

            if not header:
                raise ParsingError("Invalid header")

            key, value = header.group("name", "value")

            if b"_" in key:
                # TODO(xistence): Should we drop this request instead?
                continue

            # Only strip off whitespace that is considered valid whitespace by
            # RFC7230, don't strip the rest
            value = value.strip(b" \t")
            key1 = tostr(key.upper().replace(b"-", b"_"))
            # If a header already exists, we append subsequent values
            # separated by a comma. Applications already need to handle
            # the comma separated values, as HTTP front ends might do
            # the concatenation for you (behavior specified in RFC2616).
            try:
                headers[key1] += tostr(b", " + value)
            except KeyError:
                headers[key1] = tostr(value)

        # command, uri, version will be bytes
        command, uri, version = crack_first_line(first_line)
        version = tostr(version)
        command = tostr(command)
        self.command = command
        self.version = version
        (
            self.proxy_scheme,
            self.proxy_netloc,
            self.path,
            self.query,
            self.fragment,
        ) = split_uri(uri)
        self.url_scheme = self.adj.url_scheme
        connection = headers.get("CONNECTION", "")

        if version == "1.0":
            if connection.lower() != "keep-alive":
                self.connection_close = True

        if version == "1.1":
            # since the server buffers data from chunked transfers and clients
            # never need to deal with chunked requests, downstream clients
            # should not see the HTTP_TRANSFER_ENCODING header; we pop it
            # here
            te = headers.pop("TRANSFER_ENCODING", "")

            # NB: We cannot just call bare strip() here because it will also
            # remove other non-printable characters that we explicitly do not
            # want removed so that if someone attempts to smuggle a request
            # with these characters we don't fall prey to it.
            #
            # For example \x85 is stripped by default, but it is not considered
            # valid whitespace to be stripped by RFC7230.
            encodings = [
                encoding.strip(" \t").lower() for encoding in te.split(",")
                if encoding
            ]

            for encoding in encodings:
                # Out of the transfer-codings listed in
                # https://tools.ietf.org/html/rfc7230#section-4 we only support
                # chunked at this time.

                # Note: the identity transfer-coding was removed in RFC7230:
                # https://tools.ietf.org/html/rfc7230#appendix-A.2 and is thus
                # not supported
                if encoding not in {"chunked"}:
                    raise TransferEncodingNotImplemented(
                        "Transfer-Encoding requested is not supported.")

            if encodings and encodings[-1] == "chunked":
                self.chunked = True
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = ChunkedReceiver(buf)
            elif encodings:  # pragma: nocover
                raise TransferEncodingNotImplemented(
                    "Transfer-Encoding requested is not supported.")

            expect = headers.get("EXPECT", "").lower()
            self.expect_continue = expect == "100-continue"
            if connection.lower() == "close":
                self.connection_close = True

        if not self.chunked:
            try:
                cl = int(headers.get("CONTENT_LENGTH", 0))
            except ValueError:
                raise ParsingError("Content-Length is invalid")

            self.content_length = cl
            if cl > 0:
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = FixedStreamReceiver(cl, buf)
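
The Transfer-Encoding handling above strips only the whitespace RFC 7230 allows (space and tab) and rejects any coding other than chunked. A minimal self-contained sketch of that check, outside waitress's parser classes and using a hypothetical function name:

    # Hypothetical, standalone version of the Transfer-Encoding check above.
    def validate_transfer_encoding(te_header):
        # strip only space/tab per RFC7230; characters like \x85 are kept
        encodings = [
            encoding.strip(" \t").lower()
            for encoding in te_header.split(",")
            if encoding
        ]
        for encoding in encodings:
            if encoding != "chunked":
                raise ValueError("Transfer-Encoding requested is not supported.")
        # chunked counts only if it is the final coding applied
        return bool(encodings) and encodings[-1] == "chunked"

    print(validate_transfer_encoding("chunked"))  # True
    print(validate_transfer_encoding(""))         # False (fall back to Content-Length)
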
Example #9
    def parse_header(self, header_plus):
        """
        Parses the header_plus block of text (the headers plus the
        first line of the request).
        """
        index = header_plus.find(b'\n')
        if index >= 0:
            first_line = header_plus[:index].rstrip()
            header = header_plus[index + 1:]
        else:
            first_line = header_plus.rstrip()
            header = b''

        self.first_line = first_line  # for testing

        lines = get_header_lines(header)

        headers = self.headers
        for line in lines:
            index = line.find(b':')
            if index > 0:
                key = line[:index]
                if b'_' in key:
                    continue
                value = line[index + 1:].strip()
                key1 = tostr(key.upper().replace(b'-', b'_'))
                # If a header already exists, we append subsequent values
                # separated by a comma. Applications already need to handle
                # the comma separated values, as HTTP front ends might do
                # the concatenation for you (behavior specified in RFC2616).
                try:
                    headers[key1] += tostr(b', ' + value)
                except KeyError:
                    headers[key1] = tostr(value)
            # else there's garbage in the headers?

        # command, uri, version will be bytes
        command, uri, version = crack_first_line(first_line)
        version = tostr(version)
        command = tostr(command)
        self.command = command
        self.version = version
        (self.proxy_scheme, self.proxy_netloc, self.path, self.query,
         self.fragment) = split_uri(uri)
        self.url_scheme = self.adj.url_scheme
        connection = headers.get('CONNECTION', '')

        if version == '1.0':
            if connection.lower() != 'keep-alive':
                self.connection_close = True

        if version == '1.1':
            # since the server buffers data from chunked transfers and clients
            # never need to deal with chunked requests, downstream clients
            # should not see the HTTP_TRANSFER_ENCODING header; we pop it
            # here
            te = headers.pop('TRANSFER_ENCODING', '')
            if te.lower() == 'chunked':
                self.chunked = True
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = ChunkedReceiver(buf)
            expect = headers.get('EXPECT', '').lower()
            self.expect_continue = expect == '100-continue'
            if connection.lower() == 'close':
                self.connection_close = True

        if not self.chunked:
            try:
                cl = int(headers.get('CONTENT_LENGTH', 0))
            except ValueError:
                cl = 0
            self.content_length = cl
            if cl > 0:
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = FixedStreamReceiver(cl, buf)
Example #10
    def _makeOne(self, overflow=10):
        from waitress.buffers import OverflowableBuffer
        return OverflowableBuffer(overflow)

    def _makeOne(self, overflow=10):
        from waitress.buffers import OverflowableBuffer

        buf = OverflowableBuffer(overflow)
        self.buffers_to_close.append(buf)
        return buf
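
The second _makeOne records every buffer it creates in self.buffers_to_close so a tearDown hook can release any temporary overflow files afterwards. A rough sketch of how a test case might wire that up, using a stand-in FakeBuffer rather than assuming OverflowableBuffer's exact API:

    import unittest

    # Hypothetical sketch of the "track buffers, close them in tearDown" pattern.
    class FakeBuffer:
        def __init__(self, overflow):
            self.overflow = overflow
            self.closed = False

        def close(self):
            self.closed = True

    class TestOverflowableBufferStyle(unittest.TestCase):
        def setUp(self):
            self.buffers_to_close = []

        def tearDown(self):
            for buf in self.buffers_to_close:
                buf.close()  # release anything the buffer holds open

        def _makeOne(self, overflow=10):
            buf = FakeBuffer(overflow)
            self.buffers_to_close.append(buf)
            return buf

        def test_makeOne_registers_buffer(self):
            buf = self._makeOne()
            self.assertIn(buf, self.buffers_to_close)

    if __name__ == "__main__":
        unittest.main()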