Example #1
 def write_soon(self, data):
     if not self.connected:
         # if the socket is closed then interrupt the task so that it
         # can clean up, possibly before the app_iter is exhausted
         raise ClientDisconnected
     if data:
         # the async mainloop might be popping data off outbuf; we can
         # block here waiting for it because we're in a task thread
         with self.outbuf_lock:
             self._flush_outbufs_below_high_watermark()
             if not self.connected:
                 raise ClientDisconnected
             num_bytes = len(data)
             if data.__class__ is ReadOnlyFileBasedBuffer:
                 # they used wsgi.file_wrapper
                 self.outbufs.append(data)
                 nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                 self.outbufs.append(nextbuf)
                 self.current_outbuf_count = 0
             else:
                 if self.current_outbuf_count > self.adj.outbuf_high_watermark:
                     # rotate to a new buffer if the current buffer has hit
                     # the watermark to avoid it growing unbounded
                     nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                     self.outbufs.append(nextbuf)
                     self.current_outbuf_count = 0
                 self.outbufs[-1].append(data)
                 self.current_outbuf_count += num_bytes
             self.total_outbufs_len += num_bytes
             if self.total_outbufs_len >= self.adj.send_bytes:
                 self.server.pull_trigger()
         return num_bytes
     return 0
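
Example #1 calls a _flush_outbufs_below_high_watermark() helper that is not included in the excerpt. Below is a minimal, standalone sketch of the back-pressure pattern such a helper implies, assuming outbuf_lock is a threading.Condition as in Example #5; the Channel class, HIGH_WATERMARK constant, and flush_some() method are illustrative stand-ins, not waitress's API.

import threading
import time

class Channel:
    HIGH_WATERMARK = 64  # hypothetical stand-in for adj.outbuf_high_watermark

    def __init__(self):
        self.outbuf = bytearray()
        self.outbuf_lock = threading.Condition()  # backed by an RLock
        self.connected = True

    def write_soon(self, data):
        # task thread: block until the main loop drains below the watermark
        with self.outbuf_lock:
            while self.connected and len(self.outbuf) > self.HIGH_WATERMARK:
                self.outbuf_lock.wait()
            self.outbuf.extend(data)
        return len(data)

    def flush_some(self):
        # main-loop thread: pretend we sent some bytes, then wake any writer
        with self.outbuf_lock:
            del self.outbuf[:32]
            self.outbuf_lock.notify()

if __name__ == "__main__":
    channel = Channel()
    writer = threading.Thread(
        target=lambda: [channel.write_soon(b"x" * 16) for _ in range(20)]
    )
    writer.start()
    while writer.is_alive():
        time.sleep(0.01)
        channel.flush_some()
    writer.join()
    print("buffered bytes left:", len(channel.outbuf))
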
Example #2
 def __init__(
     self,
     server,
     sock,
     addr,
     adj,
     map=None,
 ):
     self.server = server
     self.addr = addr
     self.adj = adj
     self.outbuf = OverflowableBuffer(adj.outbuf_overflow)
     self.creation_time = self.last_activity = time.time()
     asyncore.dispatcher.__init__(self, sock, map=map)
Example #3
 def write_soon(self, data):
     if not self.connected:
         # if the socket is closed then interrupt the task so that it
         # can clean up, possibly before the app_iter is exhausted
         raise ClientDisconnected
     if data:
         # the async mainloop might be popping data off outbuf; we can
         # block here waiting for it because we're in a task thread
         with self.outbuf_lock:
             # check again after acquiring the lock to ensure the
             # outbufs are not closed
             if not self.connected:  # pragma: no cover
                 raise ClientDisconnected
             if data.__class__ is ReadOnlyFileBasedBuffer:
                 # they used wsgi.file_wrapper
                 self.outbufs.append(data)
                 nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                 self.outbufs.append(nextbuf)
             else:
                 self.outbufs[-1].append(data)
             num_bytes = len(data)
             self.total_outbufs_len += num_bytes
         # XXX We might eventually need to pull the trigger here (to
         # instruct select to stop blocking), but it slows things down so
         # much that I'll hold off for now; "server push" on otherwise
         # unbusy systems may suffer.
         return num_bytes
     return 0
Example #4
 def __init__(
         self,
         server,
         sock,
         addr,
         adj,
         map=None,
         ):
     self.server = server
     self.addr = addr
     self.adj = adj
     self.outbuf = OverflowableBuffer(adj.outbuf_overflow)
     self.creation_time = self.last_activity = time.time()
     asyncore.dispatcher.__init__(self, sock, map=map)
Example #5
    def __init__(self, server, sock, addr, adj, map=None):
        self.server = server
        self.adj = adj
        self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)]
        self.creation_time = self.last_activity = time.time()
        self.sendbuf_len = sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)

        # task_lock used to push/pop requests
        self.task_lock = threading.Lock()
        # outbuf_lock used to access any outbuf (expected to use an RLock)
        self.outbuf_lock = threading.Condition()

        wasyncore.dispatcher.__init__(self, sock, map=map)

        # Don't let wasyncore.dispatcher throttle self.addr on us.
        self.addr = addr
Example #6
 def write_soon(self, data):
     if data:
         # the async mainloop might be popping data off outbuf; we can
         # block here waiting for it because we're in a task thread
         with self.outbuf_lock:
             if data.__class__ is ReadOnlyFileBasedBuffer:
                 # they used wsgi.file_wrapper
                 self.outbufs.append(data)
                 nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                 self.outbufs.append(nextbuf)
             else:
                 self.outbufs[-1].append(data)
         # XXX We might eventually need to pull the trigger here (to
         # instruct select to stop blocking), but it slows things down so
         # much that I'll hold off for now; "server push" on otherwise
         # unbusy systems may suffer.
         return len(data)
     return 0
Example #7
    def __init__(
        self,
        server,
        sock,
        addr,
        adj,
        map=None,
    ):
        self.server = server
        self.addr = addr
        self.adj = adj
        self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)]
        self.creation_time = self.last_activity = time.time()

        # task_lock used to push/pop requests
        self.task_lock = thread.allocate_lock()
        # outbuf_lock used to access any outbuf
        self.outbuf_lock = thread.allocate_lock()

        asyncore.dispatcher.__init__(self, sock, map=map)
Example #8
    def __init__(
        self,
        server,
        sock,
        addr,
        adj,
        map=None,
    ):
        self.server = server
        self.adj = adj
        self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)]
        self.total_outbufs_len = 0
        self.creation_time = self.last_activity = time.time()

        # task_lock used to push/pop requests
        self.task_lock = threading.Lock()
        # outbuf_lock used to access any outbuf
        self.outbuf_lock = threading.RLock()

        wasyncore.dispatcher.__init__(self, sock, map=map)

        # Don't let wasyncore.dispatcher throttle self.addr on us.
        self.addr = addr
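
Examples #7 and #8 differ in the kind of outbuf_lock they create. A re-entrant lock matters once write_soon (Example #1) calls helpers while already holding the lock, which would deadlock with a plain lock. A small standalone sketch of that difference; the Buffered class and _trim() helper are hypothetical.

import threading

class Buffered:
    def __init__(self, lock):
        self.outbuf_lock = lock
        self.pending = 0

    def _trim(self):
        # helper that also takes the lock; safe only if the lock is re-entrant
        with self.outbuf_lock:
            self.pending = max(self.pending - 1, 0)

    def write_soon(self, n):
        with self.outbuf_lock:
            self.pending += n
            self._trim()  # re-acquires outbuf_lock while we still hold it

if __name__ == "__main__":
    Buffered(threading.RLock()).write_soon(3)   # fine: RLock is re-entrant
    # Buffered(threading.Lock()).write_soon(3)  # a plain Lock would hang here
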
Example #9
    def parse_header(self, header_plus):
        """
        Parses the header_plus block of text (the headers plus the
        first line of the request).
        """
        index = header_plus.find(b"\r\n")
        if index >= 0:
            first_line = header_plus[:index].rstrip()
            header = header_plus[index + 2:]
        else:
            raise ParsingError("HTTP message header invalid")

        if b"\r" in first_line or b"\n" in first_line:
            raise ParsingError("Bare CR or LF found in HTTP message")

        self.first_line = first_line  # for testing

        lines = get_header_lines(header)

        headers = self.headers
        for line in lines:
            header = HEADER_FIELD.match(line)

            if not header:
                raise ParsingError("Invalid header")

            key, value = header.group("name", "value")

            if b"_" in key:
                # TODO(xistence): Should we drop this request instead?
                continue

            # Only strip off whitespace that is considered valid whitespace by
            # RFC7230, don't strip the rest
            value = value.strip(b" \t")
            key1 = tostr(key.upper().replace(b"-", b"_"))
            # If a header already exists, we append subsequent values
            # separated by a comma. Applications already need to handle
            # the comma-separated values, as HTTP front ends might do
            # the concatenation for you (behavior specified in RFC2616).
            try:
                headers[key1] += tostr(b", " + value)
            except KeyError:
                headers[key1] = tostr(value)

        # command, uri, version will be bytes
        command, uri, version = crack_first_line(first_line)
        version = tostr(version)
        command = tostr(command)
        self.command = command
        self.version = version
        (
            self.proxy_scheme,
            self.proxy_netloc,
            self.path,
            self.query,
            self.fragment,
        ) = split_uri(uri)
        self.url_scheme = self.adj.url_scheme
        connection = headers.get("CONNECTION", "")

        if version == "1.0":
            if connection.lower() != "keep-alive":
                self.connection_close = True

        if version == "1.1":
            # since the server buffers data from chunked transfers and clients
            # never need to deal with chunked requests, downstream clients
            # should not see the HTTP_TRANSFER_ENCODING header; we pop it
            # here
            te = headers.pop("TRANSFER_ENCODING", "")

            # NB: We cannot just call a bare strip() here because it will also
            # remove other non-printable characters that we explicitly do not
            # want removed so that if someone attempts to smuggle a request
            # with these characters we don't fall prey to it.
            #
            # For example \x85 is stripped by default, but it is not considered
            # valid whitespace to be stripped by RFC7230.
            encodings = [
                encoding.strip(" \t").lower() for encoding in te.split(",")
                if encoding
            ]

            for encoding in encodings:
                # Out of the transfer-codings listed in
                # https://tools.ietf.org/html/rfc7230#section-4 we only support
                # chunked at this time.

                # Note: the identity transfer-coding was removed in RFC7230:
                # https://tools.ietf.org/html/rfc7230#appendix-A.2 and is thus
                # not supported
                if encoding not in {"chunked"}:
                    raise TransferEncodingNotImplemented(
                        "Transfer-Encoding requested is not supported.")

            if encodings and encodings[-1] == "chunked":
                self.chunked = True
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = ChunkedReceiver(buf)
            elif encodings:  # pragma: nocover
                raise TransferEncodingNotImplemented(
                    "Transfer-Encoding requested is not supported.")

            expect = headers.get("EXPECT", "").lower()
            self.expect_continue = expect == "100-continue"
            if connection.lower() == "close":
                self.connection_close = True

        if not self.chunked:
            try:
                cl = int(headers.get("CONTENT_LENGTH", 0))
            except ValueError:
                raise ParsingError("Content-Length is invalid")

            self.content_length = cl
            if cl > 0:
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = FixedStreamReceiver(cl, buf)
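
parse_header above depends on module-level helpers (HEADER_FIELD, get_header_lines, crack_first_line, split_uri, tostr) that are outside this excerpt. As a rough, self-contained illustration of two rules the comments describe, joining repeated header fields with a comma and accepting only the chunked transfer-coding, here is a simplified sketch; the function names are hypothetical and this is not waitress's actual parser.

def fold_headers(lines):
    headers = {}
    for line in lines:
        name, sep, value = line.partition(b":")
        if not sep or b"_" in name:
            continue  # drop unparseable names and underscores, as parse_header does
        key = name.decode("latin-1").upper().replace("-", "_")
        # strip only RFC 7230 whitespace (space and horizontal tab)
        value = value.strip(b" \t").decode("latin-1")
        # repeated fields are joined with ", "
        headers[key] = headers[key] + ", " + value if key in headers else value
    return headers

def last_coding_is_chunked(te_value):
    encodings = [e.strip(" \t").lower() for e in te_value.split(",") if e]
    for encoding in encodings:
        if encoding != "chunked":
            raise NotImplementedError("unsupported transfer-coding: " + encoding)
    return bool(encodings) and encodings[-1] == "chunked"

if __name__ == "__main__":
    hdrs = fold_headers([b"X-Forwarded-For: 10.0.0.1", b"X-Forwarded-For: 10.0.0.2"])
    assert hdrs["X_FORWARDED_FOR"] == "10.0.0.1, 10.0.0.2"
    assert last_coding_is_chunked("chunked") is True
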
Example #10
    def parse_header(self, header_plus):
        """
        Parses the header_plus block of text (the headers plus the
        first line of the request).
        """
        index = header_plus.find(b'\n')
        if index >= 0:
            first_line = header_plus[:index].rstrip()
            header = header_plus[index + 1:]
        else:
            first_line = header_plus.rstrip()
            header = b''

        self.first_line = first_line  # for testing

        lines = get_header_lines(header)

        headers = self.headers
        for line in lines:
            index = line.find(b':')
            if index > 0:
                key = line[:index]
                if b'_' in key:
                    continue
                value = line[index + 1:].strip()
                key1 = tostr(key.upper().replace(b'-', b'_'))
                # If a header already exists, we append subsequent values
                # separated by a comma. Applications already need to handle
                # the comma-separated values, as HTTP front ends might do
                # the concatenation for you (behavior specified in RFC2616).
                try:
                    headers[key1] += tostr(b', ' + value)
                except KeyError:
                    headers[key1] = tostr(value)
            # else there's garbage in the headers?

        # command, uri, version will be bytes
        command, uri, version = crack_first_line(first_line)
        version = tostr(version)
        command = tostr(command)
        self.command = command
        self.version = version
        (self.proxy_scheme, self.proxy_netloc, self.path, self.query,
         self.fragment) = split_uri(uri)
        self.url_scheme = self.adj.url_scheme
        connection = headers.get('CONNECTION', '')

        if version == '1.0':
            if connection.lower() != 'keep-alive':
                self.connection_close = True

        if version == '1.1':
            # since the server buffers data from chunked transfers and clients
            # never need to deal with chunked requests, downstream clients
            # should not see the HTTP_TRANSFER_ENCODING header; we pop it
            # here
            te = headers.pop('TRANSFER_ENCODING', '')
            if te.lower() == 'chunked':
                self.chunked = True
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = ChunkedReceiver(buf)
            expect = headers.get('EXPECT', '').lower()
            self.expect_continue = expect == '100-continue'
            if connection.lower() == 'close':
                self.connection_close = True

        if not self.chunked:
            try:
                cl = int(headers.get('CONTENT_LENGTH', 0))
            except ValueError:
                cl = 0
            self.content_length = cl
            if cl > 0:
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = FixedStreamReceiver(cl, buf)
Example #11
 def _makeOne(self, overflow=10):
     from waitress.buffers import OverflowableBuffer
     return OverflowableBuffer(overflow)
Example #12
    def _makeOne(self, overflow=10):
        from waitress.buffers import OverflowableBuffer

        buf = OverflowableBuffer(overflow)
        self.buffers_to_close.append(buf)
        return buf
Example #13
class HTTPChannel(logging_dispatcher, object):
    """Channel that switches between asynchronous and synchronous mode.

    Set self.requests = [somerequest] before using a channel in a thread other
    than the thread handling the main loop.

    Set self.requests = [] to give the channel back to the thread handling
    the main loop.
    """
    task_class = WSGITask
    error_task_class = ErrorTask
    parser_class = HTTPRequestParser

    request = None  # A request parser instance
    last_activity = 0  # Time of last activity
    will_close = False  # set to True to close the socket.
    requests = ()  # currently pending requests
    sent_continue = False  # used as a latch after sending 100 continue
    task_lock = thread.allocate_lock()  # lock used to push/pop requests
    force_flush = False  # indicates a need to flush the outbuf

    #
    # ASYNCHRONOUS METHODS (including __init__)
    #

    def __init__(
        self,
        server,
        sock,
        addr,
        adj,
        map=None,
    ):
        self.server = server
        self.addr = addr
        self.adj = adj
        self.outbuf = OverflowableBuffer(adj.outbuf_overflow)
        self.creation_time = self.last_activity = time.time()
        asyncore.dispatcher.__init__(self, sock, map=map)

    def writable(self):
        # if there's data in the out buffer or we've been instructed to close
        # the channel (possibly by our server maintenance logic), run
        # handle_write
        return bool(self.outbuf) or self.will_close

    def handle_write(self):
        # Precondition: there's data in the out buffer to be sent, or
        # there's a pending will_close request
        if not self.connected:
            # we don't want to close the channel twice
            return

        # try to flush any pending output
        if not self.requests:
            # 1. There are no running tasks, so we don't need to try to lock
            #    the outbuf before sending
            # 2. The data in the out buffer should be sent as soon as possible
            #    because it's either data left over from task output
            #    or a 100 Continue line sent within "received".
            flush = self._flush_some
        elif self.force_flush:
            # 1. There's a running task, so we need to try to lock
            #    the outbuf before sending
            # 2. This is the last chunk sent by the Nth of M tasks in a
            #    sequence on this channel, so flush it regardless of whether
            #    it's >= self.adj.send_bytes.  We need to do this now, or it
            #    won't get done.
            flush = self._flush_some_if_lockable
            self.force_flush = False
        elif (len(self.outbuf) >= self.adj.send_bytes):
            # 1. There's a running task, so we need to try to lock
            #    the outbuf before sending
            # 2. Only try to send if the data in the out buffer is larger
            #    than self.adj.send_bytes to avoid TCP fragmentation
            flush = self._flush_some_if_lockable
            self.force_flush = False
        else:
            # 1. There's not enough data in the out buffer to bother to send
            #    right now.
            flush = None

        if flush:
            try:
                flush()
            except socket.error:
                if self.adj.log_socket_errors:
                    self.logger.exception('Socket error')
                self.will_close = True

        if self.will_close:
            self.handle_close()

    def readable(self):
        # We might want to create a new task.  We can only do this if:
        # 1. We're not already about to close the connection.
        # 2. There's no already currently running task(s).
        # 3. There's no data in the output buffer that needs to be sent
        #    before we potentially create a new task.
        return not (self.will_close or self.requests or self.outbuf)

    def handle_read(self):
        try:
            data = self.recv(self.adj.recv_bytes)
        except socket.error:
            if self.adj.log_socket_errors:
                self.logger.exception('Socket error')
            self.handle_close()
            return
        if data:
            self.last_activity = time.time()
            self.received(data)

    def received(self, data):
        """
        Receives input asynchronously and assigns a task to the channel.
        """
        # Preconditions: there's no task(s) already running
        request = self.request
        requests = []

        if not data:
            return False

        while data:
            if request is None:
                request = self.parser_class(self.adj)
            n = request.received(data)
            if request.expect_continue and request.headers_finished:
                # guaranteed by parser to be a 1.1 request
                request.expect_continue = False
                if not self.sent_continue:
                    # there's no current task, so we don't need to try to
                    # lock the outbuf to append to it.
                    self.outbuf.append(b'HTTP/1.1 100 Continue\r\n\r\n')
                    self.sent_continue = True
                    self._flush_some()
                    request.completed = False
            if request.completed:
                # The request (with the body) is ready to use.
                self.request = None
                if not request.empty:
                    requests.append(request)
                request = None
            else:
                self.request = request
            if n >= len(data):
                break
            data = data[n:]

        if requests:
            self.requests = requests
            self.server.add_task(self)

        return True

    def _flush_some_if_lockable(self):
        # Since our task may be appending to the outbuf, we try to acquire
        # the lock, but we don't block if we can't.
        outbuf = self.outbuf
        locked = outbuf.lock.acquire(0)
        if locked:
            try:
                self._flush_some()
            finally:
                outbuf.lock.release()

    def _flush_some(self):
        # Send as much data as possible to our client
        outbuf = self.outbuf
        outbuflen = len(outbuf)
        sent = 0
        while outbuflen > 0:
            chunk = outbuf.get(self.adj.send_bytes)
            num_sent = self.send(chunk)
            if num_sent:
                outbuf.skip(num_sent, True)
                outbuflen -= num_sent
                sent += num_sent
            else:
                break
        if sent:
            self.last_activity = time.time()
            return True
        return False

    def handle_close(self):
        self.connected = False
        asyncore.dispatcher.close(self)

    def add_channel(self, map=None):
        """See asyncore.dispatcher

        This hook keeps track of opened channels.
        """
        asyncore.dispatcher.add_channel(self, map)
        self.server.active_channels[self._fileno] = self

    def del_channel(self, map=None):
        """See asyncore.dispatcher

        This hook keeps track of closed channels.
        """
        fd = self._fileno  # next line sets this to None
        asyncore.dispatcher.del_channel(self, map)
        ac = self.server.active_channels
        if fd in ac:
            del ac[fd]

    #
    # SYNCHRONOUS METHODS
    #

    def write_soon(self, data):
        if data:
            # the async mainloop might be popping data off outbuf; we can
            # block here waiting for it because we're in a thread
            with self.outbuf.lock:
                self.outbuf.append(data)
            # XXX We might eventually need to pull the trigger here (to
            # instruct select to stop blocking), but it slows things down so
            # much that I'll hold off for now; "server push" on otherwise
            # unbusy systems may suffer.
            return len(data)
        return 0

    def service(self):
        """Execute all pending requests """
        with self.task_lock:
            while self.requests:
                request = self.requests[0]
                if request.error:
                    task = self.error_task_class(self, request)
                else:
                    task = self.task_class(self, request)
                try:
                    task.service()
                except:
                    self.logger.exception('Exception when serving %s' %
                                          task.request.path)
                    if not task.wrote_header:
                        if self.adj.expose_tracebacks:
                            body = traceback.format_exc()
                        else:
                            body = ('The server encountered an unexpected '
                                    'internal server error')
                        request = self.parser_class(self.adj)
                        request.error = InternalServerError(body)
                        task = self.error_task_class(self, request)
                        task.service()  # must not fail
                    else:
                        task.close_on_finish = True
                # we cannot allow self.requests to drop to empty until
                # here; otherwise the mainloop gets confused
                if task.close_on_finish:
                    self.will_close = True
                    self.requests = []
                else:
                    self.requests.pop(0)

        self.force_flush = True
        self.server.pull_trigger()
        self.last_activity = time.time()

    def cancel(self):
        """ Cancels all pending requests """
        self.force_flush = True
        self.last_activity = time.time()
        self.requests = []

    def defer(self):
        pass
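
_flush_some_if_lockable above relies on a non-blocking acquire so the main-loop thread never waits on a lock that a task thread may hold, while write_soon can block freely because it runs in a task thread. A minimal standalone sketch of that try-lock pattern, with illustrative names only.

import threading

outbuf_lock = threading.Lock()

def flush_some_if_lockable(flush):
    # acquire(False) returns immediately instead of blocking
    if outbuf_lock.acquire(False):
        try:
            flush()
        finally:
            outbuf_lock.release()
    # else: a task thread holds the lock; the main loop retries on its next pass

if __name__ == "__main__":
    flush_some_if_lockable(lambda: print("flushed"))      # lock free: flushes
    with outbuf_lock:
        flush_some_if_lockable(lambda: print("flushed"))  # lock held: skipped
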
Example #14
class HTTPChannel(logging_dispatcher, object):
    """Channel that switches between asynchronous and synchronous mode.

    Set self.requests = [somerequest] before using a channel in a thread other
    than the thread handling the main loop.

    Set self.requests = [] to give the channel back to the thread handling
    the main loop.
    """
    task_class = WSGITask
    error_task_class = ErrorTask
    parser_class = HTTPRequestParser

    request = None               # A request parser instance
    last_activity = 0            # Time of last activity
    will_close = False           # set to True to close the socket.
    requests = ()                # currently pending requests
    sent_continue = False        # used as a latch after sending 100 continue
    task_lock = thread.allocate_lock()  # lock used to push/pop requests
    force_flush = False          # indicates a need to flush the outbuf

    #
    # ASYNCHRONOUS METHODS (including __init__)
    #

    def __init__(
            self,
            server,
            sock,
            addr,
            adj,
            map=None,
            ):
        self.server = server
        self.addr = addr
        self.adj = adj
        self.outbuf = OverflowableBuffer(adj.outbuf_overflow)
        self.creation_time = self.last_activity = time.time()
        asyncore.dispatcher.__init__(self, sock, map=map)

    def writable(self):
        # if there's data in the out buffer or we've been instructed to close
        # the channel (possibly by our server maintenance logic), run
        # handle_write
        return bool(self.outbuf) or self.will_close

    def handle_write(self):
        # Precondition: there's data in the out buffer to be sent, or
        # there's a pending will_close request
        if not self.connected:
            # we don't want to close the channel twice
            return

        # try to flush any pending output
        if not self.requests:
            # 1. There are no running tasks, so we don't need to try to lock
            #    the outbuf before sending
            # 2. The data in the out buffer should be sent as soon as possible
            #    because it's either data left over from task output
            #    or a 100 Continue line sent within "received".
            flush = self._flush_some
        elif self.force_flush:
            # 1. There's a running task, so we need to try to lock
            #    the outbuf before sending
            # 2. This is the last chunk sent by the Nth of M tasks in a
            #    sequence on this channel, so flush it regardless of whether
            #    it's >= self.adj.send_bytes.  We need to do this now, or it
            #    won't get done.
            flush = self._flush_some_if_lockable
            self.force_flush = False
        elif (len(self.outbuf) >= self.adj.send_bytes):
            # 1. There's a running task, so we need to try to lock
            #    the outbuf before sending
            # 2. Only try to send if the data in the out buffer is larger
            #    than self.adj.send_bytes to avoid TCP fragmentation
            flush = self._flush_some_if_lockable
            self.force_flush = False
        else:
            # 1. There's not enough data in the out buffer to bother to send
            #    right now.
            flush = None

        if flush:
            try:
                flush()
            except socket.error:
                if self.adj.log_socket_errors:
                    self.logger.exception('Socket error')
                self.will_close = True

        if self.will_close:
            self.handle_close()

    def readable(self):
        # We might want to create a new task.  We can only do this if:
        # 1. We're not already about to close the connection.
        # 2. There's no already currently running task(s).
        # 3. There's no data in the output buffer that needs to be sent
        #    before we potentially create a new task.
        return not (self.will_close or self.requests or self.outbuf)

    def handle_read(self):
        try:
            data = self.recv(self.adj.recv_bytes)
        except socket.error:
            if self.adj.log_socket_errors:
                self.logger.exception('Socket error')
            self.handle_close()
            return
        if data:
            self.last_activity = time.time()
            self.received(data)

    def received(self, data):
        """
        Receives input asynchronously and assigns a task to the channel.
        """
        # Preconditions: there's no task(s) already running
        request = self.request
        requests = []

        if not data:
            return False

        while data:
            if request is None:
                request = self.parser_class(self.adj)
            n = request.received(data)
            if request.expect_continue and request.headers_finished:
                # guaranteed by parser to be a 1.1 request
                request.expect_continue = False
                if not self.sent_continue:
                    # there's no current task, so we don't need to try to
                    # lock the outbuf to append to it.
                    self.outbuf.append(b'HTTP/1.1 100 Continue\r\n\r\n')
                    self.sent_continue = True
                    self._flush_some()
                    request.completed = False
            if request.completed:
                # The request (with the body) is ready to use.
                self.request = None
                if not request.empty:
                    requests.append(request)
                request = None
            else:
                self.request = request
            if n >= len(data):
                break
            data = data[n:]

        if requests:
            self.requests = requests
            self.server.add_task(self)

        return True

    def _flush_some_if_lockable(self):
        # Since our task may be appending to the outbuf, we try to acquire
        # the lock, but we don't block if we can't.
        outbuf = self.outbuf
        locked = outbuf.lock.acquire(0)
        if locked:
            try:
                self._flush_some()
            finally:
                outbuf.lock.release()

    def _flush_some(self):
        # Send as much data as possible to our client
        outbuf = self.outbuf
        outbuflen = len(outbuf)
        sent = 0
        while outbuflen > 0:
            chunk = outbuf.get(self.adj.send_bytes)
            num_sent = self.send(chunk)
            if num_sent:
                outbuf.skip(num_sent, True)
                outbuflen -= num_sent
                sent += num_sent
            else:
                break
        if sent:
            self.last_activity = time.time()
            return True
        return False

    def handle_close(self):
        self.connected = False
        asyncore.dispatcher.close(self)

    def add_channel(self, map=None):
        """See asyncore.dispatcher

        This hook keeps track of opened channels.
        """
        asyncore.dispatcher.add_channel(self, map)
        self.server.active_channels[self._fileno] = self

    def del_channel(self, map=None):
        """See asyncore.dispatcher

        This hook keeps track of closed channels.
        """
        fd = self._fileno # next line sets this to None
        asyncore.dispatcher.del_channel(self, map)
        ac = self.server.active_channels
        if fd in ac:
            del ac[fd]

    #
    # SYNCHRONOUS METHODS
    #

    def write_soon(self, data):
        if data:
            # the async mainloop might be popping data off outbuf; we can
            # block here waiting for it because we're in a thread
            with self.outbuf.lock:
                self.outbuf.append(data)
            # XXX We might eventually need to pull the trigger here (to
            # instruct select to stop blocking), but it slows things down so
            # much that I'll hold off for now; "server push" on otherwise
            # unbusy systems may suffer.
            return len(data)
        return 0

    def service(self):
        """Execute all pending requests """
        with self.task_lock:
            while self.requests:
                request = self.requests[0]
                if request.error:
                    task = self.error_task_class(self, request)
                else:
                    task = self.task_class(self, request)
                try:
                    task.service()
                except:
                    self.logger.exception('Exception when serving %s' %
                                          task.request.path)
                    if not task.wrote_header:
                        if self.adj.expose_tracebacks:
                            body = traceback.format_exc()
                        else:
                            body = ('The server encountered an unexpected '
                                    'internal server error')
                        request = self.parser_class(self.adj)
                        request.error = InternalServerError(body)
                        task = self.error_task_class(self, request)
                        task.service() # must not fail
                    else:
                        task.close_on_finish = True
                # we cannot allow self.requests to drop to empty until
                # here; otherwise the mainloop gets confused
                if task.close_on_finish:
                    self.will_close = True
                    self.requests = []
                else:
                    self.requests.pop(0)

        self.force_flush = True
        self.server.pull_trigger()
        self.last_activity = time.time()

    def cancel(self):
        """ Cancels all pending requests """
        self.force_flush = True
        self.last_activity = time.time()
        self.requests = []

    def defer(self):
        pass
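
The XXX comments and service() refer to "pulling the trigger" so a select() call blocked in the main loop wakes up when another thread queues output. Waitress's trigger object is not part of these excerpts; the sketch below only illustrates the general idea with a socket pair, and mainloop/pull_trigger here are hypothetical names.

import select
import socket
import threading
import time

r, w = socket.socketpair()  # the main loop selects on r; other threads write to w

def mainloop():
    # blocks for up to 5 seconds unless the trigger is pulled
    ready, _, _ = select.select([r], [], [], 5.0)
    if r in ready:
        r.recv(1)
        print("main loop woken by trigger")

def pull_trigger():
    w.send(b"x")

if __name__ == "__main__":
    t = threading.Thread(target=mainloop)
    t.start()
    time.sleep(0.1)
    pull_trigger()  # wakes the main loop immediately instead of after 5 s
    t.join()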