Example #1
    def work(self):
        error_log.info('Entering endless loop of processing sockets.')

        while True:
            sock, address = (None, None)

            # noinspection PyBroadException
            try:
                if not self.connections:
                    self.recv_new_sockets()

                sock, address = self.connections.popleft()
                self.handle_connection(socket=sock, address=address)
            except SignalReceivedException as err:
                if err.signum == ws.signals.SIGTERM:
                    error_log.info('Breaking work() loop due to signal %s.',
                                   ws.signals.Signals(err.signum).name)
                    break
                else:
                    error_log.exception('Unknown signal during work() loop')
            except KeyboardInterrupt:
                break
            except Exception:
                error_log.exception('Exception occurred during work() loop.')
                continue
            finally:
                if sock:
                    sock.shutdown(ws.sockets.SHUT_RDWR, pass_silently=True)
                    sock.close(pass_silently=True)

        # noinspection PyUnreachableCode
        self.cleanup()

        return 0
Example #2
    def cleanup(self):
        """ Closing listening socket and reap children.

        This method sleeps for the maximum timeout of SIGTERM signal sent to
        a worker. (Worker.sigterm_timeout)
        """
        # Don't clean up workers here because their sockets were already
        # closed during the self.fork_workers() call.
        if self.execution_context == self.ExecutionContext.worker:
            return

        error_log.info("Closing server's listening socket")
        try:
            self.sock.close()
        except OSError:
            error_log.exception('close() on listening socket failed.')

        ws.signals.signal(ws.signals.SIGCHLD, ws.signals.SIG_DFL)
        active_workers = [
            worker for worker in self.workers.values()
            if worker.pid not in self.reaped_pids
        ]
        for worker in active_workers:
            worker.terminate()

        if active_workers:
            timeout = max(worker.sigterm_timeout for worker in active_workers)
            error_log.info('Waiting %s seconds for children to finish.',
                           timeout)
            time.sleep(timeout)

        for worker in active_workers:
            pid, exit_status = os.waitpid(worker.pid, os.WNOHANG)
            if not pid:
                worker.kill_if_hanged()
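The terminate, wait, then reap sequence above can be shown standalone. A minimal sketch using only os, signal and time (the sleeping child and the 2 second grace period are stand-ins for a worker and Worker.sigterm_timeout):

import os
import signal
import time

SIGTERM_TIMEOUT = 2  # seconds; stand-in for Worker.sigterm_timeout

# Fork a dummy child that just sleeps until it is signalled.
pid = os.fork()
if pid == 0:
    time.sleep(60)
    os._exit(0)

# Ask the child to terminate, give it a grace period, then reap it.
os.kill(pid, signal.SIGTERM)
time.sleep(SIGTERM_TIMEOUT)

reaped_pid, status = os.waitpid(pid, os.WNOHANG)
if not reaped_pid:
    # Child survived the grace period; force-kill and reap it for real.
    os.kill(pid, signal.SIGKILL)
    os.waitpid(pid, 0)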
Example #3
    def __next__(self):
        if self.current_chunk:
            try:
                return next(self.current_chunk)
            except StopIteration:
                pass
        elif self.socket_broke:
            raise StopIteration()

        if self.connected_on + self.connection_timeout < time.time():
            raise ClientSocketException(code='CS_CONNECTION_TIMED_OUT')

        try:
            chunk = self.sock.recv(self.__class__.buffer_size)
        except ws.sockets.TimeoutException as e:
            error_log.warning('Socket timed out while receiving request.')
            raise ClientSocketException(code='CS_PEER_SEND_IS_TOO_SLOW') from e

        error_log.debug3('Read chunk %s', chunk)

        if chunk == b'':
            error_log.info('Socket %d broke', self.sock.fileno())
            self.socket_broke = True
            raise ClientSocketException(code='CS_PEER_NOT_SENDING')

        self.current_chunk = iter(chunk)

        return next(self.current_chunk)
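The pattern in __next__ (drain the current chunk byte by byte, refill from the socket, stop on an empty read) can be illustrated without the project's socket wrappers. A minimal sketch; ChunkedByteIterator and the canned chunks are illustrative, not part of the project's API:

class ChunkedByteIterator:
    """Yield single bytes (as ints), refilling one chunk at a time."""

    def __init__(self, chunks):
        self._chunks = iter(chunks)      # stand-in for repeated sock.recv() calls
        self._current = iter(b'')

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self._current)
        except StopIteration:
            pass
        chunk = next(self._chunks, b'')  # b'' plays the role of a broken socket
        if chunk == b'':
            raise StopIteration
        self._current = iter(chunk)
        return next(self._current)


# Example: iterate two "received" chunks byte by byte.
reader = ChunkedByteIterator([b'GET ', b'/ HTTP/1.1'])
assert bytes(reader) == b'GET / HTTP/1.1'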
Example #4
    def listen(self):
        assert all(isinstance(w, WorkerProcess) for w in self.workers.values())
        assert len(self.workers) == self.process_count_limit

        error_log.info('Listening with backlog %s...', self.tcp_backlog_size)
        self.sock.listen(self.tcp_backlog_size)

        while True:
            # TODO: rate-limit how often clients may open new connections,
            # rather than limiting on HTTP syntax (which is deferred to workers)
            try:
                sock, address = self.sock.accept()
            except OSError as err:
                error_log.warning('accept() raised ERRNO=%s with MSG=%s',
                                  err.errno, err.strerror)

                # TODO perhaps reopen failed listening sockets.
                assert err.errno not in (errno.EBADF, errno.EFAULT,
                                         errno.EINVAL, errno.ENOTSOCK,
                                         errno.EOPNOTSUPP)
                # don't break the listening loop just because one accept failed
                continue

            self.accepted_connections += 1
            passed = self.distribute_connection(client_socket=sock,
                                                address=address)
            if not passed:
                # TODO fork and reply quickly with a 503
                error_log.warning(
                    'Could not distribute connection %s / %s to '
                    'workers. Dropping connection.', sock, address)

            sock.close(pass_silently=True)

            # duplicate the set so SIGCHLD handler doesn't cause problems
            to_remove = frozenset(self.reaped_pids)

            for pid in to_remove:
                old_worker = self.workers.pop(pid)
                old_worker.close_ipc()
                self.reaped_pids.remove(pid)

            # Fork outside the loop above to avoid a race condition where a
            # worker process gets forked with a pid that is still in
            # self.reaped_pids.
            missing = self.process_count_limit - len(self.workers)
            if missing > 0:
                self.fork_workers(missing)

            for worker_process in self.workers.values():
                worker_process.kill_if_hanged()

            if ws.signals.SIGUSR1 in self.received_signals:
                for pid in self.workers:
                    ws.signals.kill(pid, ws.signals.SIGUSR1)
                self.received_signals.remove(ws.signals.SIGUSR1)
Example #5
    def reindex_files(self):
        error_log.info('Reindexing files under dir %s', self.document_root)
        file_keys = set()
        for dir_path, dir_names, file_names in os.walk(self.document_root):
            for fn in file_names:
                fp = os.path.join(dir_path, fn)
                stat = os.stat(fp)
                file_keys.add((stat.st_ino, stat.st_dev))
        self.file_keys = frozenset(file_keys)
        error_log.debug3('Indexed file keys are: %s', self.file_keys)
        self.reindex_is_scheduled = False
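The (st_ino, st_dev) pairs collected above identify files by inode and device rather than by path, so the index is unaffected by how a path is spelled. A minimal standalone sketch of the same idea (helper names are hypothetical, error handling kept to a minimum):

import os

def index_file_keys(document_root):
    """Collect (inode, device) pairs for every file under a directory."""
    keys = set()
    for dir_path, _dir_names, file_names in os.walk(document_root):
        for name in file_names:
            st = os.stat(os.path.join(dir_path, name))
            keys.add((st.st_ino, st.st_dev))
    return frozenset(keys)

def is_indexed(path, file_keys):
    """True if `path` resolves to a file that was present during indexing."""
    try:
        st = os.stat(path)
    except OSError:
        return False
    return (st.st_ino, st.st_dev) in file_keys

# Example usage against the current directory:
keys = index_file_keys('.')
print(is_indexed(__file__, keys))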
Example #6
    def handle_connection(self,
                          socket,
                          address,
                          quick_reply_with=None,
                          ssl_only=config.getboolean('ssl', 'strict')):
        assert isinstance(socket, ws.sockets.Socket)
        assert isinstance(address, collections.Sequence)

        error_log.debug3('handle_connection()')

        if self.rate_controller.is_banned(address[0]):
            socket.close(pass_silently=True)
            return

        wrapped_sock = socket

        if self.ssl_ctx:
            if socket.client_uses_ssl():
                wrapped_sock = ws.sockets.SSLSocket.from_sock(
                    sock=socket, context=self.ssl_ctx, server_side=True)
            elif ssl_only:
                quick_reply_with = hutils.build_response(403)
            else:
                error_log.info('Client on %s / %s does not use SSL/TLS',
                               socket, address)

        conn_worker = ws.cworker.ConnectionWorker(
            sock=wrapped_sock,
            address=address,
            auth_scheme=self.auth_scheme,
            static_files=self.static_files,
            worker_ctx={'request_stats': self.request_stats})
        try:
            with conn_worker:
                conn_worker.process_connection(
                    quick_reply_with=quick_reply_with)
        finally:
            self.rate_controller.record_handled_connection(
                ip_address=address[0], status_codes=conn_worker.status_codes())
            for exchange_stats in conn_worker.generate_stats():
                for stat_name, val in exchange_stats.items():
                    self.request_stats[stat_name]['total'] += val
                    self.request_stats[stat_name]['count'] += 1
            for exchange in conn_worker.exchanges:
                access_log.log(**exchange)
Example #7
    def cleanup(self):
        error_log.info('Cleaning up... %s total leftover connections.',
                       len(self.connections))
        self.fd_transport.discard()

        for sock, address in self.connections:
            # noinspection PyBroadException
            try:
                res = hutils.build_response(503)
                self.handle_connection(socket=sock,
                                       address=address,
                                       quick_reply_with=res)
            except Exception:
                error_log.exception(
                    'Error while cleaning up client on '
                    '%s / %s', sock, address)
            finally:
                sock.close(pass_silently=True)
Example #8
@contextlib.contextmanager
def profile(enabled=True):
    if not enabled:
        yield
        return

    error_log.info('Starting profiling.')
    profiler = cProfile.Profile()
    profiler.enable()
    profile_log.profile('Enabled profiling')
    try:
        yield
    finally:
        profiler.disable()
        profile_log.profile('Disabled profiling')
        s = io.StringIO()
        ps = pstats.Stats(profiler, stream=s)
        ps = ps.sort_stats('cumulative')
        ps.print_stats()
        profile_log.profile('cProfiler results:\n %s', s.getvalue())
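profile() is a generator-based context manager, so callers simply wrap the code to be measured in a with block. A self-contained sketch of the same pattern, using the standard logging module in place of the project's profile_log:

import contextlib
import cProfile
import io
import logging
import pstats

log = logging.getLogger('profile_sketch')
logging.basicConfig(level=logging.INFO)

@contextlib.contextmanager
def profiled(enabled=True):
    if not enabled:
        yield
        return
    profiler = cProfile.Profile()
    profiler.enable()
    try:
        yield
    finally:
        profiler.disable()
        out = io.StringIO()
        pstats.Stats(profiler, stream=out).sort_stats('cumulative').print_stats(10)
        log.info('cProfile results:\n%s', out.getvalue())

# Usage: profiling stays a no-op unless explicitly enabled.
with profiled(enabled=True):
    sum(i * i for i in range(100000))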
Example #9
def sys_has_fork_support():
    error_log.info('Checking if system has fork support by doing a '
                   'dummy fork...')
    try:
        pid = os.fork()
    except OSError as err:
        if err.errno == errno.ENOSYS:
            error_log.critical('System does not have fork() support.')
            return False
        else:
            # fork() exists but failed for another reason (e.g. EAGAIN).
            return True

    if pid == 0:
        # noinspection PyProtectedMember
        os._exit(0)
    else:
        error_log.info('Fork successful. Cleaning up dummy child '
                       '(pid={:d})...'.format(pid))
        os.waitpid(pid, 0)

    return True
Example #10
    def fork_workers(self, count=1):
        error_log.info('Forking %s workers', count)
        assert isinstance(count, int)

        for _ in range(count):
            fd_transport = ws.sockets.FDTransport()
            pid = os.fork()
            if pid:
                self.execution_context = self.ExecutionContext.main
                error_log.debug('Forked worker with pid=%s', pid)
                ws.sockets.randomize_ssl_after_fork()
                fd_transport.mode = 'sender'
                wp = WorkerProcess(pid=pid, fd_transport=fd_transport)
                self.workers[wp.pid] = wp
            else:
                self.execution_context = self.ExecutionContext.worker
                ws.signals.reset_handlers(excluding={ws.signals.SIGTERM})
                ws.signals.signal(ws.signals.SIGCHLD, ws.signals.SIG_IGN)
                # noinspection PyBroadException
                try:
                    ws.logs.setup_worker_handlers()
                    fd_transport.mode = 'receiver'
                    for other_worker in self.workers.values():
                        other_worker.close_ipc()
                    self.sock.close()
                    os.close(0)
                    os.close(1)
                    os.close(2)
                    with profile(WORKER_PROFILING_ON):
                        worker = ws.worker.Worker(fd_transport=fd_transport)
                        exit_code = worker.work()
                except BaseException:
                    error_log.exception('Worker failed.')
                    exit_code = 1

                # noinspection PyProtectedMember
                os._exit(exit_code)
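Stripped of the project's IPC, signal and logging setup, fork_workers() follows the classic pre-fork skeleton: the parent records each child's pid, and every child must leave through os._exit() so it never falls back into the parent's code path. A minimal sketch (function names are illustrative):

import os

def fork_workers(count, work):
    """Fork `count` children that each run work() and exit; return their pids."""
    pids = []
    for _ in range(count):
        pid = os.fork()
        if pid:
            # Parent: remember the child and keep forking.
            pids.append(pid)
        else:
            # Child: never return into the parent's loop.
            exit_code = 1
            try:
                exit_code = work()
            finally:
                os._exit(exit_code)
    return pids

# Example: three children that immediately exit successfully.
children = fork_workers(3, lambda: 0)
for pid in children:
    os.waitpid(pid, 0)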
Example #11
def main():
    # Main process should never exit from the server.listen() loop unless
    # an exception occurs.
    # ulimit is a shell builtin, so pass the whole command as a single string.
    fd_limit = subprocess.check_output('ulimit -n', shell=True)
    error_log.info('ulimit -n is "%s"', fd_limit.decode('ascii').strip())
    server = Server()
    server.setup()
    with profile(SERVER_PROFILING_ON):
        # noinspection PyBroadException
        try:
            server.listen()
        except SignalReceivedException as err:
            if err.signum == ws.signals.SIGTERM:
                error_log.info('SIGTERM signal broke listen() loop.')
            else:
                error_log.exception('Unknown signal broke listen() loop.')
        except KeyboardInterrupt:
            error_log.info('KeyboardInterrupt broke listen() loop.')
        except BaseException:
            error_log.exception('Unhandled exception broke listen() loop.')
        finally:
            server.cleanup()
Example #12
def raising_signal_handler(signum, stack_info):
    error_log.info('Received signum %s in raising signal handler.', signum)
    signame = siglib.Signals(signum).name
    raise SignalReceivedException(msg='Received signal {}'.format(signame),
                                  code='DEFAULT_HANDLER_CAUGHT_SIGNAL',
                                  signum=signum)
Example #13
def kill(pid, signum):
    signame = siglib.Signals(signum).name
    error_log.info('Sending %s to pid %s.', signame, pid)
    os.kill(pid, signum)
Example #14
def signal(signum, handler):
    signame = siglib.Signals(signum).name
    error_log.info('Setting handler of %s to %s', signame, handler)
    siglib.signal(signum, handler)
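Together these wrappers install a handler that converts a delivered signal into an exception the main loop can catch, as work() and main() do above. A minimal sketch with the standard signal module and RuntimeError standing in for SignalReceivedException:

import os
import signal
import time

def raising_handler(signum, _frame):
    # Stand-in for SignalReceivedException; carries the signal name.
    raise RuntimeError('Received signal {}'.format(signal.Signals(signum).name))

signal.signal(signal.SIGTERM, raising_handler)

try:
    # Deliver SIGTERM to ourselves; the handler converts it to an exception.
    os.kill(os.getpid(), signal.SIGTERM)
    time.sleep(1)  # interrupted: the handler's exception propagates here or earlier
except RuntimeError as err:
    print('Loop broken by:', err)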
Example #15
def execute_script(request, socket, body_start=b''):
    assert isinstance(socket, (ws.sockets.SSLSocket, ws.sockets.Socket))
    assert isinstance(body_start, (bytes, bytearray))
    assert can_handle_request(request)

    uri = request.request_line.request_target
    cgi_script = find_cgi_script(uri)
    error_log.info('Executing CGI script %s', cgi_script.name)
    script_env = prepare_cgi_script_env(request, socket)
    error_log.debug('CGIScript environment will be: %s', script_env)

    has_body = 'Content-Length' in request.headers

    try:
        proc = subprocess.Popen(
            args=os.path.abspath(cgi_script.script_path),
            env=script_env,
            stdin=subprocess.PIPE if has_body else socket.fileno(),
            stdout=socket.fileno())
    except (OSError, ValueError):
        error_log.exception(
            'Failed to open subprocess for cgi script {}'.format(
                cgi_script.name))
        return ws.http.utils.build_response(500)

    # noinspection PyBroadException
    try:
        if not has_body:
            return

        error_log.debug('Request to CGI has body. Writing to stdin...')
        start = time.time()

        length = int(request.headers['Content-Length'])
        read_bytes = 0
        chunk_size = 4096
        timeout_reached = False
        while read_bytes < length and not timeout_reached:
            if body_start:
                chunk = body_start
                body_start = b''
            else:
                chunk = socket.recv(chunk_size)
            # Count bytes from body_start too, so Content-Length is honoured.
            read_bytes += len(chunk)
            if not chunk:
                break

            while chunk:
                avail = select.select([], [proc.stdin], [], cgi_script.timeout)
                _, wlist, _ = avail
                if wlist:
                    written = wlist[0].write(chunk)
                    chunk = chunk[written:]
                else:
                    timeout_reached = True
                    break
            if time.time() - start > cgi_script.timeout:
                timeout_reached = True

        if timeout_reached:
            error_log.warning(
                'CGI script %s took too long to read body. '
                'Leaving process alive but no more data will '
                'be piped to it.', cgi_script)
    except ws.sockets.TimeoutException:
        error_log.warning("Client sent data too slowly - socket timed out.")
    except Exception:
        error_log.exception('Failed to write body to CGI script.')
    finally:
        try:
            proc.stdin.close()
        except OSError as err:
            error_log.warning(
                'Closing CGI stdin pipe failed. ERRNO=%s MSG=%s.', err.errno,
                err.strerror)
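The core of the body-piping loop is writing to the child's stdin only when select() reports the pipe writable, so a stuck CGI process cannot block the server indefinitely. A minimal standalone sketch of that technique, with cat standing in for a CGI script and a hypothetical 5 second timeout:

import select
import subprocess

TIMEOUT = 5.0  # seconds; stand-in for cgi_script.timeout

# cat plays the role of a CGI script that reads its stdin.
proc = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                        stdout=subprocess.DEVNULL)

chunk = b'field=value&' * 1000
timed_out = False
while chunk:
    # Only write when the pipe is writable, so a stalled child cannot
    # block this process forever.
    _, writable, _ = select.select([], [proc.stdin], [], TIMEOUT)
    if not writable:
        timed_out = True
        break
    written = proc.stdin.write(chunk)
    chunk = chunk[written:]

proc.stdin.close()
proc.wait()
print('timed out:', timed_out)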
Example #16
    def setup(self):
        """ Bind socket and pre-fork workers. """
        error_log.info('Binding server on %s:%s', self.host, self.port)
        self.sock.bind((self.host, self.port))
        self.fork_workers(self.process_count_limit)
Example #17
import io
import os
import re

import ws.http.utils
from ws.config import config
from ws.err import *
from ws.http.structs import HTTPResponse, HTTPStatusLine, HTTPHeaders
from ws.logs import error_log

STATIC_ROUTE = config['routes']['static']
STATIC_DIR = os.path.realpath(os.path.abspath(
    config['resources']['static_dir']
))

error_log.info('Configured static route is %s. Directory is %s',
               STATIC_ROUTE, STATIC_DIR)

if not STATIC_ROUTE.endswith('/'):
    raise SysError(msg="routes.static must end with a '/'",
                   code='CONFIG_BAD_STATIC_ROUTE')
if not os.path.isdir(STATIC_DIR):
    raise SysError(msg='resources.static_dir field must be a directory',
                   code='CONFIG_BAD_STATIC_DIR')


class StaticFiles:
    def __init__(self, document_root=STATIC_DIR, route_prefix=STATIC_ROUTE):
        self.document_root = document_root
        self.route_prefix = route_prefix
        self.file_keys = frozenset()
        self.reindex_is_scheduled = False
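How file_keys and route_prefix are used is not shown in this excerpt, but a plausible reading is: map a request path under route_prefix to a real path under document_root and serve it only if its (st_ino, st_dev) key was indexed. A hypothetical sketch of such a check (resolve_static is an illustrative name, not the project's API):

import os

def resolve_static(route_prefix, document_root, file_keys, request_path):
    """Resolve a request path to a file under document_root, or return None."""
    if not request_path.startswith(route_prefix):
        return None
    relative = request_path[len(route_prefix):]
    resolved = os.path.realpath(os.path.join(document_root, relative))
    # Refuse anything that escapes the document root (e.g. via '..').
    if not resolved.startswith(document_root + os.sep):
        return None
    try:
        st = os.stat(resolved)
    except OSError:
        return None
    # Serve only files that were present when the index was built.
    if (st.st_ino, st.st_dev) not in file_keys:
        return None
    return resolved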