Example #1
    def _create_sockets(self, servers_config):

        """ Creates server objects (TcpServer or TcpSSLServer). """

        self.poller = Poller()

        for config in servers_config:
            address = (config.get('host', None), config.get('port', 0))
            if 'ssl' in config:
                ssl_config = config['ssl']
                server = TcpSSLServer(address, certfile=ssl_config.get('certfile'), keyfile=ssl_config.get('keyfile'),
                    ca_certs=ssl_config.get('ca_certs'), cert_reqs=ssl.CERT_REQUIRED)
            else:
                server = TcpServer(address)
            self.servers.append(server)
            self.protocols[server] = config.get('protocol', 'jsonrpc')
            self.poller.register(server)
            self.poller.bind(server, 'newclient', self.new_client)
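
For reference, a hedged sketch of the ``servers_config`` structure the ``_create_sockets`` method above iterates over, using only the keys it reads ('host', 'port', 'ssl', 'protocol'); all values are illustrative:

servers_config = (
    {
        'host': '127.0.0.1',         # defaults to None when missing
        'port': 8080,                # defaults to 0 when missing
        'protocol': 'jsonrpc',       # defaults to 'jsonrpc' when missing
    },
    {
        'host': '127.0.0.1',
        'port': 8443,
        'ssl': {                     # the presence of 'ssl' selects TcpSSLServer
            'certfile': 'server.crt',
            'keyfile': 'server.key',
            'ca_certs': 'ca.crt',    # client certificates are required (ssl.CERT_REQUIRED)
        },
    },
)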
Example #2
    def __call__(self, pipe, servers, queue, process_name_format, shared_clients_count, **kwargs):

        """ This method is called in a new process.

        :param pipe: The pipe used to communicate with the main process.
        :param servers: A list of listening server sockets (with inherited file descriptors).
        :param queue: The queue that will be used to send log messages to the main process.
        """

        self._process_name_format = process_name_format
        self._pipe = pipe
        self._servers = servers
        self._queue = queue
        self._shared_clients_count = shared_clients_count
        self._clients_poller = Poller()
        self._clients_handlers = {} # Mapping socket -> ClientHandler

        self._init_logging(queue)
        self._update_process_name()

        logger.debug('Started and initialized the worker process')

        while True:
            try:
                # pipe.poll() is non-blocking, whereas poller.poll() blocks for up to
                # WORKER_POLL_BLOCKING_DURATION seconds. This means we could wait up to that
                # long between handling two commands (including new client handling).
                if self._pipe.poll():
                    self.trigger_command(pipe.recv())
                self._clients_poller.poll(WORKER_POLL_BLOCKING_DURATION)
            except KeyboardInterrupt:
                break
            except:
                logger.exception('An error occurred in a worker')

        return 0
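
The loop above relies on the two flavours of poll(): non-blocking on the pipe, blocking with a timeout on the client poller. A minimal, standard-library-only sketch of the pipe side (Poller and WORKER_POLL_BLOCKING_DURATION are project-specific and not reproduced here):

import multiprocessing

parent_end, child_end = multiprocessing.Pipe()
parent_end.send('ping')

# Non-blocking: returns immediately, True only if data is already waiting.
if child_end.poll():
    print(child_end.recv())            # -> 'ping'

# Blocking with a timeout: waits up to 0.5 seconds for data to arrive.
if child_end.poll(0.5):
    print(child_end.recv())
else:
    print('no command within 0.5s')    # this branch runs, nothing else was sent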
Example #3
class Worker:

    """ This class represents a worker process.

    The :meth:`__call__` method will be run in a new process after :func:`os.fork`, and will wait
    for commands sent by the main process. The process runs in an infinite loop which does the following:

        #. Checks if some data is available in its pipe. The pipe is used to communicate with the main process,
            via pickled :class:`Command` objects.
        #. If a command is available, calls the ``trigger_command_<command_name>`` method.

    Basically, the main command is ``new_client``. When a worker process receives it, it calls
    :meth:`BaseServer.accept` on the specified socket and starts a new thread to handle the client.

    The main process may have created more than one server socket (encrypted or not, for different protocols, etc.),
    so the ``new_client`` command is sent with an argument: the server socket to use to accept the connection.

    Because we can't pass a file descriptor through a pipe (at least not on Windows), we send the position
    (an integer) of the server in the server list that was passed to the worker's :meth:`__call__` method.
    """

    def __init__(self):

        # The process will start a thread per client. The pipe is not thread-safe, so this lock is used
        # in the send() and recv() methods to protect access to the pipe between client threads.
        self._pipe_lock = threading.RLock()

        # This lock will protect the list of active threads.
        self._threads_lock = threading.RLock()

    def _update_process_name(self):
        """ Updates the process title, if setprotitle is installed. """
        if setproctitle:
            setproctitle.setproctitle(self.process_name)

    @property
    def process_name(self):

        """ If the module ``setproctitle`` is installed, Kiwi will use it to set a custom name in the process.

        You can customize this name through the WORKERS_PROCESS_NAME configuration variable. The following
        placeholders are available and will be substituted:

            * ``%(pyver)s``: The Python version, for example ``3.2``.
            * ``%(pyexec)s``: The name of the Python executable, for example ``python3.2``.
            * ``%(pyscript)s``: The name of the script that was started.
            * ``%(nb_clients)s``: The number of connected clients in this process.
            * ``%(worker)s``: The internal name of the worker (usually #1, #2, etc.).

        :return: The formatted string
        """

        name = self._process_name_format % {
            'pyver' : platform.python_version(),
            'pyexec' : os.path.basename(sys.executable),
            'pyscript' : sys.argv[0],
            'worker' : self.process.name,
            'nb_clients' : self._shared_clients_count.value,
        }
        return name

    def _init_logging(self, queue):

        """ Init logging with :func:`logging.config.dictConfig`.

        We use a :class:`QueueHandler` to send logging messages to the parent process via a queue. The parent
        process handles the logging and the dispatching of messages.
        """

        config = {
            'version': 1,
            'disable_existing_loggers' : True,
            'handlers': {
                'queue': {
                    'class': 'logging.handlers.QueueHandler',
                    'queue': queue,
                },
            },
            'root': {
                'level': logging.DEBUG,
                'handlers': ['queue']
            },
            'loggers' : {},
        }

        # We re-activate the logging configuration in the worker,
        # but with only one handler: the queue handler.
        for logger_name, logger_config in kiwi.settings.LOGGING['loggers'].items():
            config['loggers'][logger_name] = logger_config
            config['loggers'][logger_name]['handlers'] = []

        logging.config.dictConfig(config)

    def trigger_command(self, command):

        """ Handle the recieved command, and call the ``trigger_<command>`` method.

        The trigger method is called transparently with the arguments passed to the command, so a keyword
        argument given to the command is forwarded to the method.
        """

        method_name = 'trigger_command_' + command.name
        method = getattr(self, method_name, None)

        if method and callable(method):
            try:
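                # Strip the pipe-level 'returns' flag from kwargs (if present) so it is not
                # forwarded to the handler; it only tells the sender to wait for a result.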
                if 'returns' in command.kwargs:
                    del command.kwargs['returns']
                result = method(*command.args, **command.kwargs)
            except Exception as error:
                if command.returns:
                    self._pipe.send(error)
                raise
            else:
                if command.returns:
                    self._pipe.send(result)
        else:
            if command.returns:
                self._pipe.send(ValueError('Invalid command name: %s' % command.name))
            logger.warning('Received an unknown command: %s', command.name)

    def trigger_command_new_client(self, server_index, protocol):

        """ Called when a ``new_client`` command is recieved.

        The ``server_index`` argument is the index of the server in the list of servers passed as process
        arguments in ``__call__`` that we have to use to accept the connection.

        The ``protocol`` argument is the name of the protocol that must be used to talk with this client.

        :return: ``True`` if the connection was accepted.
        """

        server = self._servers[server_index]
        socket = server.accept()

        logger.info('Accepted new connection from %s:%s', socket.remote_ip, socket.remote_port)
        self._clients_poller.register(socket)
        self._clients_poller.bind(socket, 'data', self.trigger_client_data)
        self._clients_poller.bind(socket, 'closed', self.trigger_client_close)
        self._clients_handlers[socket] = ClientHandler(socket, protocol)
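        # The client counter is a multiprocessing.Value shared with the main process;
        # the manager uses it to pick the least-loaded worker for new connections.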
        self._shared_clients_count.value += 1
        self._update_process_name()

        return True

    def trigger_client_data(self, socket, data):

        """ This method is called when some data is available on a client socket. """

        self._clients_handlers[socket].handle_data(data)

    def trigger_client_close(self, socket):

        """ This method is called when a client socket is closed. """

        logger.info('Client closed connection: %s:%s', socket.remote_ip, socket.remote_port)

        self._shared_clients_count.value -= 1
        self._clients_poller.unregister(socket)
        self._clients_handlers[socket].handle_close()
        self._clients_handlers.pop(socket)
        self._update_process_name()

    def send(self, command_name, *args, returns=False, **kwargs):

        """ Sends the specified command to the worker.

        If ``returns`` is ``True``, this method won't return until the other side has sent back the result of the
        command. It can be called from the main process to send data to the worker, or from the worker to send data
        to the main process; it works both ways.

        This method is thread-safe, and must be used instead of directly writing to the pipe.
        """

        with self._pipe_lock:
            self._pipe.send(Command(command_name, *args, returns=returns, **kwargs))
            if returns:
                result = self.recv()
                if isinstance(result, Exception):
                    raise result
                return result

    def recv(self):
        """ Recieve data from the pipe in a thread-safe way. """
        with self._pipe_lock:
            return self._pipe.recv()

    def __call__(self, pipe, servers, queue, process_name_format, shared_clients_count, **kwargs):

        """ This method is called in a new process.

        :param pipe: The pipe used to communicate with the main process.
        :param servers: A list of listening server sockets (with inherited file descriptors).
        :param queue: The queue that will be used to send log messages to the main process.
        """

        self._process_name_format = process_name_format
        self._pipe = pipe
        self._servers = servers
        self._queue = queue
        self._shared_clients_count = shared_clients_count
        self._clients_poller = Poller()
        self._clients_handlers = {} # Mapping socket -> ClientHandler

        self._init_logging(queue)
        self._update_process_name()

        logger.debug('Started and initialized the worker process')

        while True:
            try:
                # pipe.poll() is non-blocking, whereas poller.poll() blocks for up to
                # WORKER_POLL_BLOCKING_DURATION seconds. This means we could wait up to that
                # long between handling two commands (including new client handling).
                if self._pipe.poll():
                    self.trigger_command(pipe.recv())
                self._clients_poller.poll(WORKER_POLL_BLOCKING_DURATION)
            except KeyboardInterrupt:
                break
            except:
                logger.exception('An error occurred in a worker')

        return 0
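
The worker's command loop assumes picklable command objects carrying a name, positional and keyword arguments, and a ``returns`` flag. The real :class:`Command` class is not shown in these examples; a hypothetical minimal version consistent with how it is used here could look like this:

class Command:

    """ Hypothetical sketch only; the actual kiwi Command class is not shown in these examples. """

    def __init__(self, name, *args, returns=False, **kwargs):
        self.name = name          # used to resolve the trigger_command_<name> method
        self.args = args          # forwarded as positional arguments
        self.kwargs = kwargs      # forwarded as keyword arguments
        self.returns = returns    # if True, the sender waits for a pickled result

# Example: the command the manager sends for a new connection.
# Command('new_client', server_index=0, protocol='jsonrpc', returns=True)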
Example #4
class Manager:

    """ This class manage a pool of workers.

    Basically, it is a wrapper for starting/stopping workers. It also handles communication with the workers
    through pipes, and dispatches received commands.

    Because we use multiple processes, we can't share global variables. For example, if a client needs to see
    the list of currently connected users, it will send a request to the server. But the request will be handled
    by one process, and this process is not aware of connected clients in other processes.

    The solution is to use the manager. The worker will send a command to the manager, for example a ``get_connected_clients``
    command. The manager will then send a response to the worker, which will forward it to the client.

    In summary: workers never talk to each other; they always talk to the main process (via the manager), and the
    manager performs the appropriate task, which may require sending commands to other workers.

    A manager also runs a :class:`QueueListener` in a separate thread to handle workers logging messages.

    :param servers_config: A tuple of dictionaries containing the configuration of the server sockets. By default,
        this is the one defined in the ``settings.py`` file.
    :param workers_count: The number of worker processes to create.
    """

    def __init__(self, servers_config, workers_count=None):

        self.servers = []
        self.protocols = {} # Mapping server socket -> protocol, based on configuration
        self.workers = []
        self.workers_count = workers_count or multiprocessing.cpu_count() * 2

        self._create_sockets(servers_config)
        self._logging_queue = multiprocessing.Queue()

    def _create_sockets(self, servers_config):

        """ Creates server objects (TcpServer or TcpSSLServer). """

        self.poller = Poller()

        for config in servers_config:
            address = (config.get('host', None), config.get('port', 0))
            if 'ssl' in config:
                ssl_config = config['ssl']
                server = TcpSSLServer(address, certfile=ssl_config.get('certfile'), keyfile=ssl_config.get('keyfile'),
                    ca_certs=ssl_config.get('ca_certs'), cert_reqs=ssl.CERT_REQUIRED)
            else:
                server = TcpServer(address)
            self.servers.append(server)
            self.protocols[server] = config.get('protocol', 'jsonrpc')
            self.poller.register(server)
            self.poller.bind(server, 'newclient', self.new_client)

    def _create_worker(self):

        """ Creates a new worker and returns it.

        The created worker has some extra attributes:
            * ``pipe``: The pipe that will be used to send/receive commands to/from the worker.
            * ``process``: The associated :class:`multiprocessing.Process` object.
        """

        our_pipe, worker_pipe = multiprocessing.Pipe()

        worker = Worker()
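        # 'I' is an unsigned int counter shared with the worker process; the worker updates it
        # as clients connect and disconnect, and new_client() uses it for load balancing.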
        worker.clients = multiprocessing.Value('I', 0)
        worker._pipe = our_pipe # In the child process, worker._pipe will be equal to worker_pipe!
        worker.process = multiprocessing.Process(target=worker, name='#%d' % (len(self.workers)+1),
            args=(worker_pipe, self.servers, self._logging_queue, kiwi.settings.WORKERS_PROCESS_NAME, worker.clients))
        # Setting daemon to True forces the process to exit if the main process exits.
        worker.process.daemon = True

        return worker

    def new_client(self, server):

        """ Method called by the poller when a new client is available on a socket. """

        workers = sorted(self.workers, key=lambda w: w.clients.value)

        for worker in workers:
            if worker.clients.value >= kiwi.settings.MAXIMUM_CONNECTIONS_PER_PROCESS:
                continue
            break
        else:
            client_socket = server.accept()
            client_socket.close()
            logger.error('Refused connection from %s:%s: too many connections.', client_socket.remote_ip, client_socket.remote_port)
            return

        # We send a new_client command to the worker process, passing it the server to use to accept() the connection.
        # We then wait for the worker process to send us a result (returns=True), to be sure that the connection
        # has been accepted before continuing to poll, to avoid select() returning the same client again.
        worker.send('new_client', server_index=self.servers.index(server), protocol=self.protocols[server], returns=True)

    def trigger_commands(self):

        """ Checks for available commands on worker pipes and execute them. """

        for worker in self.workers:

            if not worker._pipe.poll():
                continue

            command = worker._pipe.recv()
            method_name = 'trigger_command_' + command.name
            method = getattr(self, method_name, None)

            if method and callable(method):
                try:
                    if 'returns' in command.kwargs:
                        del command.kwargs['returns']
                    result = method(*command.args, **command.kwargs)
                except Exception as error:
                    if command.returns:
                        worker._pipe.send(error)
                    raise
                else:
                    if command.returns:
                        worker._pipe.send(result)
            else:
                if command.returns:
                    worker._pipe.send(ValueError('Invalid command name: %s' % command.name))
                logger.warning('Received an unknown command: %s', command.name)

    def spawn(self, count=None):
        """ Creates all the processes.

        If ``count`` is set, only ``count`` new processes will be spawned. Note that this method does not check
        whether processes already exist; it just spawns new ones.
        """
        if not count:
            count = self.workers_count
        for i in range(count):
            worker = self._create_worker()
            worker.process.start()
            self.workers.append(worker)

    def stop(self, worker=None):
        """ Stops one or all workers, if ``worker`` is ``None``. """
        workers = [worker] if worker else self.workers
        for worker in workers:
            os.kill(worker.process.pid, signal.SIGINT)

    def run(self):
        # This thread will handle logging messages
        logging_listener = logging.handlers.QueueListener(self._logging_queue, QueueListenerHandler())
        logging_listener.start()
        while True:
            try:
                self.poller.poll(MAIN_POLL_BLOCKING_DURATION)
            except KeyboardInterrupt:
                logger.info('Received a Ctrl+C/SIGINT, stopping...')
                break
            except:
                logger.exception('Error in main process loop')
        for worker in self.workers:
            worker.process.join()
        logging_listener.stop()
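
Finally, a minimal usage sketch tying the pieces together, assuming the Manager class above is importable and that a single plain (non-SSL) JSON-RPC listener is wanted; the configuration values are illustrative:

servers_config = (
    {'host': '0.0.0.0', 'port': 8080, 'protocol': 'jsonrpc'},
)

manager = Manager(servers_config, workers_count=4)
manager.spawn()    # start the worker processes
manager.run()      # dispatch new clients until Ctrl+C, then join the workers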