Example #1
def test_stop(self):
    """ Test the stopping of the main loop thread. """
    from unittest.mock import patch
    from supvisors.mainloop import SupvisorsMainLoop
    main_loop = SupvisorsMainLoop(self.supvisors)
    # stopping the main loop before it is started raises RuntimeError
    with self.assertRaises(RuntimeError):
        main_loop.stop()
    # stop main loop after it is started
    main_loop.loop = True
    with patch.object(main_loop, 'join') as mocked_join:
        main_loop.stop()
        self.assertFalse(main_loop.loop)
        self.assertEqual(1, mocked_join.call_count)
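
This test pins down a flag-based stop contract: stop() clears the loop flag and joins the thread, and joining a thread that was never started raises RuntimeError. Below is a minimal sketch of internals consistent with those assertions; it is an assumption inferred from the test, not the actual SupvisorsMainLoop code.

import threading
import time

class MainLoopSketch(threading.Thread):
    """ Hypothetical flag-based main loop matching the assertions above. """

    def __init__(self):
        threading.Thread.__init__(self)
        self.loop = False

    def run(self):
        self.loop = True
        while self.loop:
            time.sleep(0.1)  # placeholder for polling sockets and proxies

    def stop(self):
        # clear the flag so that run() exits, then wait for the thread to end;
        # Thread.join() raises RuntimeError if start() was never called,
        # which is exactly what the first assertion relies on
        self.loop = False
        self.join()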
Example #2
def test_stop(self):
    """ Test the stopping of the main loop thread. """
    from unittest.mock import patch
    from supvisors.mainloop import SupvisorsMainLoop
    main_loop = SupvisorsMainLoop(self.supvisors)
    with patch.object(main_loop, 'join') as mocked_join:
        # stopping the main loop before it is started is a no-op
        main_loop.stop()
        self.assertFalse(main_loop.stop_event.is_set())
        self.assertEqual(0, mocked_join.call_count)
        # stop main loop when alive
        with patch.object(main_loop, 'is_alive', return_value=True):
            main_loop.stop()
            self.assertTrue(main_loop.stop_event.is_set())
            self.assertEqual(1, mocked_join.call_count)
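
This second variant targets an event-based design: stop() only signals and joins when the thread is actually alive, so a premature call is harmless instead of raising. A sketch consistent with those assertions, again an assumption inferred from the test rather than the project's code:

import threading

class MainLoopSketch(threading.Thread):
    """ Hypothetical event-based main loop matching Example #2. """

    def __init__(self):
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()

    def run(self):
        while not self.stop_event.is_set():
            self.stop_event.wait(0.1)  # placeholder for the polling work

    def stop(self):
        # only signal and join a thread that is actually running, so that
        # calling stop() before start() is a harmless no-op
        if self.is_alive():
            self.stop_event.set()
            self.join()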
Example #3
class SupervisorListener(object):
    """ This class subscribes directly to the internal Supervisor events.
    These events are published to all Supvisors instances.

    Attributes are:

        - supvisors: a reference to the Supvisors context,
        - address: the address name where this process is running,
        - main_loop: the Supvisors' event thread,
        - publisher: the ZeroMQ socket used to publish Supervisor events
        to all Supvisors threads.
    """
    def __init__(self, supvisors):
        """ Initialization of the attributes. """
        self.supvisors = supvisors
        # shortcuts for source code readability
        supvisors_short_cuts(self,
                             ['fsm', 'info_source', 'logger', 'statistician'])
        # test if statistics collector can be created for local host
        try:
            from supvisors.statscollector import instant_statistics
            self.collector = instant_statistics
        except ImportError:
            self.logger.warn('psutil not installed')
            self.logger.warn('this Supvisors will not publish statistics')
            self.collector = None
        # other attributes
        self.address = self.supvisors.address_mapper.local_address
        self.publisher = None
        self.main_loop = None
        # subscribe to internal events
        events.subscribe(events.SupervisorRunningEvent, self.on_running)
        events.subscribe(events.SupervisorStoppingEvent, self.on_stopping)
        events.subscribe(events.ProcessStateEvent, self.on_process)
        events.subscribe(events.Tick5Event, self.on_tick)
        events.subscribe(events.RemoteCommunicationEvent, self.on_remote_event)

    def on_running(self, event):
        """ Called when Supervisor is RUNNING.
        This method starts the Supvisors main loop. """
        self.logger.info('local supervisord is RUNNING')
        # replace the default handler for web ui
        self.info_source.replace_default_handler()
        # create zmq sockets
        self.supvisors.zmq = SupervisorZmq(self.supvisors)
        # keep a reference to the internal events publisher
        self.publisher = self.supvisors.zmq.internal_publisher
        # start the main loop
        # env is needed to create XML-RPC proxy
        self.main_loop = SupvisorsMainLoop(self.supvisors)
        self.main_loop.start()

    def on_stopping(self, event):
        """ Called when Supervisor is STOPPING.
        This method stops the Supvisors main loop. """
        self.logger.warn('local supervisord is STOPPING')
        # force Supervisor to close HTTP servers
        # this will prevent any pending XML-RPC request to block the main loop
        self.info_source.close_httpservers()
        # stop the main loop
        self.logger.info('request to stop main loop')
        self.main_loop.stop()
        self.logger.info('end of main loop')
        # close zmq sockets
        self.supvisors.zmq.close()
        # unsubscribe from events
        events.clear()
        # finally, close logger
        self.logger.close()

    def on_process(self, event):
        """ Called when a ProcessEvent is sent by the local Supervisor.
        The event is published to all Supvisors instances. """
        event_name = events.getEventNameByType(event.__class__)
        self.logger.debug('got Process event from supervisord: {} {}'.format(
            event_name, event))
        # create payload from event
        payload = {
            'name': event.process.config.name,
            'group': event.process.group.config.name,
            'state': ProcessStates._from_string(event_name.split('_')[-1]),
            'now': int(time.time()),
            'pid': event.process.pid,
            'expected': event.expected
        }
        self.logger.debug('payload={}'.format(payload))
        self.publisher.send_process_event(payload)

    def on_tick(self, event):
        """ Called when a TickEvent is notified.
        The event is published to all Supvisors instances.
        Then statistics are published and the periodic task is triggered. """
        self.logger.debug('got Tick event from supervisord: {}'.format(event))
        payload = {'when': event.when}
        self.publisher.send_tick_event(payload)
        # get and publish statistics at tick time (optional)
        if self.collector:
            status = self.supvisors.context.addresses[self.address]
            self.publisher.send_statistics(
                self.collector(status.pid_processes()))
        # periodic task
        addresses = self.fsm.on_timer_event()
        # pushes isolated addresses to main loop
        self.supvisors.zmq.pusher.send_isolate_addresses(addresses)

    def on_remote_event(self, event):
        """ Called when a RemoteCommunicationEvent is notified.
        This is used to sequence the events received from the Supvisors thread
        with the other events handled by the local Supervisor. """
        self.logger.debug(
            'got Remote event from supervisord: {}'.format(event))
        if event.type == RemoteCommEvents.SUPVISORS_AUTH:
            self.authorization(event.data)
        elif event.type == RemoteCommEvents.SUPVISORS_EVENT:
            self.unstack_event(event.data)
        elif event.type == RemoteCommEvents.SUPVISORS_INFO:
            self.unstack_info(event.data)

    def unstack_event(self, message):
        """ Unstack and process one event from the event queue. """
        event_type, event_address, event_data = json.loads(message)
        if event_type == InternalEventHeaders.TICK:
            self.logger.trace('got tick event from {}: {}'.format(
                event_address, event_data))
            self.fsm.on_tick_event(event_address, event_data)
        elif event_type == InternalEventHeaders.PROCESS:
            self.logger.trace('got process event from {}: {}'.format(
                event_address, event_data))
            self.fsm.on_process_event(event_address, event_data)
        elif event_type == InternalEventHeaders.STATISTICS:
            # this Supvisors could handle statistics
            # even if psutil is not installed
            self.logger.trace('got statistics event from {}: {}'.format(
                event_address, event_data))
            self.statistician.push_statistics(event_address, event_data)

    def unstack_info(self, message):
        """ Unstack the process info received. """
        # unstack the queue for process info
        address_name, info = json.loads(message)
        self.logger.trace(
            'got process info event from {}'.format(address_name))
        self.fsm.on_process_info(address_name, info)

    def authorization(self, data):
        """ Extract the authorization and address from data
        and process the event. """
        self.logger.trace('got authorization event: {}'.format(data))
        # split the line received
        address_name, authorized = tuple(x.split(':')[1] for x in data.split())
        self.fsm.on_authorization(address_name, boolean(authorized))

    def force_process_fatal(self, namespec):
        """ Publishes a fake process event showing a FATAL state for
        the process. """
        self.force_process_state(namespec, ProcessStates.FATAL)

    def force_process_unknown(self, namespec):
        """ Publishes a fake process event showing an UNKNOWN state for
        the process. """
        self.force_process_state(namespec, ProcessStates.UNKNOWN)

    def force_process_state(self, namespec, state):
        """ Publishes a fake process event showing a state for the process. """
        application_name, process_name = split_namespec(namespec)
        # create payload from event
        payload = {
            'processname': process_name,
            'groupname': application_name,
            'state': state,
            'now': int(time.time()),
            'pid': 0,
            'expected': False
        }
        self.logger.debug('payload={}'.format(payload))
        self.publisher.send_process_event(payload)
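
Two of the message formats consumed by this listener can be illustrated with hypothetical payloads (the field values below are made up for the illustration, not taken from the project): unstack_event() expects a JSON-encoded triple, and authorization() expects space-separated key:value pairs.

import json

# hypothetical internal event, as consumed by unstack_event(): a JSON list
# of (event_type, event_address, event_data); 0 stands in for a header
# constant such as InternalEventHeaders.TICK (assumed value)
message = json.dumps([0, '10.0.0.1', {'when': 1234567890}])
event_type, event_address, event_data = json.loads(message)

# hypothetical authorization line, as consumed by authorization():
# space-separated 'key:value' pairs
data = 'address_name:10.0.0.1 authorized:True'
address_name, authorized = tuple(x.split(':')[1] for x in data.split())
# authorized is still the string 'True' at this point, hence the
# boolean() conversion applied in the listener
assert (address_name, authorized) == ('10.0.0.1', 'True')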