def test_main_loop_adds_pubsub_messages_to_event_queues(mp_fixture):
    """
    PUBSUB messages should be added to event queues.
    """
    mock_ctx = mock.MagicMock()

    event_q = MPQueue(ctx=mp_fixture)
    event_q.put(EventMessage("TEST", "PUBSUB", msg="1"))
    event_q.put(EventMessage("TEST", "PUBSUB", msg="2"))
    event_q.put(EventMessage("TEST", "PUBSUB", msg="3"))
    event_q.put(EventMessage("TEST", "END", msg="foo"))
    mock_ctx.event_queue = event_q

    # shutdown_event is never set, so the loop will only exit when the END
    # message is processed, by which point all three PUBSUB messages should
    # have been forwarded to each event queue
    mock_ctx.shutdown_event.is_set.return_value = False

    q1 = MPQueue(ctx=mp_fixture)
    q2 = MPQueue(ctx=mp_fixture)

    main_loop(mock_ctx, [q1, q2])

    assert q1.safe_close() == 3
    assert q2.safe_close() == 3

    event_q.safe_close()
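Taken together, the main_loop tests in this collection pin down its contract:
PUBSUB events are fanned out to every registered queue, FATAL and END events
terminate the loop, unknown events are logged, and the shutdown event is
checked on every iteration. A minimal sketch consistent with those assertions,
offered as an assumption for illustration rather than the real implementation:

import logging

def main_loop(main_ctx, script_queues):
    # check the shutdown event before every queue get
    while not main_ctx.shutdown_event.is_set():
        event = main_ctx.event_queue.safe_get()
        if not event:
            continue
        if event.msg_type == "PUBSUB":
            # fan the event out to every registered queue
            for q in script_queues:
                q.put(event)
        elif event.msg_type in ("FATAL", "END"):
            # terminate, leaving any remaining messages on the queue
            break
        else:
            main_ctx.log(logging.ERROR, f"Unknown Event: {event}")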
Example #2
    def test_script_worker_calls_correct_function_on_message_type(
            self, mock_env_fn, mock_load_fn, mock_run_fn, caplog):
        mp = multiprocessing.get_context()
        script = GitScript("git://test.py", GitArgs())
        env_evt = EventMessage("test", "ENV", script)
        load_evt = EventMessage("test", "LOAD", script)
        run_evt = EventMessage("test", "RUN", ("init", None))
        work_q = MPQueue(ctx=mp)
        work_q.put(env_evt)
        work_q.put(load_evt)
        work_q.put(run_evt)

        _proc_worker_wrapper_helper(mp,
                                    caplog,
                                    ScriptWorker,
                                    args=(work_q, ),
                                    expect_shutdown_evt=True)
        env_args, _ = mock_env_fn.call_args
        assert env_args[0].msg_type == env_evt.msg_type
        mock_env_fn.assert_called_once()

        load_args, _ = mock_load_fn.call_args
        assert load_args[0].msg_type == load_evt.msg_type
        mock_load_fn.assert_called_once()

        run_args, _ = mock_run_fn.call_args
        assert run_args[0].msg_type == run_evt.msg_type
        mock_run_fn.assert_called_once()
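The three patched handlers suggest that ScriptWorker dispatches on msg_type. A
hedged sketch of the code under test, with handler names inferred from the
mocks rather than taken from the source:

    def main_func(self, evt: EventMessage) -> None:
        # dispatch each work queue message to the matching handler
        # (assumed names: on_env, on_load, on_run)
        if evt.msg_type == "ENV":
            self.on_env(evt)
        elif evt.msg_type == "LOAD":
            self.on_load(evt)
        elif evt.msg_type == "RUN":
            self.on_run(evt)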
Example #3
    def create(self, script: ExecutableScript, *,
               init_args: ProcedureInput) -> int:
        """
        Create a new Procedure that will, when executed, run the target Python
        script.

        Objects that can only be shared through inheritance, such as
        multiprocessing objects, can be shared by providing them as init_args
        here. These arguments will be provided to the init function of the
        user script, where present.

        :param script: script to execute
        :param init_args: script initialisation arguments
        :return: ID of the created Procedure
        """
        pid = next(self._pid_counter)
        LOGGER.debug("Creating ScriptWorker #%s for %s", pid, script)

        # msg box for messages from manager to child, like 'run main function'
        work_q = self.ctx.MPQueue()

        # prime the work queue with initial messages instructing the worker to
        # set up the environment (where required), to load the child script...
        msg_src = self.__class__.__name__

        env = None
        if isinstance(script, GitScript) and script.create_env:
            env = self.em.create_env(script.git_args)
            env_msg = EventMessage(msg_src=msg_src, msg_type="ENV", msg=script)
            work_q.safe_put(env_msg)

            self.environments[pid] = env

        load_msg = EventMessage(msg_src=msg_src, msg_type="LOAD", msg=script)
        work_q.safe_put(load_msg)
        # ... and also to execute init
        init_msg = EventMessage(msg_src=msg_src,
                                msg_type="RUN",
                                msg=("init", None))
        work_q.safe_put(init_msg)

        self.script_queues[pid] = work_q
        self.states[pid] = ProcedureState.CREATING

        # A RuntimeError will be raised if Proc creation fails
        # TODO close and delete work_q, etc. on failure?
        procedure = self.ctx.Proc(
            str(pid),
            ScriptWorker,
            work_q,
            *init_args.args,
            scan_counter=self._scan_id,
            environment=env,
            **init_args.kwargs,
        )

        # Proc creation was successful. Can store procedure and continue.
        self.procedures[pid] = procedure

        return pid
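A brief usage sketch for create(). The ProcessManager construction, the
FileScript class, and the ProcedureInput(*args, **kwargs) signature are
assumptions made for illustration:

import multiprocessing

manager = ProcessManager()  # hypothetical construction
# multiprocessing primitives can only be shared through inheritance, so they
# are passed via init_args and handed to the script's init function
user_event = multiprocessing.Event()
pid = manager.create(
    FileScript("file://myscript.py"),  # hypothetical script class
    init_args=ProcedureInput(user_event, subarray_id=1),  # illustrative args
)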
    def test_external_messages_are_published_locally(self, mp_fixture, caplog):
        """
        Verify that a message event is published if the event originates from
        an external source.
        """
        pubsub.pub.unsubAll()
        helper = PubSubHelper()

        work_q = MPQueue(ctx=mp_fixture)
        msg = EventMessage(
            "EXTERNAL COMPONENT",
            "PUBSUB",
            dict(topic=topics.request.procedure.list,
                 kwargs={"request_id": "123"}),
        )
        work_q.put(msg)

        with mock.patch.object(pubsub.pub, "unsubAll", return_value=[]):
            _proc_worker_wrapper_helper(
                mp_fixture,
                caplog,
                EventBusWorker,
                args=(work_q, ),
                expect_shutdown_evt=True,
            )

        assert topics.request.procedure.list in helper.topic_list
        work_q.safe_close()
Example #5
    def republish(self, topic: pub.Topic = pub.AUTO_TOPIC, **kwargs) -> None:
        """
        Republish a local pypubsub event over the inter-process mptools event
        bus.

        :param topic: message topic, set automatically by pypubsub
        :param kwargs: any metadata associated with pypubsub message
        :return:
        """
        # avoid infinite loop - do not republish external events
        try:
            msg_src = kwargs.pop("msg_src")
        except KeyError:
            # No message source = virgin event published on pypubsub
            msg_src = self.name

        # ... but if this is a local message (message source = us), send it
        # out to the main queue and hence on to other EventBusWorkers
        if msg_src == self.name:
            # Convert pypubsub event to the equivalent mptools EventMessage
            msg = EventMessage(self.name, "PUBSUB",
                               dict(topic=topic.name, kwargs=kwargs))

            # note that this is a blocking put. If the queue is full, this call
            # will block until the queue has room to accept the message
            self.log(logging.DEBUG, "Republishing local pypubsub event: %s",
                     msg)
            self.event_q.put(msg)
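The msg_src bookkeeping above is what breaks the republish cycle. A minimal
illustration; the topic comes from the tests in this collection and the
foreign component name is invented:

# a virgin local publication carries no msg_src, so republish() forwards it
# to the inter-process event queue...
pub.sendMessage(topics.request.procedure.list, request_id="123")

# ...whereas an event replayed from another process carries the originator's
# msg_src; as msg_src != self.name, republish() does not forward it again
pub.sendMessage(
    topics.request.procedure.list, msg_src="OTHER_WORKER", request_id="123"
)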
def test_main_loop_ends_on_fatal_message(mp_fixture):
    """
    Main loop should terminate when a fatal message is received.
    """
    mock_ctx = mock.MagicMock()

    event_q = MPQueue(ctx=mp_fixture)
    event_q.put(EventMessage("TEST", "FATAL", msg="foo"))
    event_q.put(EventMessage("TEST", "END", msg="foo"))
    mock_ctx.event_queue = event_q

    mock_ctx.shutdown_event.is_set.return_value = False

    main_loop(mock_ctx, [])

    assert event_q.safe_close() == 1
Example #7
    def test_internal_messages_not_republished(self, mp, caplog):
        """
        Verify that a message event is not published if the event originates
        from an internal source.
        """
        helper = PubSubHelper()

        work_q = MPQueue(ctx=mp)
        # TEST is the default component name assigned in
        # _proc_worker_wrapper_helper. This message should not be published to pypubsub
        msg = EventMessage(
            "TEST",
            "PUBSUB",
            dict(topic=topics.request.procedure.list,
                 kwargs={"request_id": "123"}),
        )
        work_q.put(msg)

        _proc_worker_wrapper_helper(mp,
                                    caplog,
                                    ScriptWorker,
                                    args=(work_q, ),
                                    expect_shutdown_evt=True)

        msgs_on_topic = helper.messages_on_topic(topics.request.procedure.list)
        assert len(msgs_on_topic) == 0
def assert_command_request_and_response(mp_fixture, caplog, mock_method,
                                        request_topic, response_topic, cmd):
    pubsub.pub.unsubAll()
    helper = PubSubHelper()

    work_q = MPQueue(ctx=mp_fixture)
    msg = EventMessage(
        "UNITTEST",
        "PUBSUB",
        dict(topic=request_topic, kwargs={
            "request_id": "1234",
            "cmd": cmd
        }),
    )
    work_q.put(msg)
    event = mp_fixture.Event()

    mock_method.side_effect = partial(set_event, event)
    with mock.patch.object(pubsub.pub, "unsubAll", return_value=[]):
        _proc_worker_wrapper_helper(
            mp_fixture,
            caplog,
            ScriptExecutionServiceWorker,
            args=(work_q, mp_fixture),
            expect_shutdown_evt=True,
        )

    assert event.is_set()
    mock_method.assert_called_once()
    assert mock_method.call_args[0][0] == cmd

    assert helper.topic_list == [request_topic, response_topic]

    work_q.safe_close()
Example #9
    def __init__(
        self,
        name: str,
        startup_event: multiprocessing.Event,
        shutdown_event: multiprocessing.Event,
        event_q: mptools.MPQueue,
        work_q: mptools.MPQueue,
        *args,
        scan_counter: Optional[multiprocessing.Value] = None,
        environment: Optional[Environment] = None,
        **kwargs,
    ):
        # Message is rolled by hand and sent via a direct message to the
        # ProcessManager as we want to announce CREATING at the earliest
        # possible moment; we can't announce via pypubsub just yet as the
        # intraprocess<->interprocess republish function is not registered
        # till later in the construction process
        msg = EventMessage(
            msg_src=name,
            msg_type="PUBSUB",
            msg=dict(
                topic="procedure.lifecycle.statechange",
                kwargs=dict(new_state=ProcedureState.CREATING),
            ),
        )
        event_q.put(msg)

        self.name = name

        self._scan_counter = scan_counter
        self._environment = environment
        self.work_q = work_q

        # user_module will be set on LOAD message
        self.user_module = None

        super().__init__(name, startup_event, shutdown_event, event_q, *args,
                         **kwargs)

        # AT2-591. The forked process inherits all subscriptions of the
        # parent, which we do not want to maintain in this child process. This
        # could be done before super().__init__() at the expense of losing the
        # log message, as logging is set up in the super constructor
        unsubscribed = pub.unsubAll()
        self.log(
            logging.DEBUG,
            "Unsubscribed %s pypubsub subscriptions in Procedure #%s (PID=%s)",
            len(unsubscribed),
            self.name,
            os.getpid(),
        )

        # Register a callback function so that all pypubsub messages broadcast
        # in this process are also queued for distribution to remote processes
        pub.subscribe(self.republish, pub.ALL_TOPICS)
    def test_internal_messages_not_republished(self, mp_fixture, caplog):
        """
        Verify that a message event is not published if the event originates
        from an internal source.
        """
        pubsub.pub.unsubAll()
        helper = PubSubHelper()

        work_q = MPQueue(ctx=mp_fixture)
        # TEST is the default component name assigned in
        # _proc_worker_wrapper_helper. This message should be ignored.
        msg = EventMessage(
            "TEST",
            "PUBSUB",
            dict(topic=topics.request.procedure.list,
                 kwargs={"request_id": "123"}),
        )

        work_q.put(msg)
        # But coming from NONTEST, this message should be republished.
        msg = EventMessage(
            "NONTEST",
            "PUBSUB",
            dict(topic=topics.request.procedure.list,
                 kwargs={"request_id": "456"}),
        )
        work_q.put(msg)

        with mock.patch.object(pubsub.pub, "unsubAll", return_value=[]):
            _proc_worker_wrapper_helper(
                mp_fixture,
                caplog,
                EventBusWorker,
                args=(work_q, ),
                expect_shutdown_evt=True,
            )

        assert len(helper.messages) == 1
        assert helper.messages[0][1] == dict(msg_src="NONTEST",
                                             request_id="456")

        work_q.safe_close()
def test_main_loop_ends_when_shutdown_event_is_set(mp_fixture):
    """
    Main loop should terminate when shutdown event is set.
    """
    mock_ctx = mock.MagicMock()

    event_q = MPQueue(ctx=mp_fixture)
    event_q.put(EventMessage("TEST", "PUBSUB", msg="foo"))
    event_q.put(EventMessage("TEST", "PUBSUB", msg="foo"))
    event_q.put(EventMessage("TEST", "PUBSUB", msg="foo"))
    event_q.put(EventMessage("TEST", "END", msg="foo"))
    mock_ctx.event_queue = event_q

    # two processing loops before shutdown is set, at which point the loop
    # should exit with two messages still in the event queue
    mock_ctx.shutdown_event.is_set.side_effect = [False, False, True]

    main_loop(mock_ctx, [])

    assert event_q.safe_close() == 2
    def test_callback_is_invoked_when_pubsub_message_received(self):
        """
        Confirm that a callback function sees pubsub messages received by the
        SES's ProcessManager.
        """
        non_pubsub_msg = EventMessage("TEST", "foo", "bar")
        pubsub_msg = EventMessage(
            "EXTERNAL COMPONENT",
            "PUBSUB",
            dict(topic=topics.request.procedure.list,
                 kwargs={"request_id": "123"}),
        )

        ses = None
        cb_received = []
        cb_called = multiprocessing.Event()

        def cb(event):
            cb_received.append(event)
            cb_called.set()

        try:
            ses = ScriptExecutionService(on_pubsub=[cb])
            ses._process_manager.ctx.event_queue.put(non_pubsub_msg)
            ses._process_manager.ctx.event_queue.put(pubsub_msg)
            ses._process_manager.ctx.event_queue.put(non_pubsub_msg)
            cb_called.wait(0.1)

        finally:
            if ses is not None:
                ses.shutdown()

        assert cb_called.is_set()
        assert len(cb_received) == 1
        # can't do direct eq comparison as queue item is pickled copy, hence
        # object ID is different
        received: EventMessage = cb_received.pop()
        assert received.id == pubsub_msg.id and received.msg == pubsub_msg.msg
Example #13
    def test_callback_sees_received_pubsub_messages(self):
        """
        Callbacks passed to the ProcessManager constructor should be given
        each PUBSUB message received.
        """
        non_pubsub_msg = EventMessage("TEST", "foo", "bar")
        pubsub_msg = EventMessage(
            "EXTERNAL COMPONENT",
            "PUBSUB",
            dict(topic=topics.request.procedure.list,
                 kwargs={"request_id": "123"}),
        )

        cb_received = []
        cb_called = multiprocessing.Event()

        def cb(event):
            cb_received.append(event)
            cb_called.set()

        manager = None
        try:
            manager = ProcessManager(on_pubsub=[cb])
            manager.ctx.event_queue.put(non_pubsub_msg)
            manager.ctx.event_queue.put(pubsub_msg)
            manager.ctx.event_queue.put(non_pubsub_msg)
            cb_called.wait(0.1)
        finally:
            if manager is not None:
                manager.shutdown()

        assert cb_called.is_set()
        assert len(cb_received) == 1
        # can't do direct eq comparison as queue item is pickled copy, hence
        # object ID is different
        received: EventMessage = cb_received.pop()
        assert received.id == pubsub_msg.id and received.msg == pubsub_msg.msg
Example #14
    def _on_fatal(self, event: EventMessage) -> None:
        # the event needs to be added to the queue so that other listeners can
        # process it too; otherwise we could simply call
        # self._update_state_and_cleanup(int(event.msg_src), ProcedureState.FAILED)
        self.ctx.event_queue.put(
            EventMessage(
                msg_src=event.msg_src,
                msg_type="PUBSUB",
                msg=dict(
                    topic="procedure.lifecycle.statechange",
                    kwargs=dict(new_state=ProcedureState.FAILED),
                ),
            ))

        # announce stacktrace for any interested parties
        self.ctx.event_queue.put(
            EventMessage(
                msg_src=event.msg_src,
                msg_type="PUBSUB",
                msg=dict(
                    topic="procedure.lifecycle.stacktrace",
                    kwargs=dict(stacktrace=event.msg),
                ),
            ))
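For context, a hedged sketch of the FATAL event this handler consumes. The
convention of msg_src holding the child PID as a string and msg holding a
formatted traceback is inferred from the handler body above:

import traceback

try:
    raise RuntimeError("user script failed")
except RuntimeError:
    # illustrative only: how a child process might report its failure
    fatal_evt = EventMessage(
        msg_src="42", msg_type="FATAL", msg=traceback.format_exc()
    )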
Example #15
    def test_on_load(self, mock_module_fn, mp, caplog):
        """ """
        mock_module_fn.side_effect = MagicMock()
        script = GitScript("git://test.py", GitArgs())
        evt = EventMessage("test", "LOAD", script)

        work_q = MPQueue(ctx=mp)
        work_q.put(evt)

        _proc_worker_wrapper_helper(mp,
                                    caplog,
                                    ScriptWorker,
                                    args=(work_q, ),
                                    expect_shutdown_evt=True)
        mock_module_fn.assert_called_once_with(script)
def test_main_loop_ignores_and_logs_events_of_unknown_types(mp_fixture):
    """
    Loop should log events it doesn't know how to handle.
    """
    mock_ctx = mock.MagicMock()

    event_q = MPQueue(ctx=mp_fixture)
    event_q.put(EventMessage("TEST", "FOO", msg="1"))
    mock_ctx.event_queue = event_q

    # one processing loop before shutdown is set, at which point the loop
    # should exit with the unknown event consumed and logged
    mock_ctx.shutdown_event.is_set.side_effect = [False, True]

    main_loop(mock_ctx, [])

    event_q.safe_close()
    mock_ctx.log.assert_called_once()
    assert "Unknown Event" in mock_ctx.log.call_args[0][1]
Example #17
    def stop(self, process_id: int) -> None:
        """
        Stop a running Procedure.

        This stops execution of a currently running script.

        :param process_id: ID of Procedure to stop
        :return:
        """
        try:
            procedure = self.procedures[process_id]
            state = self.states[process_id]
        except KeyError as exc:
            raise ValueError(f"Process {process_id} not found") from exc

        stoppable_states = [
            ProcedureState.IDLE,
            ProcedureState.READY,
            ProcedureState.RUNNING,
            ProcedureState.LOADING,
        ]
        if state not in stoppable_states:
            raise ValueError(
                f"Cannot stop PID {process_id} with state {state.name}")

        if procedure.proc.is_alive():
            LOGGER.debug("Stopping Procedure %d", process_id)
            terminated = procedure.terminate(max_retries=3, timeout=0.1)
            final_state = (ProcedureState.STOPPED
                           if terminated else ProcedureState.UNKNOWN)

            msg = EventMessage(
                msg_src=str(process_id),
                msg_type="PUBSUB",
                msg=dict(
                    topic="procedure.lifecycle.statechange",
                    kwargs=dict(new_state=final_state),
                ),
            )
            self.ctx.event_queue.put(msg)

            # join any potentially zombie process, allowing it to clean up
            multiprocessing.active_children()
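A short usage sketch for stop(), continuing the hypothetical manager and pid
from the create() sketch earlier:

manager.stop(pid)    # allowed while IDLE, READY, RUNNING or LOADING
try:
    manager.stop(9999)
except ValueError as exc:
    print(exc)       # "Process 9999 not found"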
    def test_handles_request_to_list_invalid_id(self, mp_fixture, caplog):
        """
        The ValueError raised when SES.summarise is given an invalid PID should be handled.
        """
        pubsub.pub.unsubAll()
        helper = PubSubHelper()

        work_q = MPQueue(ctx=mp_fixture)
        msg = EventMessage(
            "TEST_SUMMARY",
            "PUBSUB",
            dict(topic=topics.request.procedure.list,
                 kwargs={"request_id": "123"}),
        )
        work_q.put(msg)

        with mock.patch(
                "ska_oso_oet.procedure.application.main.ScriptExecutionService.summarise"
        ) as mock_cls:
            with mock.patch.object(pubsub.pub, "unsubAll", return_value=[]):
                mock_cls.side_effect = ValueError
                _proc_worker_wrapper_helper(
                    mp_fixture,
                    caplog,
                    ScriptExecutionServiceWorker,
                    args=(work_q, mp_fixture),
                    expect_shutdown_evt=True,
                )

        mock_cls.assert_called_once()

        assert helper.topic_list == [
            topics.request.procedure.list,  # list requested
            topics.procedure.pool.list,  # response published
        ]
        assert helper.messages[1][1] == dict(msg_src="TEST",
                                             request_id="123",
                                             result=[])

        work_q.safe_close()
def test_main_loop_checks_shutdown_event_after_every_queue_get(mp_fixture):
    """
    Loop should check the shutdown event after every queue get.
    """
    mock_ctx = mock.MagicMock()

    event_q = MPQueue(ctx=mp_fixture)
    mock_ctx.event_queue.safe_get.side_effect = [
        False,
        False,
        EventMessage("TEST", "END", msg="foo"),
    ]

    # loop won't exit as a result of shutdown_event being True
    mock_ctx.shutdown_event.is_set.side_effect = [
        False, False, False, False, False
    ]

    main_loop(mock_ctx, [])

    assert event_q.safe_close() == 0
    assert mock_ctx.shutdown_event.is_set.call_count == 3
    def test_list_method_called(self, mp_fixture, caplog):
        """
        SES.summarise should be called when a 'request.procedure.list' message
        is received.
        """
        pubsub.pub.unsubAll()
        helper = PubSubHelper()

        work_q = MPQueue(ctx=mp_fixture)
        msg = EventMessage(
            "TEST_SUMMARY",
            "PUBSUB",
            dict(topic=topics.request.procedure.list,
                 kwargs={"request_id": "123"}),
        )
        work_q.put(msg)
        event = mp_fixture.Event()

        with mock.patch(
                "ska_oso_oet.procedure.application.main.ScriptExecutionService.summarise"
        ) as mock_cls:
            with mock.patch.object(pubsub.pub, "unsubAll", return_value=[]):
                mock_cls.side_effect = partial(set_event, event)
                _proc_worker_wrapper_helper(
                    mp_fixture,
                    caplog,
                    ScriptExecutionServiceWorker,
                    args=(work_q, mp_fixture),
                    expect_shutdown_evt=True,
                )

        assert event.is_set() is True
        mock_cls.assert_called_once()

        assert helper.topic_list == [
            topics.request.procedure.list,  # list requested
            topics.procedure.pool.list,  # response published
        ]

        work_q.safe_close()
Example #21
    def test_external_messages_are_published_locally(self, mp, caplog):
        """
        Verify that a message event is published if the event originates from
        an external source.
        """
        work_q = MPQueue(ctx=mp)
        msg = EventMessage(
            "EXTERNAL COMPONENT",
            "PUBSUB",
            dict(topic=topics.request.procedure.list,
                 kwargs={"request_id": "123"}),
        )
        work_q.put(msg)
        _proc_worker_wrapper_helper(mp,
                                    caplog,
                                    ScriptWorker,
                                    args=(work_q, ),
                                    expect_shutdown_evt=True)

        # there's no easy way to assert that the external event was republished
        # on an independent pypubsub bus. Workaround is to assert that the
        # republishing code was run via the log message
        assert "Republishing external event: EXTERNAL COMPONENT" in caplog.text
Example #22
    def run(self, process_id: int, *, call: str,
            run_args: ProcedureInput) -> None:
        """
        Run a prepared Procedure.

        This starts execution of the script prepared by a previous create()
        call.

        :param process_id: ID of Procedure to execute
        :param call: name of function to call
        :param run_args: late-binding arguments to provide to the script
        :return:
        """
        if process_id not in self.states:
            raise ValueError(f"PID #{process_id} not found")

        if self.states[process_id] != ProcedureState.READY:
            raise ValueError(
                f"PID #{process_id} unrunnable in state {self.states[process_id]}"
            )

        running_pid = [(pid, state) for pid, state in self.states.items()
                       if state == ProcedureState.RUNNING]
        if running_pid:
            pid, state = running_pid[0]
            raise ValueError(
                f"Cannot start PID {process_id}: PID #{pid} is {state}")

        msg = EventMessage(msg_src=self.__class__.__name__,
                           msg_type="RUN",
                           msg=(call, run_args))
        LOGGER.debug("Sending 'run %s' message to PID %d", call, process_id)
        msg_was_sent = self.script_queues[process_id].safe_put(msg)
        if not msg_was_sent:
            raise ValueError(
                f"Could not send run message to process {process_id}")
Example #23
    def publish_lifecycle(self, new_state: ProcedureState):
        """
        Broadcast a lifecycle status change event.

        :param new_state: new lifecycle state
        """
        # This message could be broadcast on pypubsub, letting the republish
        # callback rebroadcast it on the mptools bus. But, we know there are no
        # local subscribers so bypass the pypubsub step and broadcast directly to
        # the inter-process event bus.
        # pub.sendMessage(
        #     topics.procedure.lifecycle.statechange,
        #     msg_src=self.name,
        #     new_state=new_state,
        # )
        msg = EventMessage(
            msg_src=self.name,
            msg_type="PUBSUB",
            msg=dict(
                topic="procedure.lifecycle.statechange",
                kwargs=dict(new_state=new_state),
            ),
        )
        self.event_q.put(msg)
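A hedged sketch of a publish_lifecycle call site, assuming the worker
announces transitions as it processes work queue messages; the load helper is
invented for illustration:

    def on_load(self, evt: EventMessage) -> None:
        self.publish_lifecycle(ProcedureState.LOADING)
        self.user_module = self._load_module(evt.msg)  # hypothetical helper
        self.publish_lifecycle(ProcedureState.IDLE)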