def test_internal_messages_not_republished(self, mp, caplog):
    """
    Verify that message event is not published if the event originates
    from an internal source.
    """
    helper = PubSubHelper()

    work_q = MPQueue(ctx=mp)
    # TEST is the default component name assigned in
    # _proc_worker_wrapper_helper. This message should not be published to pypubsub
    internal_event = EventMessage(
        "TEST",
        "PUBSUB",
        dict(topic=topics.request.procedure.list, kwargs={"request_id": "123"}),
    )
    work_q.put(internal_event)

    _proc_worker_wrapper_helper(
        mp, caplog, ScriptWorker, args=(work_q,), expect_shutdown_evt=True
    )

    # nothing should have reached the local pypubsub bus for this topic
    republished = helper.messages_on_topic(topics.request.procedure.list)
    assert len(republished) == 0
def test_proc_worker_run(mp_fixture, caplog):
    """ProcWorker.run should start up, run main_func once, and shut down cleanly."""

    class _Worker(ProcWorker):
        def init_args(self, args, kwargs):
            self.args = args

        def main_func(self):
            self.log(logging.INFO, f"MAIN_FUNC: {self.args}")
            self.shutdown_event.set()

    startup_evt = mp_fixture.Event()
    shutdown_evt = mp_fixture.Event()
    event_q = MPQueue(ctx=mp_fixture)
    caplog.set_level(logging.INFO)

    worker = _Worker("TEST", startup_evt, shutdown_evt, event_q, "ARG1", "ARG2")
    # neither event may be set before run() is invoked
    assert not startup_evt.is_set()
    assert not shutdown_evt.is_set()

    worker.run()

    assert startup_evt.is_set()
    assert shutdown_evt.is_set()

    # a normal SHUTDOWN event should be the worker's final message
    shutdown_msg = event_q.safe_get()
    assert shutdown_msg
    assert shutdown_msg.msg_src == "TEST"
    assert shutdown_msg.msg_type == "SHUTDOWN"
    assert shutdown_msg.msg == "Normal"
    # the excess constructor args were passed through to init_args
    assert "MAIN_FUNC: ('ARG1', 'ARG2')" in caplog.text
def __init__(
    self,
    name: str,
    startup_event: multiprocessing.Event,
    shutdown_event: multiprocessing.Event,
    event_q: mptools.MPQueue,
    work_q: mptools.MPQueue,
    *args,
    scan_counter: Optional[multiprocessing.Value] = None,
    environment: Optional[Environment] = None,
    **kwargs,
):
    """
    Create a new worker, announcing the CREATING state before any other setup.

    :param name: worker name, used as msg_src for the CREATING announcement
    :param startup_event: event passed through to the ProcWorker constructor
    :param shutdown_event: event passed through to the ProcWorker constructor
    :param event_q: inter-process queue; receives the CREATING message and is
        passed to the ProcWorker constructor
    :param work_q: queue of messages for this worker to process
    :param scan_counter: optional shared counter, stored on the instance
        # NOTE(review): semantics presumed to be scan ID allocation — confirm
    :param environment: optional Environment, stored on the instance
    """
    # Message is rolled by hand and sent via a direct message to the
    # ProcessManager as we want to announce CREATING at the earliest
    # possible moment; we can't announce via pypubsub just yet as the
    # intraprocess<->interprocess republish function is not registered
    # till later in the construction process
    msg = EventMessage(
        msg_src=name,
        msg_type="PUBSUB",
        msg=dict(
            topic="procedure.lifecycle.statechange",
            kwargs=dict(new_state=ProcedureState.CREATING),
        ),
    )
    event_q.put(msg)

    self.name = name
    self._scan_counter = scan_counter
    self._environment = environment
    self.work_q = work_q

    # user_module will be set on LOAD message
    self.user_module = None

    # super().__init__ must run before self.log below, as logging is set up
    # in the super constructor
    super().__init__(name, startup_event, shutdown_event, event_q, *args, **kwargs)

    # AT2-591. The forked process inherits all subscriptions of the
    # parent, which we do not want to maintain in this child process. This
    # could be done before super().__init__() at the expense of losing the
    # log message, as logging is set up in the super constructor
    unsubscribed = pub.unsubAll()
    self.log(
        logging.DEBUG,
        "Unsubscribed %s pypubsub subscriptions in Procedure #%s (PID=%s)",
        len(unsubscribed),
        self.name,
        os.getpid(),
    )

    # Register a callback function so that all pypubsub messages broadcast
    # in this process are also queued for distribution to remote processes
    pub.subscribe(self.republish, pub.ALL_TOPICS)
def test_procworker_rejects_unexpected_arguments(mp_fixture):
    """A base ProcWorker given surplus positional args should raise ValueError."""
    surplus_args = ("ARG1", "ARG2")
    with pytest.raises(ValueError):
        ProcWorker(
            "TEST",
            mp_fixture.Event(),
            mp_fixture.Event(),
            MPQueue(ctx=mp_fixture),
            MPQueue(ctx=mp_fixture),
            *surplus_args,
        )
def test_queue_put(mp_fixture):
    """safe_put should accept items up to capacity, then return False."""
    # Create MPQueue of max size 2
    bounded_q = MPQueue(2, ctx=mp_fixture)

    # the first two puts fit within the queue's capacity and succeed
    for entry in ("ITEM1", "ITEM2"):
        assert bounded_q.safe_put(entry)

    # capacity reached: the put is refused rather than blocking
    assert not bounded_q.safe_put("ITEM3")

    # both accepted items remain on the queue at close time
    assert bounded_q.safe_close() == 2
def test_proc_start_hangs(mp_fixture, caplog):
    """Proc should raise RuntimeError when the worker never signals startup."""
    shutdown_evt = mp_fixture.Event()
    event_q = MPQueue(ctx=mp_fixture)
    log_q = MPQueue(ctx=mp_fixture)
    caplog.set_level(logging.INFO)

    # shorten the startup timeout so the test fails fast; restore it afterwards
    Proc.STARTUP_WAIT_SECS = 0.2
    try:
        with pytest.raises(RuntimeError):
            Proc(mp_fixture, "TEST", StartHangWorker, shutdown_evt, event_q, log_q)
    finally:
        Proc.STARTUP_WAIT_SECS = 3.0
def test_proc_worker_no_main_func(mp_fixture, caplog):
    """The base class main_func is abstract: calling it raises NotImplementedError."""
    startup_evt = mp_fixture.Event()
    shutdown_evt = mp_fixture.Event()
    event_q = MPQueue(ctx=mp_fixture)
    try:
        caplog.set_level(logging.INFO)
        worker = ProcWorker("TEST", startup_evt, shutdown_evt, event_q)
        with pytest.raises(NotImplementedError):
            worker.main_func()
    finally:
        # always release the queue's feeder thread/resources
        event_q.safe_close()
def test_script_worker_calls_correct_function_on_message_type(
        self, mock_env_fn, mock_load_fn, mock_run_fn, caplog):
    """Each message type on the work queue is routed to its matching handler."""
    mp = multiprocessing.get_context()
    script = GitScript("git://test.py", GitArgs())

    env_evt = EventMessage("test", "ENV", script)
    load_evt = EventMessage("test", "LOAD", script)
    run_evt = EventMessage("test", "RUN", ("init", None))

    work_q = MPQueue(ctx=mp)
    for evt in (env_evt, load_evt, run_evt):
        work_q.put(evt)

    _proc_worker_wrapper_helper(
        mp, caplog, ScriptWorker, args=(work_q,), expect_shutdown_evt=True
    )

    # each handler was called exactly once, with the matching message type
    for handler, sent in (
        (mock_env_fn, env_evt),
        (mock_load_fn, load_evt),
        (mock_run_fn, run_evt),
    ):
        handler.assert_called_once()
        positional, _ = handler.call_args
        assert positional[0].msg_type == sent.msg_type
def test_on_load(self, mock_module_fn, mp, caplog):
    """
    Verify that a LOAD message makes the ScriptWorker load the script module.
    """
    mock_module_fn.side_effect = MagicMock()
    script = GitScript("git://test.py", GitArgs())
    evt = EventMessage("test", "LOAD", script)

    work_q = MPQueue(ctx=mp)
    work_q.put(evt)
    _proc_worker_wrapper_helper(
        mp, caplog, ScriptWorker, args=(work_q,), expect_shutdown_evt=True
    )

    # BUG FIX: the original used `assert mock_module_fn.called_once_with(script)`.
    # `called_once_with` is not a Mock assertion method; accessing it creates an
    # always-truthy child mock, so the original assert could never fail. Use the
    # real Mock assertion so the test actually verifies the call.
    mock_module_fn.assert_called_once_with(script)
def test_queue_proc_worker(mp_fixture, caplog):
    """QueueProcWorker should process items until the END sentinel is seen."""
    work_q = MPQueue(ctx=mp_fixture)
    # the 5 queued after END must never be processed
    for entry in (1, 2, 3, 4, "END", 5):
        work_q.put(entry)

    items = _proc_worker_wrapper_helper(
        mp_fixture,
        caplog,
        QueueProcWorkerTest,
        args=(work_q,),
        expect_shutdown_evt=False,
    )

    # exactly the four pre-sentinel items were handled, in order
    assert items == ["DONE 1", "DONE 2", "DONE 3", "DONE 4"]
    assert len(items) == 4
def test_proc_full_stop_need_terminate(mp_fixture, caplog):
    """full_stop should kill a worker that ignores the shutdown request."""
    shutdown_evt = mp_fixture.Event()
    event_q = MPQueue(ctx=mp_fixture)
    caplog.set_level(logging.INFO)

    managed = Proc(mp_fixture, "TEST", NeedTerminateWorker, shutdown_evt, event_q)
    managed.full_stop(wait_time=0.1)

    # additional delay is required for coverage cleanup to complete, at which
    # point the process will _finally_ be dead
    managed.proc.join(timeout=COVERAGE_PROCESS_END_OVERHEAD_SECS)
    assert not managed.proc.is_alive()
def test_proc_full_stop(mp_fixture, caplog):
    """A timer worker should emit its ticks, announce shutdown, and stop cleanly."""
    shutdown_evt = mp_fixture.Event()
    event_q = MPQueue(ctx=mp_fixture)
    caplog.set_level(logging.INFO)

    managed = Proc(mp_fixture, "TEST", TimerProcWorkerTest, shutdown_evt, event_q)

    # four TIMER ticks are expected before the worker announces shutdown
    for tick in range(1, 5):
        item = event_q.safe_get(1.0)
        assert item, f"idx: {tick - 1}"
        assert item.startswith(f"TIMER {tick} [")

    final_msg = event_q.safe_get(1.0)
    assert final_msg.msg_src == "TEST"
    assert final_msg.msg_type == "SHUTDOWN"
    assert final_msg.msg == "Normal"

    managed.full_stop(wait_time=0.5)
    assert not managed.proc.is_alive()
def assert_command_request_and_response(mp_fixture, caplog, mock_method,
                                        request_topic, response_topic, cmd):
    """Send cmd on request_topic and verify the mocked handler and response topic."""
    pubsub.pub.unsubAll()
    helper = PubSubHelper()

    work_q = MPQueue(ctx=mp_fixture)
    request_msg = EventMessage(
        "UNITTEST",
        "PUBSUB",
        dict(topic=request_topic, kwargs={"request_id": "1234", "cmd": cmd}),
    )
    work_q.put(request_msg)

    handler_called = mp_fixture.Event()
    mock_method.side_effect = partial(set_event, handler_called)
    with mock.patch.object(pubsub.pub, "unsubAll", return_value=[]):
        _proc_worker_wrapper_helper(
            mp_fixture,
            caplog,
            ScriptExecutionServiceWorker,
            args=(work_q, mp_fixture),
            expect_shutdown_evt=True,
        )

    # the handler ran exactly once and received the command object
    assert handler_called.is_set()
    mock_method.assert_called_once()
    assert mock_method.call_args[0][0] == cmd

    # request was seen, then the response was published
    assert helper.topic_list == [request_topic, response_topic]

    work_q.safe_close()
def test_external_messages_are_published_locally(self, mp_fixture, caplog):
    """
    Verify that message event is published if the event originates from an
    external source.
    """
    pubsub.pub.unsubAll()
    helper = PubSubHelper()

    work_q = MPQueue(ctx=mp_fixture)
    external_msg = EventMessage(
        "EXTERNAL COMPONENT",
        "PUBSUB",
        dict(topic=topics.request.procedure.list, kwargs={"request_id": "123"}),
    )
    work_q.put(external_msg)

    with mock.patch.object(pubsub.pub, "unsubAll", return_value=[]):
        _proc_worker_wrapper_helper(
            mp_fixture,
            caplog,
            EventBusWorker,
            args=(work_q,),
            expect_shutdown_evt=True,
        )

    # the externally-sourced event was republished on the local bus
    assert topics.request.procedure.list in helper.topic_list

    work_q.safe_close()
def test_main_loop_ends_on_end_message(mp_fixture):
    """
    Main loop should terminate when end message is received.
    """
    mock_ctx = mock.MagicMock()

    event_q = MPQueue(ctx=mp_fixture)
    for msg_type in ("PUBSUB", "PUBSUB", "PUBSUB", "END"):
        event_q.put(EventMessage("TEST", msg_type, msg="foo"))
    mock_ctx.event_queue = event_q
    # shutdown is never requested; only the END message can stop the loop
    mock_ctx.shutdown_event.is_set.return_value = False

    main_loop(mock_ctx, [])

    # every queued message, END included, was consumed before exit
    assert event_q.safe_close() == 0
def messages(self) -> Generator[Message, None, None]:
    """
    A generator of Message objects created from received pubsub events
    """
    relay_q = MPQueue(ctx=self._mp_context)

    def _enqueue(topic: pub.Topic = pub.AUTO_TOPIC, **kwargs):
        # record the topic name alongside the event payload; a request_id,
        # when present, becomes the Message id rather than payload data
        kwargs["topic"] = topic.name
        extra = {}
        if "request_id" in kwargs:
            extra["id"] = kwargs.pop("request_id")
        relay_q.put(Message(kwargs, **extra))

    pub.subscribe(_enqueue, pub.ALL_TOPICS)

    while True:
        received = relay_q.safe_get(timeout=0.1)
        if received is not None:
            yield received
def test_main_loop_checks_shutdown_event_after_every_queue_get(mp_fixture):
    """
    Loop should check the shutdown event after each queue get.
    """
    mock_ctx = mock.MagicMock()
    event_q = MPQueue(ctx=mp_fixture)

    # two empty reads, then the END message that terminates the loop
    mock_ctx.event_queue.safe_get.side_effect = [
        False,
        False,
        EventMessage("TEST", "END", msg="foo"),
    ]

    # loop won't exit as a result of shutdown_event being True
    mock_ctx.shutdown_event.is_set.side_effect = [False] * 5

    main_loop(mock_ctx, [])

    assert event_q.safe_close() == 0
    # one shutdown check per queue get: three gets, three checks
    assert mock_ctx.shutdown_event.is_set.call_count == 3
def test_proc_worker_exception(mp_fixture, caplog):
    """A crash in main_func should emit a FATAL event and exit via SystemExit."""

    class _CrashingWorker(ProcWorker):
        def main_func(self):
            raise NameError("Because this doesn't happen often")

    startup_evt = mp_fixture.Event()
    shutdown_evt = mp_fixture.Event()
    event_q = MPQueue(ctx=mp_fixture)
    caplog.set_level(logging.INFO)

    with pytest.raises(SystemExit):
        proc_worker_wrapper(
            _CrashingWorker, "TEST", startup_evt, shutdown_evt, event_q
        )

    # startup completed, but the crash prevented a clean shutdown handshake
    assert startup_evt.is_set()
    assert not shutdown_evt.is_set()

    fatal_msg = event_q.safe_get()
    assert fatal_msg
    assert fatal_msg.msg_src == "TEST"
    assert fatal_msg.msg_type == "FATAL"
    # the FATAL payload carries the traceback, including the raising line
    assert 'raise NameError("Because this doesn\'t happen often")' in fatal_msg.msg
    assert "Exception Shutdown" in caplog.text
def test_mpqueue_get(mp_fixture):
    """safe_get should return items in FIFO order and None when empty."""
    q = MPQueue(ctx=mp_fixture)

    # empty queue: an immediate (no-timeout) get returns None rather than blocking
    assert q.safe_get(None) is None

    q.put("ITEM1")
    q.put("ITEM2")

    assert q.safe_get(0.02) == "ITEM1"
    assert q.safe_get(0.02) == "ITEM2"
    # drained: both timed and immediate gets now come back empty
    assert q.safe_get(0.02) is None
    assert q.safe_get(None) is None

    assert q.safe_close() == 0
def test_external_messages_are_published_locally(self, mp, caplog):
    """
    Verify that message event is published if the event originates from an
    external source.
    """
    work_q = MPQueue(ctx=mp)
    external_msg = EventMessage(
        "EXTERNAL COMPONENT",
        "PUBSUB",
        dict(topic=topics.request.procedure.list, kwargs={"request_id": "123"}),
    )
    work_q.put(external_msg)

    _proc_worker_wrapper_helper(
        mp, caplog, ScriptWorker, args=(work_q,), expect_shutdown_evt=True
    )

    # there's no easy way to assert that the external event was republished
    # on an independent pypubsub bus. Workaround is to assert that the
    # republishing code was run via the log message
    assert "Republishing external event: EXTERNAL COMPONENT" in caplog.text
def _proc_worker_wrapper_helper(
    mp_fixture,
    caplog,
    worker_class,
    args=None,
    kwargs=None,
    expect_shutdown_evt=True,
    alarm_secs=1.0,
):
    """
    Run worker_class through proc_worker_wrapper with a watchdog alarm,
    assert a clean startup/shutdown handshake, and return the events the
    worker emitted (excluding the final SHUTDOWN message).
    """
    startup_evt = mp_fixture.Event()
    shutdown_evt = mp_fixture.Event()
    event_q = MPQueue(ctx=mp_fixture)

    args = () if args is None else args
    kwargs = {} if kwargs is None else kwargs

    if alarm_secs:
        # watchdog: request shutdown if the worker is still running after
        # alarm_secs, so a misbehaving worker can't hang the test run
        signal.signal(signal.SIGALRM, lambda signum, frame: shutdown_evt.set())
        signal.setitimer(signal.ITIMER_REAL, alarm_secs)

    caplog.set_level(logging.DEBUG)
    exitcode = proc_worker_wrapper(
        worker_class, "TEST", startup_evt, shutdown_evt, event_q, *args, **kwargs
    )

    assert startup_evt.is_set()
    assert shutdown_evt.is_set() == expect_shutdown_evt

    items = list(event_q.drain())
    assert items
    # the final event must always be a normal SHUTDOWN announcement
    *emitted, final = items
    assert final.msg_src == "TEST"
    assert final.msg_type == "SHUTDOWN"
    assert final.msg == "Normal"
    assert exitcode == 0

    return emitted
def test_main_loop_ends_when_shutdown_event_is_set(mp_fixture):
    """
    Main loop should terminate when shutdown event is set.
    """
    mock_ctx = mock.MagicMock()

    event_q = MPQueue(ctx=mp_fixture)
    for msg_type in ("PUBSUB", "PUBSUB", "PUBSUB", "END"):
        event_q.put(EventMessage("TEST", msg_type, msg="foo"))
    mock_ctx.event_queue = event_q

    # one processing loop before shutdown in set, at which point the loop
    # should exit with two messages still in the event queue
    mock_ctx.shutdown_event.is_set.side_effect = [False, False, True]

    main_loop(mock_ctx, [])

    assert event_q.safe_close() == 2
def test_drain_queue(mp_fixture):
    """drain should yield nothing for an empty queue and all items otherwise."""
    q = MPQueue(ctx=mp_fixture)

    # draining an empty queue yields nothing
    assert list(q.drain()) == []

    expected = [f"ITEM{idx}" for idx in range(10)]
    for entry in expected:
        q.put(entry)

    # draining returns every queued item, in order, leaving the queue empty
    assert list(q.drain()) == expected
    assert q.safe_close() == 0
def test_flask_worker_starts_flask(mp_fixture, caplog):
    """
    Verify that the FlaskWorker starts Flask.
    """
    with mock.patch("flask.Flask") as mock_flask:
        # mock Flask causes connection error in shutdown as shutdown URL is accessed
        with mock.patch("requests.post"):
            _proc_worker_wrapper_helper(
                mp_fixture,
                caplog,
                FlaskWorker,
                args=(MPQueue(ctx=mp_fixture),),
                expect_shutdown_evt=True,
            )

    # the worker should have constructed a Flask app and started it exactly once
    mock_flask.return_value.run.assert_called_once()
def test_internal_messages_not_republished(self, mp_fixture, caplog):
    """
    Verify that message event is not published if the event originates
    from an internal source.
    """
    pubsub.pub.unsubAll()
    helper = PubSubHelper()

    work_q = MPQueue(ctx=mp_fixture)
    # TEST is the default component name assigned in
    # _proc_worker_wrapper_helper. This message should be ignored.
    internal_msg = EventMessage(
        "TEST",
        "PUBSUB",
        dict(topic=topics.request.procedure.list, kwargs={"request_id": "123"}),
    )
    # But coming from NONTEST, this message should be republished.
    external_msg = EventMessage(
        "NONTEST",
        "PUBSUB",
        dict(topic=topics.request.procedure.list, kwargs={"request_id": "456"}),
    )
    for msg in (internal_msg, external_msg):
        work_q.put(msg)

    with mock.patch.object(pubsub.pub, "unsubAll", return_value=[]):
        _proc_worker_wrapper_helper(
            mp_fixture,
            caplog,
            EventBusWorker,
            args=(work_q,),
            expect_shutdown_evt=True,
        )

    # only the externally-sourced message reached the local bus
    assert len(helper.messages) == 1
    assert helper.messages[0][1] == dict(msg_src="NONTEST", request_id="456")

    work_q.safe_close()
def test_procworker_passes_excess_arguments_to_init_args(mp_fixture):
    """Surplus constructor args/kwargs should be delivered to init_args."""

    class _InitArgsWorker(ProcWorker):
        def init_args(self, args, kwargs):
            # mutate the caller-supplied containers to prove they arrived here
            (seen_list,) = args
            seen_list.extend(["ARG1", "ARG2"])
            kwargs["mydict"]["k"] = "v"

    arglist = []
    argdict = {}
    _InitArgsWorker(
        "TEST",
        mp_fixture.Event(),
        mp_fixture.Event(),
        MPQueue(ctx=mp_fixture),
        arglist,
        mydict=argdict,
    )

    assert arglist == ["ARG1", "ARG2"]
    assert argdict == {"k": "v"}
def test_main_loop_ignores_and_logs_events_of_unknown_types(mp_fixture):
    """
    Loop should log events it doesn't know how to handle.
    """
    mock_ctx = mock.MagicMock()

    event_q = MPQueue(ctx=mp_fixture)
    event_q.put(EventMessage("TEST", "FOO", msg="1"))
    mock_ctx.event_queue = event_q

    # allow one processing pass, then stop the loop via the shutdown event
    mock_ctx.shutdown_event.is_set.side_effect = [False, True]

    main_loop(mock_ctx, [])
    event_q.safe_close()

    # the unrecognised FOO event should have been reported exactly once
    mock_ctx.log.assert_called_once()
    assert "Unknown Event" in mock_ctx.log.call_args[0][1]
def test_handles_request_to_list_invalid_id(self, mp_fixture, caplog):
    """
    The ValueError raised when SES.summarise is given an invalid PID should
    be handled.
    """
    pubsub.pub.unsubAll()
    helper = PubSubHelper()

    work_q = MPQueue(ctx=mp_fixture)
    list_request = EventMessage(
        "TEST_SUMMARY",
        "PUBSUB",
        dict(topic=topics.request.procedure.list, kwargs={"request_id": "123"}),
    )
    work_q.put(list_request)

    with mock.patch(
        "ska_oso_oet.procedure.application.main.ScriptExecutionService.summarise"
    ) as mock_summarise:
        with mock.patch.object(pubsub.pub, "unsubAll", return_value=[]):
            # summarise blows up with ValueError for the unknown PID
            mock_summarise.side_effect = ValueError
            _proc_worker_wrapper_helper(
                mp_fixture,
                caplog,
                ScriptExecutionServiceWorker,
                args=(work_q, mp_fixture),
                expect_shutdown_evt=True,
            )

    mock_summarise.assert_called_once()

    # the error is swallowed and an empty result list is published instead
    assert helper.topic_list == [
        topics.request.procedure.list,  # list requested
        topics.procedure.pool.list,  # response published
    ]
    assert helper.messages[1][1] == dict(msg_src="TEST", request_id="123", result=[])

    work_q.safe_close()
def test_list_method_called(self, mp_fixture, caplog):
    """
    SES.summarise should be called when 'request.procedure.list' message is
    received
    """
    pubsub.pub.unsubAll()
    helper = PubSubHelper()

    work_q = MPQueue(ctx=mp_fixture)
    list_request = EventMessage(
        "TEST_SUMMARY",
        "PUBSUB",
        dict(topic=topics.request.procedure.list, kwargs={"request_id": "123"}),
    )
    work_q.put(list_request)

    summarise_called = mp_fixture.Event()
    with mock.patch(
        "ska_oso_oet.procedure.application.main.ScriptExecutionService.summarise"
    ) as mock_summarise:
        with mock.patch.object(pubsub.pub, "unsubAll", return_value=[]):
            # record the call via a shared event rather than the mock alone
            mock_summarise.side_effect = partial(set_event, summarise_called)
            _proc_worker_wrapper_helper(
                mp_fixture,
                caplog,
                ScriptExecutionServiceWorker,
                args=(work_q, mp_fixture),
                expect_shutdown_evt=True,
            )

    assert summarise_called.is_set() is True
    mock_summarise.assert_called_once()

    assert helper.topic_list == [
        topics.request.procedure.list,  # list requested
        topics.procedure.pool.list,  # response published
    ]

    work_q.safe_close()
def test_main_loop_adds_pubsub_messages_to_event_queues(mp_fixture):
    """
    PUBSUB messages should be added to event queues.
    """
    mock_ctx = mock.MagicMock()

    event_q = MPQueue(ctx=mp_fixture)
    for payload in ("1", "2", "3"):
        event_q.put(EventMessage("TEST", "PUBSUB", msg=payload))
    event_q.put(EventMessage("TEST", "END", msg="foo"))
    mock_ctx.event_queue = event_q
    # shutdown is never requested; the END message terminates the loop
    mock_ctx.shutdown_event.is_set.return_value = False

    downstream_a = MPQueue(ctx=mp_fixture)
    downstream_b = MPQueue(ctx=mp_fixture)

    main_loop(mock_ctx, [downstream_a, downstream_b])

    # every PUBSUB message was fanned out to each downstream queue
    assert downstream_a.safe_close() == 3
    assert downstream_b.safe_close() == 3

    event_q.safe_close()