def _launch_process(self, queue_name="", out_streams=None, process_definition_id="", configuration=None):
    """
    Launches the process via the process dispatcher.

    @param queue_name             Name of the queue the process consumes from
    @param out_streams            Streams the process publishes to
    @param process_definition_id  Process definition to schedule
    @param configuration          Spawn configuration dict; may be None
    @retval pid                   Id of the spawned process
    """
    # ------------------------------------------------------------------------------------
    # Spawn Configuration and Parameters
    # ------------------------------------------------------------------------------------
    # Fix: the original dereferenced `configuration` directly, raising a
    # TypeError whenever the default (None) was used.
    if configuration is None:
        configuration = {}
    if "process" not in configuration:
        configuration["process"] = {}
    configuration["process"]["queue_name"] = queue_name
    configuration["process"]["publish_streams"] = out_streams

    # Restart only on abnormal termination; queue the request when no
    # resources are currently available.
    schedule = ProcessSchedule()
    schedule.restart_mode = ProcessRestartMode.ABNORMAL
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS

    # ------------------------------------------------------------------------------------
    # Process Spawning
    # ------------------------------------------------------------------------------------
    pid = self.clients.process_dispatcher.schedule_process(
        process_definition_id=process_definition_id,
        schedule=schedule,
        configuration=configuration
    )
    validate_is_not_none(pid, "Process could not be spawned")

    return pid
def launch_worker(self, queue_name, config):
    """Spawn an ingestion worker bound to the given queue.

    The worker configuration is augmented with the queue name and the
    buffer/time limits from service configuration; the worker process
    definition is looked up by name, associated with any matching
    ExchangeName resources, and then scheduled.
    """
    worker_config = DotDict(config or {})
    worker_config.process.queue_name = queue_name
    worker_config.process.buffer_limit = self.CFG.get_safe(
        'service.ingestion_management.buffer_limit', 10)
    worker_config.process.time_limit = self.CFG.get_safe(
        'service.ingestion_management.time_limit', 10)

    # Look up the worker process definition by its well-known name.
    definition_ids, _ = self.clients.resource_registry.find_resources(
        restype=RT.ProcessDefinition, name='ingestion_worker_process',
        id_only=True)
    validate_true(
        len(definition_ids),
        'No process definition for ingestion workers could be found')
    definition_id = definition_ids[0]

    pid = self.clients.process_dispatcher.create_process(
        process_definition_id=definition_id)

    # Tie the worker to every ExchangeName that matches the queue.
    xn_ids, _ = self.clients.resource_registry.find_resources(
        restype=RT.ExchangeName, name=queue_name, id_only=True)
    for xn_id in xn_ids:
        self.clients.resource_registry.create_association(
            xn_id, PRED.hasIngestionWorker, pid)

    # Restart on abnormal exit; queue if resources are scarce.
    proc_schedule = ProcessSchedule()
    proc_schedule.restart_mode = ProcessRestartMode.ABNORMAL
    proc_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

    self.clients.process_dispatcher.schedule_process(
        process_definition_id=definition_id, schedule=proc_schedule,
        process_id=pid, configuration=worker_config)
def _get_process_schedule(**kwargs):
    """Translate scheduling keyword options into a ProcessSchedule.

    Recognized keywords: queueing_mode, restart_mode, execution_engine_id,
    node_exclusive, constraints.  Unknown queueing/restart mode names raise
    BadRequest.  A missing restart_mode defaults to NEVER because the HA
    Agent reschedules failures itself.
    """
    schedule = ProcessSchedule()

    queueing_mode = kwargs.get('queueing_mode')
    if queueing_mode is not None:
        try:
            schedule.queueing_mode = ProcessQueueingMode._value_map[queueing_mode]
        except KeyError:
            raise BadRequest("%s is not a known ProcessQueueingMode" % (queueing_mode))

    restart_mode = kwargs.get('restart_mode')
    if restart_mode is None:
        # if restart mode isn't specified, use NEVER. HA Agent itself will
        # reschedule failures.
        schedule.restart_mode = ProcessRestartMode.NEVER
    else:
        try:
            schedule.restart_mode = ProcessRestartMode._value_map[restart_mode]
        except KeyError:
            raise BadRequest("%s is not a known ProcessRestartMode" % (restart_mode))

    target = ProcessTarget()
    for attr in ('execution_engine_id', 'node_exclusive', 'constraints'):
        value = kwargs.get(attr)
        if value is not None:
            setattr(target, attr, value)
    schedule.target = target

    return schedule
def test_node_exclusive(self):
    """Exercise the node_exclusive scheduling constraint.

    node_exclusive ensures that multiple processes sharing a tag each get
    a VM exclusive of one another; other processes may still share those
    VMs.  Since node contents cannot be queried directly in this test, the
    capability is proven by scheduling processes one at a time and
    checking their states.
    """
    # verifies L4-CI-CEI-RQ121
    # verifies L4-CI-CEI-RQ57

    # setUp() created a single node and eeagent, so two processes tagged
    # with the same "abc" node_exclusive value cannot coexist: the first
    # runs and the second queues.
    target = ProcessTarget(execution_engine_id="engine1")
    target.node_exclusive = "abc"
    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS
    schedule.target = target

    pid1 = self.pd_cli.create_process(self.process_definition_id)
    self.waiter.start()
    self.pd_cli.schedule_process(self.process_definition_id,
        schedule, process_id=pid1)
    self.waiter.await_state_event(pid1, ProcessStateEnum.RUNNING)

    pid2 = self.pd_cli.create_process(self.process_definition_id)
    self.pd_cli.schedule_process(self.process_definition_id,
        schedule, process_id=pid2)
    self.waiter.await_state_event(pid2, ProcessStateEnum.WAITING)

    # The node itself is not full: a third process without a
    # node_exclusive tag starts immediately.
    target.node_exclusive = None
    pid3 = self.pd_cli.create_process(self.process_definition_id)
    self.pd_cli.schedule_process(self.process_definition_id,
        schedule, process_id=pid3)
    self.waiter.await_state_event(pid3, ProcessStateEnum.RUNNING)

    # Add a second node to the engine: pid2 should now start since an
    # exclusive "abc" node is free.
    node2_id = uuid.uuid4().hex
    self._send_node_state("engine1", node2_id)
    self._start_eeagent(node2_id)
    self.waiter.await_state_event(pid2, ProcessStateEnum.RUNNING)

    # kill the processes for good
    for pid in (pid1, pid2, pid3):
        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
def test_queueing_mode(self):
    """Verify the queueing mode survives translation to the core scheduler."""
    definition = DotDict()
    definition['name'] = "someprocess"
    definition['executable'] = {'module': 'my_module', 'class': 'class'}
    self.pd_service.backend.read_definition = Mock(return_value=definition)

    pid = self.pd_service.create_process("fake-process-def-id")

    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS

    configuration = {"some": "value"}
    pid2 = self.pd_service.schedule_process("fake-process-def-id",
        schedule, configuration, pid)

    self.assertEqual(self.mock_core.schedule_process.call_count, 1)
    _, call_kwargs = self.mock_core.schedule_process.call_args
    # "ALWAYS" is the core-level spelling of ProcessQueueingMode.ALWAYS.
    self.assertEqual(call_kwargs['queueing_mode'], "ALWAYS")
def _launch_process(self, queue_name='', out_streams=None, process_definition_id='', configuration=None):
    """
    Launches the process via the process dispatcher.

    @param queue_name             Name of the queue the process consumes from
    @param out_streams            Streams the process publishes to
    @param process_definition_id  Process definition to schedule
    @param configuration          Spawn configuration dict; may be None
    @retval pid                   Id of the spawned process
    """
    # ------------------------------------------------------------------------------------
    # Spawn Configuration and Parameters
    # ------------------------------------------------------------------------------------
    # Fix: guard against callers relying on the None default; the
    # membership test below would otherwise raise TypeError.
    if configuration is None:
        configuration = {}
    if 'process' not in configuration:
        configuration['process'] = {}
    configuration['process']['queue_name'] = queue_name
    configuration['process']['publish_streams'] = out_streams

    # Restart only on abnormal termination; queue when resources are busy.
    schedule = ProcessSchedule()
    schedule.restart_mode = ProcessRestartMode.ABNORMAL
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS

    # ------------------------------------------------------------------------------------
    # Process Spawning
    # ------------------------------------------------------------------------------------
    pid = self.clients.process_dispatcher.schedule_process(
        process_definition_id=process_definition_id,
        schedule=schedule,
        configuration=configuration)

    validate_is_not_none(pid, "Process could not be spawned")

    return pid
def test_create_schedule_cancel(self):
    """Schedule a named process, verify it, then cancel and reschedule it."""
    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS

    proc_name = 'myreallygoodname'
    pid = self.pd_cli.create_process(self.process_definition_id)
    self.waiter.start(pid)

    pid2 = self.pd_cli.schedule_process(self.process_definition_id,
        schedule, configuration={}, process_id=pid, name=proc_name)
    self.assertEqual(pid, pid2)

    # verifies L4-CI-CEI-RQ141 and L4-CI-CEI-RQ142
    self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

    proc = self.pd_cli.read_process(pid)
    self.assertEqual(proc.process_id, pid)
    self.assertEqual(proc.process_configuration, {})
    self.assertEqual(proc.process_state, ProcessStateEnum.RUNNING)

    # The process record is mirrored into the resource registry and must
    # be readable there directly.
    # verifies L4-CI-CEI-RQ63
    # verifies L4-CI-CEI-RQ64
    self.assertEqual(self.rr_cli.read(pid).process_id, pid)

    # Talk to the process to prove it is really running.
    test_client = TestClient()
    for expected in range(1, 6):
        self.assertEqual(expected, test_client.count(timeout=10))

    # verifies L4-CI-CEI-RQ147
    # The requested name must have been applied in the container.
    self.assertEqual(proc_name, test_client.get_process_name(pid=pid2))

    # Kill the process and start it again under the same id.
    self.pd_cli.cancel_process(pid)
    self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)

    pid2 = self.pd_cli.schedule_process(self.process_definition_id,
        schedule, configuration={}, process_id=pid)
    self.assertEqual(pid, pid2)

    self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

    for expected in range(1, 6):
        self.assertEqual(expected, test_client.count(timeout=10))

    # Kill the process for good.
    self.pd_cli.cancel_process(pid)
    self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
def test_create_schedule_cancel(self):
    """Schedule, verify, cancel, then relaunch under a fresh process id."""
    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS

    pid = self.pd_cli.create_process(self.process_definition_id)
    self.waiter.start(pid)

    pid2 = self.pd_cli.schedule_process(self.process_definition_id,
        schedule, configuration={}, process_id=pid)
    self.assertEqual(pid, pid2)

    # verifies L4-CI-CEI-RQ141 and L4-CI-CEI-RQ142
    self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

    proc = self.pd_cli.read_process(pid)
    self.assertEqual(proc.process_id, pid)
    self.assertEqual(proc.process_configuration, {})
    self.assertEqual(proc.process_state, ProcessStateEnum.RUNNING)

    # Process state is mirrored into the resource registry.
    # verifies L4-CI-CEI-RQ63
    # verifies L4-CI-CEI-RQ64
    self.assertEqual(self.rr_cli.read(pid).process_id, pid)

    # Talk to the process to prove it is really running.
    test_client = TestClient()
    for expected in range(1, 6):
        self.assertEqual(expected, test_client.count(timeout=10))
    # verifies L4-CI-CEI-RQ147

    # Kill the process, then start a brand new one.
    self.pd_cli.cancel_process(pid)
    self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
    self.waiter.stop()

    oldpid = pid
    pid = self.pd_cli.create_process(self.process_definition_id)
    self.waiter.start(pid)

    pid2 = self.pd_cli.schedule_process(self.process_definition_id,
        schedule, configuration={}, process_id=pid)
    self.assertEqual(pid, pid2)
    self.assertNotEqual(oldpid, pid)

    self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

    for expected in range(1, 6):
        self.assertEqual(expected, test_client.count(timeout=10))

    # Kill the process for good.
    self.pd_cli.cancel_process(pid)
    self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
def test_code_download(self):
    """A definition with a code URL downloads and runs; one without fails."""
    # Definition carrying only module/class -- no URL to fetch code from.
    no_url_def = ProcessDefinition(name='test_process_nodownload')
    no_url_def.executable = {
        'module': 'ion.my.test.process',
        'class': 'TestProcess'
    }
    no_url_def_id = self.pd_cli.create_process_definition(no_url_def)

    # Definition whose URL points at this very file.
    # verifies L4-CI-CEI-RQ114
    url = "file://%s" % os.path.join(os.path.dirname(__file__),
        'test_process_dispatcher.py')
    url_def = ProcessDefinition(name='test_process_download')
    url_def.executable = {
        'module': 'ion.my.test.process',
        'class': 'TestProcess',
        'url': url
    }
    url_def_id = self.pd_cli.create_process_definition(url_def)

    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS
    schedule.target = ProcessTarget()

    self.waiter.start()

    # Without a download source the process cannot start.
    pid_no_url = self.pd_cli.create_process(no_url_def_id)
    self.pd_cli.schedule_process(no_url_def_id, schedule,
        process_id=pid_no_url)
    self.waiter.await_state_event(pid_no_url, ProcessStateEnum.FAILED)

    # With a URL the code is fetched and the process runs.
    pid = self.pd_cli.create_process(url_def_id)
    self.pd_cli.schedule_process(url_def_id, schedule, process_id=pid)
    self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)
def _launch_highcharts(self, viz_id, data_product_id, out_stream_id):
    '''
    Launches the high-charts transform.

    @param viz_id           Visualization resource to associate with the subscription
    @param data_product_id  Data product whose stream feeds the transform
    @param out_stream_id    Stream the transform publishes to
    @raises BadRequest          if the data product has no associated stream
    @raises ServiceUnavailable  if the transform fails to reach RUNNING in time
    '''
    stream_ids, _ = self.clients.resource_registry.find_objects(
        data_product_id, PRED.hasStream, id_only=True)
    if not stream_ids:
        raise BadRequest(
            "Can't launch high charts streaming: data product doesn't have associated stream (%s)"
            % data_product_id)

    queue_name = 'viz_%s' % data_product_id
    sub_id = self.clients.pubsub_management.create_subscription(
        name='viz transform for %s' % data_product_id,
        exchange_name=queue_name,
        stream_ids=stream_ids)

    self.clients.pubsub_management.activate_subscription(sub_id)
    self.clients.resource_registry.create_association(
        viz_id, PRED.hasSubscription, sub_id)

    config = DotDict()
    config.process.publish_streams.highcharts = out_stream_id
    config.process.queue_name = queue_name

    # This process MUST be launched the first time or fail so the user
    # doesn't wait there for nothing to happen.
    schedule = ProcessSchedule()
    schedule.restart_mode = ProcessRestartMode.NEVER
    schedule.queueing_mode = ProcessQueueingMode.NEVER

    # Launch the process
    procdef_id = self._get_highcharts_procdef()
    pid = self.clients.process_dispatcher.schedule_process(
        process_definition_id=procdef_id,
        schedule=schedule,
        configuration=config)

    # Make sure it launched or raise an error
    process_gate = ProcessStateGate(
        self.clients.process_dispatcher.read_process, pid,
        ProcessStateEnum.RUNNING)
    # Fix: 'await' is a reserved keyword since Python 3.7, so the gate's
    # method must be fetched with getattr() instead of dotted access.
    gate_wait = getattr(process_gate, 'await')
    if not gate_wait(self.CFG.get_safe('endpoint.receive.timeout', 10)):
        raise ServiceUnavailable(
            "Failed to launch high charts realtime visualization")
def schedule_process(self, upid, definition_id, configuration=None, subscribers=None, constraints=None, queueing_mode=None, restart_mode=None, execution_engine_id=None, node_exclusive=None):
    """Schedule a process through the real PD client on behalf of HA.

    Publishes a RUNNING lifecycle event for the dispatched process,
    builds a ProcessSchedule from the mode/target keywords, schedules the
    process, records the association, and returns a {'upid', 'state'}
    summary dict.  Unknown mode names raise BadRequest.
    """
    definition = self.real_client.read_process_definition(definition_id)
    self.event_pub.publish_event(event_type="ProcessLifecycleEvent",
        origin=definition.name, origin_type="DispatchedHAProcess",
        state=ProcessStateEnum.RUNNING)

    create_upid = self.real_client.create_process(definition_id)

    schedule = ProcessSchedule()
    if queueing_mode is not None:
        try:
            schedule.queueing_mode = ProcessQueueingMode._value_map[queueing_mode]
        except KeyError:
            raise BadRequest("%s is not a known ProcessQueueingMode" % (queueing_mode))

    if restart_mode is not None:
        try:
            schedule.restart_mode = ProcessRestartMode._value_map[restart_mode]
        except KeyError:
            raise BadRequest("%s is not a known ProcessRestartMode" % (restart_mode))

    target = ProcessTarget()
    if execution_engine_id is not None:
        target.execution_engine_id = execution_engine_id
    if node_exclusive is not None:
        target.node_exclusive = node_exclusive
    if constraints is not None:
        target.constraints = constraints
    schedule.target = target

    sched_pid = self.real_client.schedule_process(definition_id,
        schedule, configuration=configuration, process_id=create_upid)

    proc = self.real_client.read_process(sched_pid)
    self._associate_process(proc)

    return {'upid': proc.process_id,
            'state': self.state_map.get(proc.process_state, self.unknown_state)}
def test_create_schedule_cancel(self):
    """Schedule, verify, cancel, then relaunch under a new process id."""
    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS

    pid = self.pd_cli.create_process(self.process_definition_id)
    self.subscribe_events(pid)

    pid2 = self.pd_cli.schedule_process(self.process_definition_id,
        schedule, configuration={}, process_id=pid)
    self.assertEqual(pid, pid2)
    self.await_state_event(pid, ProcessStateEnum.SPAWN)

    proc = self.pd_cli.read_process(pid)
    self.assertEqual(proc.process_id, pid)
    self.assertEqual(proc.process_configuration, {})
    self.assertEqual(proc.process_state, ProcessStateEnum.SPAWN)

    # Talk to the process to prove it is really running.
    test_client = TestClient()
    for expected in range(1, 6):
        self.assertEqual(expected, test_client.count(timeout=10))

    # Kill the process and start it again.
    self.pd_cli.cancel_process(pid)
    self.await_state_event(pid, ProcessStateEnum.TERMINATE)

    oldpid = pid
    pid = self.pd_cli.create_process(self.process_definition_id)
    self.subscribe_events(pid)

    pid2 = self.pd_cli.schedule_process(self.process_definition_id,
        schedule, configuration={}, process_id=pid)
    self.assertEqual(pid, pid2)
    self.assertNotEqual(oldpid, pid)
    self.await_state_event(pid, ProcessStateEnum.SPAWN)

    for expected in range(1, 6):
        self.assertEqual(expected, test_client.count(timeout=10))

    # Kill the process for good.
    self.pd_cli.cancel_process(pid)
    self.await_state_event(pid, ProcessStateEnum.TERMINATE)
def test_idempotency(self):
    """Every dispatcher operation must be safe to retry."""
    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS
    proc_name = 'myreallygoodname'

    pid = self.pd_cli.create_process(self.process_definition_id)
    self.waiter.start(pid)

    # NOTE: if UNSCHEDULED state is ever imported into ProcessStateEnum,
    # this assertion will need to change.
    proc = self.pd_cli.read_process(pid)
    self.assertEqual(proc.process_id, pid)
    self.assertEqual(proc.process_state, ProcessStateEnum.REQUESTED)

    pid2 = self.pd_cli.schedule_process(self.process_definition_id,
        schedule, configuration={}, process_id=pid, name=proc_name)
    self.assertEqual(pid, pid2)
    self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

    # Repeating schedule is harmless.
    pid2 = self.pd_cli.schedule_process(self.process_definition_id,
        schedule, configuration={}, process_id=pid, name=proc_name)
    self.assertEqual(pid, pid2)

    proc = self.pd_cli.read_process(pid)
    self.assertEqual(proc.process_id, pid)
    self.assertEqual(proc.process_configuration, {})
    self.assertEqual(proc.process_state, ProcessStateEnum.RUNNING)

    self.pd_cli.cancel_process(pid)
    self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)

    # Repeating cancel is harmless.
    self.pd_cli.cancel_process(pid)
    proc = self.pd_cli.read_process(pid)
    self.assertEqual(proc.process_id, pid)
    self.assertEqual(proc.process_configuration, {})
    self.assertEqual(proc.process_state, ProcessStateEnum.TERMINATED)
def launch_worker(self, queue_name, config):
    """Create, associate, and schedule an ingestion worker for a queue."""
    cfg = DotDict(config or {})
    cfg.process.queue_name = queue_name
    cfg.process.buffer_limit = self.CFG.get_safe('service.ingestion_management.buffer_limit', 10)
    cfg.process.time_limit = self.CFG.get_safe('service.ingestion_management.time_limit', 10)

    # Resolve the worker process definition by its well-known name.
    pdef_ids, _ = self.clients.resource_registry.find_resources(
        restype=RT.ProcessDefinition, name='ingestion_worker_process', id_only=True)
    validate_true(len(pdef_ids), 'No process definition for ingestion workers could be found')
    pdef_id = pdef_ids[0]

    pid = self.clients.process_dispatcher.create_process(process_definition_id=pdef_id)

    # Associate the worker with every ExchangeName matching the queue.
    xn_ids, _ = self.clients.resource_registry.find_resources(
        restype=RT.ExchangeName, name=queue_name, id_only=True)
    for xn_id in xn_ids:
        self.clients.resource_registry.create_association(xn_id, PRED.hasIngestionWorker, pid)

    # Restart the worker on abnormal exit; queue when resources are busy.
    sched = ProcessSchedule()
    sched.restart_mode = ProcessRestartMode.ABNORMAL
    sched.queueing_mode = ProcessQueueingMode.ALWAYS

    self.clients.process_dispatcher.schedule_process(
        process_definition_id=pdef_id, schedule=sched, process_id=pid,
        configuration=cfg)
def _launch_highcharts(self, viz_id, data_product_id, out_stream_id):
    '''
    Launches the high-charts transform.

    @param viz_id           Visualization resource to associate with the subscription
    @param data_product_id  Data product whose stream feeds the transform
    @param out_stream_id    Stream the transform publishes to
    @raises BadRequest          if the data product has no associated stream
    @raises ServiceUnavailable  if the transform fails to reach RUNNING in time
    '''
    stream_ids, _ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasStream, id_only=True)
    if not stream_ids:
        raise BadRequest("Can't launch high charts streaming: data product doesn't have associated stream (%s)" % data_product_id)

    queue_name = 'viz_%s' % data_product_id
    sub_id = self.clients.pubsub_management.create_subscription(
        name='viz transform for %s' % data_product_id,
        exchange_name=queue_name,
        stream_ids=stream_ids)

    self.clients.pubsub_management.activate_subscription(sub_id)
    self.clients.resource_registry.create_association(viz_id, PRED.hasSubscription, sub_id)

    config = DotDict()
    config.process.publish_streams.highcharts = out_stream_id
    config.process.queue_name = queue_name

    # This process MUST be launched the first time or fail so the user
    # doesn't wait there for nothing to happen.
    schedule = ProcessSchedule()
    schedule.restart_mode = ProcessRestartMode.NEVER
    schedule.queueing_mode = ProcessQueueingMode.NEVER

    # Launch the process
    procdef_id = self._get_highcharts_procdef()
    pid = self.clients.process_dispatcher.schedule_process(
        process_definition_id=procdef_id,
        schedule=schedule,
        configuration=config)

    # Make sure it launched or raise an error
    process_gate = ProcessStateGate(self.clients.process_dispatcher.read_process, pid, ProcessStateEnum.RUNNING)
    # Fix: 'await' became a reserved keyword in Python 3.7, making the
    # original dotted call a SyntaxError; fetch the method via getattr().
    gate_wait = getattr(process_gate, 'await')
    if not gate_wait(self.CFG.get_safe('endpoint.receive.timeout', 10)):
        raise ServiceUnavailable("Failed to launch high charts realtime visualization")
def test_schedule_with_config(self):
    """The configuration block must reach the process and be queryable."""
    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS

    pid = self.pd_cli.create_process(self.process_definition_id)
    self.waiter.start(pid)

    # verifies L4-CI-CEI-RQ66
    # Feed in a random string the process will echo back, proving the
    # configuration actually makes it to the instantiated process.
    test_response = uuid.uuid4().hex
    configuration = {"test_response": test_response}

    pid2 = self.pd_cli.schedule_process(self.process_definition_id,
        schedule, configuration=configuration, process_id=pid)
    self.assertEqual(pid, pid2)
    self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

    test_client = TestClient()

    # verifies L4-CI-CEI-RQ139
    # The configuration block (inputs, outputs, arbitrary config)
    # 1) makes it to the process and 2) is returned in process queries.
    self.assertEqual(test_client.query(), test_response)

    proc = self.pd_cli.read_process(pid)
    self.assertEqual(proc.process_id, pid)
    self.assertEqual(proc.process_configuration, configuration)

    # Kill the process for good.
    self.pd_cli.cancel_process(pid)
    self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
def test_schedule_with_config(self):
    """Configuration passed at schedule time must reach the process."""
    sched = ProcessSchedule()
    sched.queueing_mode = ProcessQueueingMode.ALWAYS

    pid = self.pd_cli.create_process(self.process_definition_id)
    self.waiter.start(pid)

    # verifies L4-CI-CEI-RQ66
    # A random echo string confirms that configuration actually makes it
    # to the instantiated process.
    test_response = uuid.uuid4().hex
    config = {"test_response": test_response}

    pid2 = self.pd_cli.schedule_process(self.process_definition_id,
        sched, configuration=config, process_id=pid)
    self.assertEqual(pid, pid2)
    self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

    client = TestClient()

    # verifies L4-CI-CEI-RQ139
    # The configuration block (inputs, outputs, arbitrary config)
    # 1) makes it to the process and 2) is returned in process queries.
    self.assertEqual(client.query(), test_response)

    proc = self.pd_cli.read_process(pid)
    self.assertEqual(proc.process_id, pid)
    self.assertEqual(proc.process_configuration, config)

    # Kill the process for good.
    self.pd_cli.cancel_process(pid)
    self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
def test_code_download(self):
    """Processes with a code URL run; those without one fail to spawn."""
    # Definition with only module/class -- nothing to download.
    nodl_def = ProcessDefinition(name='test_process_nodownload')
    nodl_def.executable = {'module': 'ion.my.test.process',
                           'class': 'TestProcess'}
    nodl_def_id = self.pd_cli.create_process_definition(nodl_def)

    # Definition whose URL points at this very file.
    # verifies L4-CI-CEI-RQ114
    url = "file://%s" % os.path.join(os.path.dirname(__file__),
        'test_process_dispatcher.py')
    dl_def = ProcessDefinition(name='test_process_download')
    dl_def.executable = {'module': 'ion.my.test.process',
                         'class': 'TestProcess', 'url': url}
    dl_def_id = self.pd_cli.create_process_definition(dl_def)

    sched = ProcessSchedule()
    sched.queueing_mode = ProcessQueueingMode.ALWAYS
    sched.target = ProcessTarget()

    self.waiter.start()

    # A module with no download source fails.
    pid_no_url = self.pd_cli.create_process(nodl_def_id)
    self.pd_cli.schedule_process(nodl_def_id, sched, process_id=pid_no_url)
    self.waiter.await_state_event(pid_no_url, ProcessStateEnum.FAILED)

    # A module with a URL downloads and runs.
    pid = self.pd_cli.create_process(dl_def_id)
    self.pd_cli.schedule_process(dl_def_id, sched, process_id=pid)
    self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)
def test_requested_ee(self):
    """Requests for specific execution engines are honored or rejected."""
    # Request a non-default engine that has no nodes yet: process queues.
    target = ProcessTarget(execution_engine_id="engine2")
    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS
    schedule.target = target

    pid = self.pd_cli.create_process(self.process_definition_id)
    self.waiter.start()
    self.pd_cli.schedule_process(self.process_definition_id,
        schedule, process_id=pid)
    self.waiter.await_state_event(pid, ProcessStateEnum.WAITING)

    # Request an unknown engine with NEVER queueing mode: the request
    # must be rejected outright.
    # verifies L4-CI-CEI-RQ52
    target = ProcessTarget(execution_engine_id="not-a-real-ee")
    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.NEVER
    schedule.target = target

    rejected_pid = self.pd_cli.create_process(self.process_definition_id)
    self.pd_cli.schedule_process(self.process_definition_id,
        schedule, process_id=rejected_pid)
    self.waiter.await_state_event(rejected_pid, ProcessStateEnum.REJECTED)

    # Add a node and eeagent for engine2; the queued process should leave
    # the queue and start running.
    node2_id = uuid.uuid4().hex
    self._send_node_state("engine2", node2_id)
    self._start_eeagent(node2_id)
    self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

    # Another engine2 process should now start immediately.
    target = ProcessTarget(execution_engine_id="engine2")
    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.NEVER
    schedule.target = target

    pid2 = self.pd_cli.create_process(self.process_definition_id)
    self.pd_cli.schedule_process(self.process_definition_id,
        schedule, process_id=pid2)
    self.waiter.await_state_event(pid2, ProcessStateEnum.RUNNING)

    # One more, carrying a node_exclusive tag.
    target = ProcessTarget(execution_engine_id="engine2",
        node_exclusive="hats")
    schedule = ProcessSchedule()
    schedule.queueing_mode = ProcessQueueingMode.NEVER
    schedule.target = target

    pid3 = self.pd_cli.create_process(self.process_definition_id)
    self.pd_cli.schedule_process(self.process_definition_id,
        schedule, process_id=pid3)
    self.waiter.await_state_event(pid3, ProcessStateEnum.RUNNING)

    # kill the processes for good
    for p in (pid, pid2, pid3):
        self.pd_cli.cancel_process(p)
        self.waiter.await_state_event(p, ProcessStateEnum.TERMINATED)