def launch_worker(self, queue_name):
        config = DotDict()
        config.process.queue_name = queue_name
        config.process.buffer_limit = self.CFG.get_safe("service.ingestion_management.buffer_limit", 10)
        config.process.time_limit = self.CFG.get_safe("service.ingestion_management.time_limit", 10)

        process_definition_id, _ = self.clients.resource_registry.find_resources(
            restype=RT.ProcessDefinition, name="ingestion_worker_process", id_only=True
        )
        validate_true(len(process_definition_id), "No process definition for ingestion workers could be found")
        process_definition_id = process_definition_id[0]

        process_id = self.clients.process_dispatcher.create_process(process_definition_id=process_definition_id)

        xn_ids, _ = self.clients.resource_registry.find_resources(
            restype=RT.ExchangeName, name=queue_name, id_only=True
        )
        for xn_id in xn_ids:
            self.clients.resource_registry.create_association(xn_id, PRED.hasIngestionWorker, process_id)

        schedule = ProcessSchedule()
        schedule.restart_mode = ProcessRestartMode.ABNORMAL

        self.clients.process_dispatcher.schedule_process(
            process_definition_id=process_definition_id, schedule=schedule, process_id=process_id, configuration=config
        )
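A hedged read-back sketch for the association created above, using the resource_registry.find_objects signature seen elsewhere in these examples; the helper name is hypothetical:

    # Sketch: recover the ingestion worker process ids that launch_worker
    # associated with a given ExchangeName resource.
    def find_workers_for_queue(self, xn_id):
        worker_ids, _ = self.clients.resource_registry.find_objects(
            xn_id, PRED.hasIngestionWorker, id_only=True)
        return worker_ids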
def _get_process_schedule(**kwargs):
    queueing_mode = kwargs.get('queueing_mode')
    restart_mode = kwargs.get('restart_mode')
    execution_engine_id = kwargs.get('execution_engine_id')
    node_exclusive = kwargs.get('node_exclusive')
    constraints = kwargs.get('constraints')

    process_schedule = ProcessSchedule()
    if queueing_mode is not None:
        try:
            process_schedule.queueing_mode = ProcessQueueingMode._value_map[queueing_mode]
        except KeyError:
            msg = "%s is not a known ProcessQueueingMode" % (queueing_mode)
            raise BadRequest(msg)

    if restart_mode is not None:
        try:
            process_schedule.restart_mode = ProcessRestartMode._value_map[restart_mode]
        except KeyError:
            msg = "%s is not a known ProcessRestartMode" % (restart_mode)
            raise BadRequest(msg)
    else:
        # if restart mode isn't specified, use NEVER. HA Agent itself will reschedule failures.
        process_schedule.restart_mode = ProcessRestartMode.NEVER

    target = ProcessTarget()
    if execution_engine_id is not None:
        target.execution_engine_id = execution_engine_id
    if node_exclusive is not None:
        target.node_exclusive = node_exclusive
    if constraints is not None:
        target.constraints = constraints

    process_schedule.target = target
    return process_schedule
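A minimal usage sketch for the helper above; the mode string and target values are illustrative and rely only on the _value_map lookups already shown:

    schedule = _get_process_schedule(queueing_mode='ALWAYS',
                                     execution_engine_id='engine1',
                                     node_exclusive='abc',
                                     constraints={'site': 'chicago'})
    # restart_mode was omitted, so the helper defaulted it to NEVER;
    # the HA Agent is expected to reschedule failures itself.
    assert schedule.restart_mode == ProcessRestartMode.NEVER
    assert schedule.target.execution_engine_id == 'engine1'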
    def test_restart_mode(self):

        proc_def = DotDict()
        proc_def['name'] = "someprocess"
        proc_def['executable'] = {'module': 'my_module', 'class': 'class'}
        mock_read_definition = Mock()
        mock_read_definition.return_value = proc_def
        self.pd_service.backend.read_definition = mock_read_definition

        pid = self.pd_service.create_process("fake-process-def-id")

        pyon_restart_mode = ProcessRestartMode.ABNORMAL
        core_restart_mode = "ABNORMAL"

        proc_schedule = ProcessSchedule()
        proc_schedule.restart_mode = pyon_restart_mode

        configuration = {"some": "value"}

        pid2 = self.pd_service.schedule_process("fake-process-def-id",
            proc_schedule, configuration, pid)

        self.assertEqual(self.mock_core.schedule_process.call_count, 1)
        call_args, call_kwargs = self.mock_core.schedule_process.call_args
        self.assertEqual(call_kwargs['restart_mode'], core_restart_mode)
    def test_restart_mode(self):

        proc_def = DotDict()
        proc_def['name'] = "someprocess"
        proc_def['executable'] = {'module': 'my_module', 'class': 'class'}
        mock_read_definition = Mock()
        mock_read_definition.return_value = proc_def
        self.pd_service.backend.read_definition = mock_read_definition

        pid = self.pd_service.create_process("fake-process-def-id")

        pyon_restart_mode = ProcessRestartMode.ABNORMAL
        core_restart_mode = "ABNORMAL"

        proc_schedule = ProcessSchedule()
        proc_schedule.restart_mode = pyon_restart_mode

        configuration = {"some": "value"}

        pid2 = self.pd_service.schedule_process("fake-process-def-id",
                                                proc_schedule, configuration,
                                                pid)

        self.assertEqual(self.mock_core.schedule_process.call_count, 1)
        call_args, call_kwargs = self.mock_core.schedule_process.call_args
        self.assertEqual(call_kwargs['restart_mode'], core_restart_mode)
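The assertion above hinges on the service translating the pyon enum into the core's string form; a one-line sketch of that round-trip, using the same _value_map lookup that _get_process_schedule relies on:

    # string -> enum (inbound requests) yields the enum the test set.
    assert ProcessRestartMode._value_map['ABNORMAL'] == ProcessRestartMode.ABNORMAL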
    def test_node_exclusive(self):

        # the node_exclusive constraint is used to ensure multiple processes
        # of the same "kind" each get a VM exclusive of each other. Other
        # processes may run on these VMs, just not processes with the same
        # node_exclusive tag. Since we cannot directly query the contents
        # of each node in this test, we prove the capability by scheduling
        # processes one by one and checking their state.

        # verifies L4-CI-CEI-RQ121
        # verifies L4-CI-CEI-RQ57

        # first off, setUp() created a single node and eeagent.
        # We schedule two processes with the same "abc" node_exclusive
        # tag. Since there is only one node, the first process should run
        # and the second should be queued.

        process_target = ProcessTarget(execution_engine_id="engine1")
        process_target.node_exclusive = "abc"
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        pid1 = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start()

        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid1)

        self.waiter.await_state_event(pid1, ProcessStateEnum.RUNNING)

        pid2 = self.pd_cli.create_process(self.process_definition_id)
        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.WAITING)

        # now demonstrate that the node itself is not full by launching
        # a third process without a node_exclusive tag -- it should start
        # immediately

        process_target.node_exclusive = None
        pid3 = self.pd_cli.create_process(self.process_definition_id)
        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.RUNNING)

        # finally, add a second node to the engine. pid2 should be started
        # since there is an exclusive "abc" node free.
        node2_id = uuid.uuid4().hex
        self._send_node_state("engine1", node2_id)
        self._start_eeagent(node2_id)
        self.waiter.await_state_event(pid2, ProcessStateEnum.RUNNING)

        # kill the processes for good
        self.pd_cli.cancel_process(pid1)
        self.waiter.await_state_event(pid1, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.TERMINATED)
    def _launch_process(self,
                        queue_name='',
                        out_streams=None,
                        process_definition_id='',
                        configuration=None):
        """
        Launches the process
        """

        # ------------------------------------------------------------------------------------
        # Spawn Configuration and Parameters
        # ------------------------------------------------------------------------------------

        # guard against the default configuration=None before mutating it
        if configuration is None:
            configuration = {}
        if 'process' not in configuration:
            configuration['process'] = {}
        configuration['process']['queue_name'] = queue_name
        configuration['process']['publish_streams'] = out_streams

        # Setting the restart mode
        schedule = ProcessSchedule()
        schedule.restart_mode = ProcessRestartMode.ABNORMAL
        schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        # ------------------------------------------------------------------------------------
        # Process Spawning
        # ------------------------------------------------------------------------------------
        # Spawn the process
        pid = self.clients.process_dispatcher.schedule_process(
            process_definition_id=process_definition_id,
            schedule=schedule,
            configuration=configuration)
        validate_is_not_none(pid, "Process could not be spawned")

        return pid
    def _launch_process(self, queue_name='', out_streams=None, process_definition_id='', configuration=None):
        """
        Launches the process
        """

        # ------------------------------------------------------------------------------------
        # Spawn Configuration and Parameters
        # ------------------------------------------------------------------------------------

        # guard against the default configuration=None before mutating it
        if configuration is None:
            configuration = {}
        if 'process' not in configuration:
            configuration['process'] = {}
        configuration['process']['queue_name'] = queue_name
        configuration['process']['publish_streams'] = out_streams

        # Setting the restart mode
        schedule = ProcessSchedule()
        schedule.restart_mode = ProcessRestartMode.ABNORMAL

        # ------------------------------------------------------------------------------------
        # Process Spawning
        # ------------------------------------------------------------------------------------
        # Spawn the process
        pid = self.clients.process_dispatcher.schedule_process(
            process_definition_id=process_definition_id,
            schedule=schedule,
            configuration=configuration
        )
        validate_is_not_none(pid, "Process could not be spawned")

        return pid
    def launch_worker(self, queue_name, config):
        config = DotDict(config or {})
        config.process.queue_name = queue_name
        config.process.buffer_limit = self.CFG.get_safe(
            'service.ingestion_management.buffer_limit', 10)
        config.process.time_limit = self.CFG.get_safe(
            'service.ingestion_management.time_limit', 10)

        process_definition_id, _ = self.clients.resource_registry.find_resources(
            restype=RT.ProcessDefinition,
            name='ingestion_worker_process',
            id_only=True)
        validate_true(
            len(process_definition_id),
            'No process definition for ingestion workers could be found')
        process_definition_id = process_definition_id[0]

        process_id = self.clients.process_dispatcher.create_process(
            process_definition_id=process_definition_id)

        xn_ids, _ = self.clients.resource_registry.find_resources(
            restype=RT.ExchangeName, name=queue_name, id_only=True)
        for xn_id in xn_ids:
            self.clients.resource_registry.create_association(
                xn_id, PRED.hasIngestionWorker, process_id)

        schedule = ProcessSchedule()
        schedule.restart_mode = ProcessRestartMode.ABNORMAL
        schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        self.clients.process_dispatcher.schedule_process(
            process_definition_id=process_definition_id,
            schedule=schedule,
            process_id=process_id,
            configuration=config)
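For clarity, a sketch of what the worker receives: DotDict auto-creates the nested 'process' section, so the schedule_process call above effectively passes the plain dict below (queue name illustrative; the limits are the defaults used when CFG carries no overrides):

    expected_config = {
        'process': {
            'queue_name': 'science_granule_ingestion',  # illustrative
            'buffer_limit': 10,  # service.ingestion_management.buffer_limit default
            'time_limit': 10,    # service.ingestion_management.time_limit default
        }
    }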
    def test_create_schedule_cancel(self):
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        proc_name = 'myreallygoodname'
        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
                                            process_schedule,
                                            configuration={},
                                            process_id=pid,
                                            name=proc_name)
        self.assertEqual(pid, pid2)

        # verifies L4-CI-CEI-RQ141 and L4-CI-CEI-RQ142
        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_configuration, {})
        self.assertEqual(proc.process_state, ProcessStateEnum.RUNNING)

        # make sure process is readable directly from RR (mirrored)
        # verifies L4-CI-CEI-RQ63
        # verifies L4-CI-CEI-RQ64
        proc = self.rr_cli.read(pid)
        self.assertEqual(proc.process_id, pid)

        # now try communicating with the process to make sure it is really running
        test_client = TestClient()
        for i in range(5):
            self.assertEqual(i + 1, test_client.count(timeout=10))

        # verifies L4-CI-CEI-RQ147

        # check the process name was set in container
        got_proc_name = test_client.get_process_name(pid=pid2)
        self.assertEqual(proc_name, got_proc_name)

        # kill the process and start it again
        self.pd_cli.cancel_process(pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
                                            process_schedule,
                                            configuration={},
                                            process_id=pid)
        self.assertEqual(pid, pid2)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        for i in range(5):
            self.assertEqual(i + 1, test_client.count(timeout=10))

        # kill the process for good
        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
    def test_create_schedule_cancel(self):
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration={}, process_id=pid)
        self.assertEqual(pid, pid2)

        # verifies L4-CI-CEI-RQ141 and L4-CI-CEI-RQ142
        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_configuration, {})
        self.assertEqual(proc.process_state, ProcessStateEnum.RUNNING)

        # make sure process is readable directly from RR (mirrored)
        # verifies L4-CI-CEI-RQ63
        # verifies L4-CI-CEI-RQ64
        proc = self.rr_cli.read(pid)
        self.assertEqual(proc.process_id, pid)

        # now try communicating with the process to make sure it is really running
        test_client = TestClient()
        for i in range(5):
            self.assertEqual(i + 1, test_client.count(timeout=10))

        # verifies L4-CI-CEI-RQ147

        # kill the process and start it again
        self.pd_cli.cancel_process(pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
        self.waiter.stop()

        oldpid = pid

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration={}, process_id=pid)
        self.assertEqual(pid, pid2)
        self.assertNotEqual(oldpid, pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        for i in range(5):
            self.assertEqual(i + 1, test_client.count(timeout=10))

        # kill the process for good
        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
    def _add_test_process(self, restart_mode=None):
        process_schedule = ProcessSchedule()
        if restart_mode is not None:
            process_schedule.restart_mode = restart_mode
        pid = self.pd_cli.create_process(self.process_definition_id)

        pid_listen_name = "PDtestproc_%s" % uuid.uuid4().hex
        config = {'process': {'listen_name': pid_listen_name}}

        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid, configuration=config)

        client = TestClient(to_name=pid_listen_name)
        return pid, client
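A usage sketch for the helper above (test name hypothetical): the returned TestClient is bound to the per-process listen name, so one round-trip call proves the process is alive:

    def test_added_process_responds(self):
        pid, client = self._add_test_process(restart_mode=ProcessRestartMode.NEVER)
        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)
        self.assertEqual(1, client.count(timeout=10))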
    def test_code_download(self):
        # create a process definition that has no URL; only module and class.
        process_definition_no_url = ProcessDefinition(
            name='test_process_nodownload')
        process_definition_no_url.executable = {
            'module': 'ion.my.test.process',
            'class': 'TestProcess'
        }
        process_definition_id_no_url = self.pd_cli.create_process_definition(
            process_definition_no_url)

        # create another that has a URL of the python file (this very file)
        # verifies L4-CI-CEI-RQ114
        url = "file://%s" % os.path.join(os.path.dirname(__file__),
                                         'test_process_dispatcher.py')
        process_definition = ProcessDefinition(name='test_process_download')
        process_definition.executable = {
            'module': 'ion.my.test.process',
            'class': 'TestProcess',
            'url': url
        }
        process_definition_id = self.pd_cli.create_process_definition(
            process_definition)

        process_target = ProcessTarget()
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        self.waiter.start()

        # Test a module with no download fails
        pid_no_url = self.pd_cli.create_process(process_definition_id_no_url)

        self.pd_cli.schedule_process(process_definition_id_no_url,
                                     process_schedule,
                                     process_id=pid_no_url)

        self.waiter.await_state_event(pid_no_url, ProcessStateEnum.FAILED)

        # Test a module with a URL runs
        pid = self.pd_cli.create_process(process_definition_id)

        self.pd_cli.schedule_process(process_definition_id,
                                     process_schedule,
                                     process_id=pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)
    def _launch_highcharts(self, viz_id, data_product_id, out_stream_id):
        '''
        Launches the high-charts transform
        '''
        stream_ids, _ = self.clients.resource_registry.find_objects(
            data_product_id, PRED.hasStream, id_only=True)
        if not stream_ids:
            raise BadRequest(
                "Can't launch high charts streaming: data product doesn't have associated stream (%s)"
                % data_product_id)

        queue_name = 'viz_%s' % data_product_id
        sub_id = self.clients.pubsub_management.create_subscription(
            name='viz transform for %s' % data_product_id,
            exchange_name=queue_name,
            stream_ids=stream_ids)

        self.clients.pubsub_management.activate_subscription(sub_id)

        self.clients.resource_registry.create_association(
            viz_id, PRED.hasSubscription, sub_id)

        config = DotDict()
        config.process.publish_streams.highcharts = out_stream_id
        config.process.queue_name = queue_name

        # This process MUST be launched the first time or fail so the user
        # doesn't wait there for nothing to happen.
        schedule = ProcessSchedule()
        schedule.restart_mode = ProcessRestartMode.NEVER
        schedule.queueing_mode = ProcessQueueingMode.NEVER

        # Launch the process
        procdef_id = self._get_highcharts_procdef()
        pid = self.clients.process_dispatcher.schedule_process(
            process_definition_id=procdef_id,
            schedule=schedule,
            configuration=config)

        # Make sure it launched or raise an error

        process_gate = ProcessStateGate(
            self.clients.process_dispatcher.read_process, pid,
            ProcessStateEnum.RUNNING)
        if not process_gate.await(self.CFG.get_safe(
                'endpoint.receive.timeout', 10)):
            raise ServiceUnavailable(
                "Failed to launch high charts realtime visualization")
    def test_node_exclusive_eeid(self):

        proc_def = DotDict()
        proc_def['name'] = "someprocess"
        proc_def['executable'] = {'module': 'my_module', 'class': 'class'}
        mock_read_definition = Mock()
        mock_read_definition.return_value = proc_def
        self.pd_service.backend.read_definition = mock_read_definition

        pid = self.pd_service.create_process("fake-process-def-id")

        node_exclusive = "someattr"
        ee_id = "some_ee"

        proc_schedule = ProcessSchedule()
        proc_schedule.target.node_exclusive = node_exclusive
        proc_schedule.target.execution_engine_id = ee_id

        configuration = {"some": "value"}

        pid2 = self.pd_service.schedule_process("fake-process-def-id",
                                                proc_schedule, configuration,
                                                pid)

        self.assertEqual(self.mock_core.schedule_process.call_count, 1)
        call_args, call_kwargs = self.mock_core.schedule_process.call_args
        self.assertEqual(call_kwargs['execution_engine_id'], ee_id)
        self.assertEqual(call_kwargs['node_exclusive'], node_exclusive)
    def _add_test_process(self, restart_mode=None):
        process_schedule = ProcessSchedule()
        if restart_mode is not None:
            process_schedule.restart_mode = restart_mode
        pid = self.pd_cli.create_process(self.process_definition_id)

        pid_listen_name = "PDtestproc_%s" % uuid.uuid4().hex
        config = {'process': {'listen_name': pid_listen_name}}

        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid,
                                     configuration=config)

        client = TestClient(to_name=pid_listen_name)
        return pid, client
    def schedule_process(self, upid, definition_id, configuration=None,
            subscribers=None, constraints=None, queueing_mode=None,
            restart_mode=None, execution_engine_id=None, node_exclusive=None):

        definition = self.real_client.read_process_definition(definition_id)
        self.event_pub.publish_event(event_type="ProcessLifecycleEvent",
            origin=definition.name, origin_type="DispatchedHAProcess",
            state=ProcessStateEnum.RUNNING)

        create_upid = self.real_client.create_process(definition_id)

        process_schedule = ProcessSchedule()
        if queueing_mode is not None:
            try:
                process_schedule.queueing_mode = ProcessQueueingMode._value_map[queueing_mode]
            except KeyError:
                msg = "%s is not a known ProcessQueueingMode" % (queueing_mode)
                raise BadRequest(msg)

        if restart_mode is not None:
            try:
                process_schedule.restart_mode = ProcessRestartMode._value_map[restart_mode]
            except KeyError:
                msg = "%s is not a known ProcessRestartMode" % (restart_mode)
                raise BadRequest(msg)

        target = ProcessTarget()
        if execution_engine_id is not None:
            target.execution_engine_id = execution_engine_id
        if node_exclusive is not None:
            target.node_exclusive = node_exclusive
        if constraints is not None:
            target.constraints = constraints

        process_schedule.target = target

        sched_pid = self.real_client.schedule_process(definition_id,
                process_schedule, configuration=configuration, process_id=create_upid)

        proc = self.real_client.read_process(sched_pid)

        self._associate_process(proc)

        dict_proc = {
            'upid': proc.process_id,
            'state': self.state_map.get(proc.process_state, self.unknown_state),
        }
        return dict_proc
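A hedged sketch of driving the HA wrapper above; ha_client and the ids are hypothetical, and the result carries only the normalized 'upid' and 'state' keys built at the end of the method:

    proc = ha_client.schedule_process('upid-1', definition_id,
                                      queueing_mode='ALWAYS',
                                      restart_mode='ABNORMAL')
    log.debug("HA process %s is in state %s", proc['upid'], proc['state'])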
    def test_idempotency(self):
        # ensure every operation can be safely retried
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        proc_name = 'myreallygoodname'
        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start(pid)

        # note: if we import UNSCHEDULED state into ProcessStateEnum,
        # this assertion will need to change.
        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_state, ProcessStateEnum.REQUESTED)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
                                            process_schedule,
                                            configuration={},
                                            process_id=pid,
                                            name=proc_name)
        self.assertEqual(pid, pid2)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        # repeating schedule is harmless
        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
                                            process_schedule,
                                            configuration={},
                                            process_id=pid,
                                            name=proc_name)
        self.assertEqual(pid, pid2)

        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_configuration, {})
        self.assertEqual(proc.process_state, ProcessStateEnum.RUNNING)

        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)

        # repeating cancel is harmless
        self.pd_cli.cancel_process(pid)
        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_configuration, {})
        self.assertEqual(proc.process_state, ProcessStateEnum.TERMINATED)
    def test_create_schedule_cancel(self):
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.subscribe_events(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration={}, process_id=pid)
        self.assertEqual(pid, pid2)

        self.await_state_event(pid, ProcessStateEnum.SPAWN)

        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_configuration, {})
        self.assertEqual(proc.process_state, ProcessStateEnum.SPAWN)

        # now try communicating with the process to make sure it is really running
        test_client = TestClient()
        for i in range(5):
            self.assertEqual(i + 1, test_client.count(timeout=10))

        # kill the process and start it again
        self.pd_cli.cancel_process(pid)

        self.await_state_event(pid, ProcessStateEnum.TERMINATE)

        oldpid = pid

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.subscribe_events(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration={}, process_id=pid)
        self.assertEqual(pid, pid2)
        self.assertNotEqual(oldpid, pid)

        self.await_state_event(pid, ProcessStateEnum.SPAWN)

        for i in range(5):
            self.assertEqual(i + 1, test_client.count(timeout=10))

        # kill the process for good
        self.pd_cli.cancel_process(pid)
        self.await_state_event(pid, ProcessStateEnum.TERMINATE)
    def test_schedule_bad_config(self):

        process_schedule = ProcessSchedule()

        # a non-JSON-serializable IonObject
        o = ProcessTarget()

        with self.assertRaises(BadRequest) as ar:
            self.pd_cli.schedule_process(self.process_definition_id,
                                         process_schedule,
                                         configuration={"bad": o})
        self.assertTrue(ar.exception.message.startswith("bad configuration"))
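For contrast, a sketch of a configuration that would pass the same check: plain JSON-serializable types only, no IonObjects:

    ok_config = {'process': {'queue_name': 'q1'}, 'values': [1, 2.5, 'three']}
    self.pd_cli.schedule_process(self.process_definition_id,
                                 process_schedule,
                                 configuration=ok_config)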
    def test_idempotency(self):
        # ensure every operation can be safely retried
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        proc_name = 'myreallygoodname'
        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start(pid)

        # note: if we import UNSCHEDULED state into ProcessStateEnum,
        # this assertion will need to change.
        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_state, ProcessStateEnum.REQUESTED)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration={}, process_id=pid, name=proc_name)
        self.assertEqual(pid, pid2)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        # repeating schedule is harmless
        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration={}, process_id=pid, name=proc_name)
        self.assertEqual(pid, pid2)

        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_configuration, {})
        self.assertEqual(proc.process_state, ProcessStateEnum.RUNNING)

        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)

        # repeating cancel is harmless
        self.pd_cli.cancel_process(pid)
        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_configuration, {})
        self.assertEqual(proc.process_state, ProcessStateEnum.TERMINATED)
    def _launch_highcharts(self, viz_id, data_product_id, out_stream_id):
        '''
        Launches the high-charts transform
        '''
        stream_ids, _ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasStream, id_only=True)
        if not stream_ids:
            raise BadRequest("Can't launch high charts streaming: data product doesn't have associated stream (%s)" % data_product_id)

        queue_name = 'viz_%s' % data_product_id
        sub_id = self.clients.pubsub_management.create_subscription(
                    name='viz transform for %s' % data_product_id, 
                    exchange_name=queue_name,
                    stream_ids=stream_ids)

        self.clients.pubsub_management.activate_subscription(sub_id)

        self.clients.resource_registry.create_association(viz_id, PRED.hasSubscription, sub_id)
        
        config = DotDict()
        config.process.publish_streams.highcharts = out_stream_id
        config.process.queue_name = queue_name

        # This process MUST be launched the first time or fail so the user
        # doesn't wait there for nothing to happen.
        schedule = ProcessSchedule()
        schedule.restart_mode = ProcessRestartMode.NEVER
        schedule.queueing_mode = ProcessQueueingMode.NEVER

        # Launch the process
        procdef_id = self._get_highcharts_procdef()
        pid = self.clients.process_dispatcher.schedule_process(
                process_definition_id=procdef_id,
                schedule=schedule,
                configuration=config)

        # Make sure it launched or raise an error

        process_gate = ProcessStateGate(self.clients.process_dispatcher.read_process, pid, ProcessStateEnum.RUNNING)
        if not process_gate.await(self.CFG.get_safe('endpoint.receive.timeout', 10)):
            raise ServiceUnavailable("Failed to launch high charts realtime visualization")
    def test_schedule_with_config(self):

        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start(pid)

        # verifies L4-CI-CEI-RQ66

        # feed in a string that the process will return -- verifies that
        # configuration actually makes it to the instantiated process
        test_response = uuid.uuid4().hex
        configuration = {"test_response": test_response}

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
                                            process_schedule,
                                            configuration=configuration,
                                            process_id=pid)
        self.assertEqual(pid, pid2)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        test_client = TestClient()

        # verifies L4-CI-CEI-RQ139
        # assure that configuration block (which can contain inputs, outputs,
        # and arbitrary config) 1) makes it to the process and 2) is returned
        # in process queries

        self.assertEqual(test_client.query(), test_response)

        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_configuration, configuration)

        # kill the process for good
        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
    def test_schedule_cancel(self):
        process_schedule = ProcessSchedule()
        process_schedule.target = ProcessTarget()
        process_schedule.target.constraints = {'site' : 'chicago'}

        config = {'some': "value"}

        pid = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration=config)

        self.assertEqual(self.fake_pd.dispatch_process.call_count, 1)
        args, kwargs = self.fake_pd.dispatch_process.call_args
        self.assertFalse(args)
        self.assertEqual(set(kwargs),
            set(['upid', 'spec', 'subscribers', 'constraints']))

        spec = kwargs['spec']
        self.assertEqual(spec['run_type'], 'pyon_single')
        self.assertEqual(spec['parameters']['rel']['apps'][0]['config'],
            config)

        self.pd_cli.cancel_process(pid)
        self.fake_pd.terminate_process.assert_called_once_with(upid=pid)
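The shape of kwargs['spec'] that the assertions above inspect, reconstructed from those assertions (values illustrative):

    # run_type and the nested config path are asserted above; upid,
    # subscribers and constraints travel as separate kwargs.
    spec = {
        'run_type': 'pyon_single',
        'parameters': {'rel': {'apps': [{'config': {'some': 'value'}}]}},
    }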
    def launch(self, agent_config, process_definition_id):
        """
        schedule the launch
        """

        log.debug("schedule agent process")
        process_schedule = ProcessSchedule(restart_mode=ProcessRestartMode.ABNORMAL,
                                           queueing_mode=ProcessQueueingMode.ALWAYS)
        process_id = self.process_dispatcher_client.schedule_process(process_definition_id=process_definition_id,
                                                                      schedule=process_schedule,
                                                                      configuration=agent_config)

        self.process_id = process_id
        return process_id
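Note the constructor form above: assuming IonObject-style keyword construction, it builds the same schedule as the attribute-assignment spelling used by most of the other snippets:

    schedule = ProcessSchedule()
    schedule.restart_mode = ProcessRestartMode.ABNORMAL
    schedule.queueing_mode = ProcessQueueingMode.ALWAYS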
    def test_schedule_with_config(self):

        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start(pid)

        # verifies L4-CI-CEI-RQ66

        # feed in a string that the process will return -- verifies that
        # configuration actually makes it to the instantiated process
        test_response = uuid.uuid4().hex
        configuration = {"test_response" : test_response}

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, configuration=configuration, process_id=pid)
        self.assertEqual(pid, pid2)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        test_client = TestClient()

        # verifies L4-CI-CEI-RQ139
        # assure that configuration block (which can contain inputs, outputs,
        # and arbitrary config) 1) makes it to the process and 2) is returned
        # in process queries

        self.assertEqual(test_client.query(), test_response)

        proc = self.pd_cli.read_process(pid)
        self.assertEqual(proc.process_id, pid)
        self.assertEqual(proc.process_configuration, configuration)

        # kill the process for good
        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
    def test_code_download(self):
        # create a process definition that has no URL; only module and class.
        process_definition_no_url = ProcessDefinition(name='test_process_nodownload')
        process_definition_no_url.executable = {'module': 'ion.my.test.process',
                'class': 'TestProcess'}
        process_definition_id_no_url = self.pd_cli.create_process_definition(process_definition_no_url)

        # create another that has a URL of the python file (this very file)
        # verifies L4-CI-CEI-RQ114
        url = "file://%s" % os.path.join(os.path.dirname(__file__), 'test_process_dispatcher.py')
        process_definition = ProcessDefinition(name='test_process_download')
        process_definition.executable = {'module': 'ion.my.test.process',
                'class': 'TestProcess', 'url': url}
        process_definition_id = self.pd_cli.create_process_definition(process_definition)

        process_target = ProcessTarget()
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        self.waiter.start()

        # Test a module with no download fails
        pid_no_url = self.pd_cli.create_process(process_definition_id_no_url)

        self.pd_cli.schedule_process(process_definition_id_no_url,
            process_schedule, process_id=pid_no_url)

        self.waiter.await_state_event(pid_no_url, ProcessStateEnum.FAILED)

        # Test a module with a URL runs
        pid = self.pd_cli.create_process(process_definition_id)

        self.pd_cli.schedule_process(process_definition_id,
            process_schedule, process_id=pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)
    def test_create_schedule_cancel(self):
        process_schedule = ProcessSchedule()

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.subscribe_events(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
                                            process_schedule,
                                            configuration={},
                                            process_id=pid)
        self.assertEqual(pid, pid2)

        self.await_state_event(pid, ProcessStateEnum.SPAWN)

        # now try communicating with the process to make sure it is really running
        test_client = TestClient()
        for i in range(5):
            # this timeout may be too low
            self.assertEqual(i + 1, test_client.count(timeout=1))

        # kill the process and start it again
        self.pd_cli.cancel_process(pid)

        self.await_state_event(pid, ProcessStateEnum.TERMINATE)

        oldpid = pid

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.subscribe_events(pid)

        pid2 = self.pd_cli.schedule_process(self.process_definition_id,
                                            process_schedule,
                                            configuration={},
                                            process_id=pid)
        self.assertEqual(pid, pid2)
        self.assertNotEqual(oldpid, pid)

        self.await_state_event(pid, ProcessStateEnum.SPAWN)

        for i in range(5):
            # this timeout may be too low
            self.assertEqual(i + 1, test_client.count(timeout=1))

        # kill the process for good
        self.pd_cli.cancel_process(pid)
        self.await_state_event(pid, ProcessStateEnum.TERMINATE)
    def start_external_dataset_agent_instance(self, external_dataset_agent_instance_id=''):
        """Launch a dataset agent instance process and return its process id. The agent instance
        resource must exist and be associated with an external dataset.

        @param external_dataset_agent_instance_id    str
        @retval process_id    str
        @throws NotFound    object with specified id does not exist
        """
        #todo: may want to call retrieve_external_dataset_agent_instance here
        #todo: if instance running, then return or throw
        #todo: if instance exists and dataset_agent_instance_obj.dataset_agent_config is completed, then just schedule_process

        dataset_agent_instance_obj = self.clients.resource_registry.read(external_dataset_agent_instance_id)

        #retrieve the associated external dataset device
        ext_dataset_ids, _ = self.clients.resource_registry.find_subjects(RT.ExternalDataset, PRED.hasAgentInstance, external_dataset_agent_instance_id, True)
        if not ext_dataset_ids:
            raise NotFound("No External Dataset attached to this Dataset Agent Instance " + str(external_dataset_agent_instance_id))
        if len(ext_dataset_ids) > 1:
            raise BadRequest("Dataset Agent Instance should only have ONE External Dataset" + str(external_dataset_agent_instance_id))
        ext_dataset_id = ext_dataset_ids[0]
        log.debug("start_external_dataset_agent_instance: external dataset is %s connected to dataset agent instance %s ", str(ext_dataset_id),  str(external_dataset_agent_instance_id))


        #retrieve the external dataset model
        model_ids, _ = self.clients.resource_registry.find_objects(ext_dataset_id, PRED.hasModel, RT.ExternalDatasetModel, True)
        if not model_ids:
            raise NotFound("No External Dataset Model attached to this External Dataset " + str(ext_dataset_id))

        ext_dataset_model_id = model_ids[0]
        log.debug("start_external_dataset_agent_instance: External Dataset Model %s", ext_dataset_model_id)

        #retrieve the associated external dataset agent
        agent_ids, _ = self.clients.resource_registry.find_subjects(RT.ExternalDatasetAgent, PRED.hasModel, ext_dataset_model_id, True)
        if not agent_ids:
            raise NotFound("No External Dataset Agent attached to this External Dataset Model " + str(ext_dataset_model_id))

        ext_dataset_agent_id = agent_ids[0]
        log.debug("start_external_dataset_agent_instance: external dataset agent '%s'" % ext_dataset_agent_id)

        #retrieve the associated process definition
        process_def_ids, _ = self.clients.resource_registry.find_objects(ext_dataset_agent_id, PRED.hasProcessDefinition, RT.ProcessDefinition, True)
        if not process_def_ids:
            raise NotFound("No Process Definition  attached to this ExtDataset Agent " + str(ext_dataset_agent_id))
        if len(process_def_ids) > 1:
            raise BadRequest("ExtDataset Agent should only have ONE Process Definition" + str(ext_dataset_agent_id))

        process_definition_id = process_def_ids[0]
        log.debug("activate_instrument: agent process definition %s"  +  str(process_definition_id))

        # retrieve the process definition information
        process_def_obj = self.clients.resource_registry.read(process_definition_id)

        out_streams = {}
        #retrieve the output products
        data_product_ids, _ = self.clients.resource_registry.find_objects(ext_dataset_id, PRED.hasOutputProduct, RT.DataProduct, True)
        if not data_product_ids:
            raise NotFound("No output Data Products attached to this External Dataset " + str(ext_dataset_id))

        for product_id in data_product_ids:
            stream_ids, _ = self.clients.resource_registry.find_objects(product_id, PRED.hasStream, RT.Stream, True)

            log.debug("start_external_dataset_agent_instance:output stream ids: %s"  +  str(stream_ids))
            #One stream per product ...for now.
            if not stream_ids:
                raise NotFound("No Stream  attached to this Data Product " + str(product_id))
            if len(stream_ids) > 1:
                raise BadRequest("Data Product should only have ONE Stream" + str(product_id))

            # retrieve the stream
            stream_obj = self.clients.resource_registry.read(stream_ids[0])

            out_streams['parsed'] = stream_ids[0]

        # Create agent config.
        dataset_agent_instance_obj.dataset_agent_config = {
            'driver_config' : dataset_agent_instance_obj.dataset_driver_config,
            'stream_config' : out_streams,
            'agent'         : {'resource_id': ext_dataset_id},
            'test_mode' : True
        }

        log.debug("start_external_dataset_agent_instance: agent_config %s ", str(dataset_agent_instance_obj.dataset_agent_config))

        # Setting the restart mode
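        # ABNORMAL restarts the agent only after an abnormal exit, not
        # after a requested termination.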
        schedule = ProcessSchedule()
        schedule.restart_mode = ProcessRestartMode.ABNORMAL

        pid = self.clients.process_dispatcher.schedule_process(process_definition_id=process_definition_id,
                                                               schedule=schedule,
                                                               configuration=dataset_agent_instance_obj.dataset_agent_config)
        log.debug("start_external_dataset_agent_instance: schedule_process %s", pid)


        # add the process id and update the resource
        dataset_agent_instance_obj.agent_process_id = pid
        self.update_external_dataset_agent_instance(dataset_agent_instance_obj)

        return pid
    def test_requested_ee(self):

        # request non-default engine

        process_target = ProcessTarget(execution_engine_id="engine2")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start()

        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.WAITING)


        # request unknown engine, with NEVER queuing mode. The request
        # should be rejected.
        # verifies L4-CI-CEI-RQ52

        process_target = ProcessTarget(execution_engine_id="not-a-real-ee")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        rejected_pid = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=rejected_pid)

        self.waiter.await_state_event(rejected_pid, ProcessStateEnum.REJECTED)

        # now add a node and eeagent for engine2. original process should leave
        # queue and start running
        node2_id = uuid.uuid4().hex
        self._send_node_state("engine2", node2_id)
        self._start_eeagent(node2_id)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        # spawn another process. it should start immediately.

        process_target = ProcessTarget(execution_engine_id="engine2")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        pid2 = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid2)

        self.waiter.await_state_event(pid2, ProcessStateEnum.RUNNING)

        # one more with node exclusive

        process_target = ProcessTarget(execution_engine_id="engine2",
            node_exclusive="hats")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        pid3 = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
            process_schedule, process_id=pid3)

        self.waiter.await_state_event(pid3, ProcessStateEnum.RUNNING)

        # kill the processes for good
        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.TERMINATED)
    def test_node_exclusive(self):

        # the node_exclusive constraint is used to ensure multiple processes
        # of the same "kind" each get a VM exclusive of each other. Other
        # processes may run on these VMs, just not processes with the same
        # node_exclusive tag. Since we cannot directly query the contents
        # of each node in this test, we prove the capability by scheduling
        # processes one by one and checking their state.

        # verifies L4-CI-CEI-RQ121
        # verifies L4-CI-CEI-RQ57

        # first off, setUp() created a single node and eeagent.
        # We schedule two processes with the same "abc" node_exclusive
        # tag. Since there is only one node, the first process should run
        # and the second should be queued.

        process_target = ProcessTarget(execution_engine_id="engine1")
        process_target.node_exclusive = "abc"
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        pid1 = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start()

        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid1)

        self.waiter.await_state_event(pid1, ProcessStateEnum.RUNNING)

        pid2 = self.pd_cli.create_process(self.process_definition_id)
        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.WAITING)

        # now demonstrate that the node itself is not full by launching
        # a third process without a node_exclusive tag -- it should start
        # immediately

        process_target.node_exclusive = None
        pid3 = self.pd_cli.create_process(self.process_definition_id)
        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.RUNNING)

        # finally, add a second node to the engine. pid2 should be started
        # since there is an exclusive "abc" node free.
        node2_id = uuid.uuid4().hex
        self._send_node_state("engine1", node2_id)
        self._start_eeagent(node2_id)
        self.waiter.await_state_event(pid2, ProcessStateEnum.RUNNING)

        # kill the processes for good
        self.pd_cli.cancel_process(pid1)
        self.waiter.await_state_event(pid1, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.TERMINATED)
    def test_requested_ee(self):

        # request non-default engine

        process_target = ProcessTarget(execution_engine_id="engine2")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start()

        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.WAITING)

        # request unknown engine, with NEVER queuing mode. The request
        # should be rejected.
        # verifies L4-CI-CEI-RQ52

        process_target = ProcessTarget(execution_engine_id="not-a-real-ee")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        rejected_pid = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=rejected_pid)

        self.waiter.await_state_event(rejected_pid, ProcessStateEnum.REJECTED)

        # now add a node and eeagent for engine2. original process should leave
        # queue and start running
        node2_id = uuid.uuid4().hex
        self._send_node_state("engine2", node2_id)
        self._start_eeagent(node2_id)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        # spawn another process. it should start immediately.

        process_target = ProcessTarget(execution_engine_id="engine2")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        pid2 = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid2)

        self.waiter.await_state_event(pid2, ProcessStateEnum.RUNNING)

        # one more with node exclusive

        process_target = ProcessTarget(execution_engine_id="engine2",
                                       node_exclusive="hats")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        pid3 = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid3)

        self.waiter.await_state_event(pid3, ProcessStateEnum.RUNNING)

        # kill the processes for good
        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.TERMINATED)