def _get_process_schedule(**kwargs):
    queueing_mode = kwargs.get('queueing_mode')
    restart_mode = kwargs.get('restart_mode')
    execution_engine_id = kwargs.get('execution_engine_id')
    node_exclusive = kwargs.get('node_exclusive')
    constraints = kwargs.get('constraints')

    process_schedule = ProcessSchedule()
    if queueing_mode is not None:
        try:
            process_schedule.queueing_mode = ProcessQueueingMode._value_map[queueing_mode]
        except KeyError:
            msg = "%s is not a known ProcessQueueingMode" % (queueing_mode)
            raise BadRequest(msg)

    if restart_mode is not None:
        try:
            process_schedule.restart_mode = ProcessRestartMode._value_map[restart_mode]
        except KeyError:
            msg = "%s is not a known ProcessRestartMode" % (restart_mode)
            raise BadRequest(msg)
    else:
        # if restart mode isn't specified, use NEVER. HA Agent itself will reschedule failures.
        process_schedule.restart_mode = ProcessRestartMode.NEVER

    target = ProcessTarget()
    if execution_engine_id is not None:
        target.execution_engine_id = execution_engine_id
    if node_exclusive is not None:
        target.node_exclusive = node_exclusive
    if constraints is not None:
        target.constraints = constraints

    process_schedule.target = target
    return process_schedule
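

# A minimal usage sketch for _get_process_schedule (hypothetical values; the
# string keys are assumed to be valid entries in the ProcessQueueingMode and
# ProcessRestartMode value maps):
#
#     schedule = _get_process_schedule(
#         queueing_mode="ALWAYS",
#         restart_mode="ABNORMAL",
#         execution_engine_id="engine1",
#         node_exclusive="abc")
#     assert schedule.target.execution_engine_id == "engine1"
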
    def schedule_process(self, upid, definition_id, configuration=None,
            subscribers=None, constraints=None, queueing_mode=None,
            restart_mode=None, execution_engine_id=None, node_exclusive=None):
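        """Create and schedule a process from the given definition.

        Publishes a ProcessLifecycleEvent for the dispatched process, builds a
        ProcessSchedule from the supplied queueing/restart modes, engine id,
        node_exclusive tag, and constraints, schedules it through the real PD
        client, and returns a dict with the process upid and its mapped state.
        """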

        definition = self.real_client.read_process_definition(definition_id)
        self.event_pub.publish_event(event_type="ProcessLifecycleEvent",
            origin=definition.name, origin_type="DispatchedHAProcess",
            state=ProcessStateEnum.RUNNING)

        create_upid = self.real_client.create_process(definition_id)

        process_schedule = ProcessSchedule()
        if queueing_mode is not None:
            try:
                process_schedule.queueing_mode = ProcessQueueingMode._value_map[queueing_mode]
            except KeyError:
                msg = "%s is not a known ProcessQueueingMode" % (queueing_mode)
                raise BadRequest(msg)

        if restart_mode is not None:
            try:
                process_schedule.restart_mode = ProcessRestartMode._value_map[restart_mode]
            except KeyError:
                msg = "%s is not a known ProcessRestartMode" % (restart_mode)
                raise BadRequest(msg)

        target = ProcessTarget()
        if execution_engine_id is not None:
            target.execution_engine_id = execution_engine_id
        if node_exclusive is not None:
            target.node_exclusive = node_exclusive
        if constraints is not None:
            target.constraints = constraints

        process_schedule.target = target

        sched_pid = self.real_client.schedule_process(definition_id,
                process_schedule, configuration=configuration, process_id=create_upid)

        proc = self.real_client.read_process(sched_pid)

        self._associate_process(proc)

        dict_proc = {
            'upid': proc.process_id,
            'state': self.state_map.get(proc.process_state, self.unknown_state),
        }
        return dict_proc
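
    # A minimal usage sketch (hypothetical client instance, upid, and
    # definition id; the keyword names mirror the signature above):
    #
    #     proc_info = client.schedule_process(
    #         "my-upid", "my-definition-id",
    #         queueing_mode="ALWAYS", execution_engine_id="engine1")
    #     print(proc_info['upid'], proc_info['state'])
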
    def test_schedule_bad_config(self):

        process_schedule = ProcessSchedule()

        # a non-JSON-serializable IonObject
        o = ProcessTarget()

        with self.assertRaises(BadRequest) as ar:
            self.pd_cli.schedule_process(self.process_definition_id,
                                         process_schedule,
                                         configuration={"bad": o})
        self.assertTrue(ar.exception.message.startswith("bad configuration"))
    def test_code_download(self):
        # create a process definition that has no URL; only module and class.
        process_definition_no_url = ProcessDefinition(
            name='test_process_nodownload')
        process_definition_no_url.executable = {
            'module': 'ion.my.test.process',
            'class': 'TestProcess'
        }
        process_definition_id_no_url = self.pd_cli.create_process_definition(
            process_definition_no_url)

        # create another that has a URL of the python file (this very file)
        # verifies L4-CI-CEI-RQ114
        url = "file://%s" % os.path.join(os.path.dirname(__file__),
                                         'test_process_dispatcher.py')
        process_definition = ProcessDefinition(name='test_process_download')
        process_definition.executable = {
            'module': 'ion.my.test.process',
            'class': 'TestProcess',
            'url': url
        }
        process_definition_id = self.pd_cli.create_process_definition(
            process_definition)

        process_target = ProcessTarget()
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        self.waiter.start()

        # Test a module with no download fails
        pid_no_url = self.pd_cli.create_process(process_definition_id_no_url)

        self.pd_cli.schedule_process(process_definition_id_no_url,
                                     process_schedule,
                                     process_id=pid_no_url)

        self.waiter.await_state_event(pid_no_url, ProcessStateEnum.FAILED)

        # Test a module with a URL runs
        pid = self.pd_cli.create_process(process_definition_id)

        self.pd_cli.schedule_process(process_definition_id,
                                     process_schedule,
                                     process_id=pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)
    def test_requested_ee(self):

        # request non-default engine

        process_target = ProcessTarget(execution_engine_id="engine2")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        pid = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start()

        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid)

        self.waiter.await_state_event(pid, ProcessStateEnum.WAITING)

        # request unknown engine, with NEVER queuing mode. The request
        # should be rejected.
        # verifies L4-CI-CEI-RQ52

        process_target = ProcessTarget(execution_engine_id="not-a-real-ee")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        rejected_pid = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=rejected_pid)

        self.waiter.await_state_event(rejected_pid, ProcessStateEnum.REJECTED)

        # now add a node and eeagent for engine2. the original process should
        # leave the queue and start running
        node2_id = uuid.uuid4().hex
        self._send_node_state("engine2", node2_id)
        self._start_eeagent(node2_id)

        self.waiter.await_state_event(pid, ProcessStateEnum.RUNNING)

        # spawn another process. it should start immediately.

        process_target = ProcessTarget(execution_engine_id="engine2")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        pid2 = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid2)

        self.waiter.await_state_event(pid2, ProcessStateEnum.RUNNING)

        # one more with node exclusive

        process_target = ProcessTarget(execution_engine_id="engine2",
                                       node_exclusive="hats")
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.NEVER
        process_schedule.target = process_target

        pid3 = self.pd_cli.create_process(self.process_definition_id)

        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid3)

        self.waiter.await_state_event(pid3, ProcessStateEnum.RUNNING)

        # kill the processes for good
        self.pd_cli.cancel_process(pid)
        self.waiter.await_state_event(pid, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.TERMINATED)
    def test_node_exclusive(self):

        # the node_exclusive constraint ensures that processes of the same
        # "kind" each get a VM exclusive of one another. Other processes may
        # run on these VMs, just not processes with the same node_exclusive
        # tag. Since we cannot directly query the contents of each node in
        # this test, we prove the capability by scheduling processes one by
        # one and checking their states.

        # verifies L4-CI-CEI-RQ121
        # verifies L4-CI-CEI-RQ57

        # first off, setUp() created a single node and eeagent.
        # We schedule two processes with the same "abc" node_exclusive
        # tag. Since there is only one node, the first process should run
        # and the second should be queued.

        process_target = ProcessTarget(execution_engine_id="engine1")
        process_target.node_exclusive = "abc"
        process_schedule = ProcessSchedule()
        process_schedule.queueing_mode = ProcessQueueingMode.ALWAYS
        process_schedule.target = process_target

        pid1 = self.pd_cli.create_process(self.process_definition_id)
        self.waiter.start()

        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid1)

        self.waiter.await_state_event(pid1, ProcessStateEnum.RUNNING)

        pid2 = self.pd_cli.create_process(self.process_definition_id)
        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.WAITING)

        # now demonstrate that the node itself is not full by launching
        # a third process without a node_exclusive tag -- it should start
        # immediately

        process_target.node_exclusive = None
        pid3 = self.pd_cli.create_process(self.process_definition_id)
        self.pd_cli.schedule_process(self.process_definition_id,
                                     process_schedule,
                                     process_id=pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.RUNNING)

        # finally, add a second node to the engine. pid2 should be started
        # since there is now a node free of the "abc" node_exclusive tag.
        node2_id = uuid.uuid4().hex
        self._send_node_state("engine1", node2_id)
        self._start_eeagent(node2_id)
        self.waiter.await_state_event(pid2, ProcessStateEnum.RUNNING)

        # kill the processes for good
        self.pd_cli.cancel_process(pid1)
        self.waiter.await_state_event(pid1, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid2)
        self.waiter.await_state_event(pid2, ProcessStateEnum.TERMINATED)
        self.pd_cli.cancel_process(pid3)
        self.waiter.await_state_event(pid3, ProcessStateEnum.TERMINATED)