Example No. 1
def test_wfp_start_processor():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    assert wfp.start_processor()
    assert not wfp._enqueue_thread
    assert not wfp._dequeue_thread
    assert not wfp._enqueue_thread_terminate.is_set()
    assert not wfp._dequeue_thread_terminate.is_set()
    assert not wfp._wfp_terminate.is_set()
    assert wfp._wfp_process.is_alive()

    wfp._wfp_terminate.set()
    wfp._wfp_process.join()
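
These snippets are excerpted from the RADICAL-EnTK test suite, so module-level imports, fixtures and decorators are omitted; the mocked_* and (s, i, b, l) parameters in later examples come from @mock.patch and parametrization decorators that the excerpts drop. A minimal sketch of the setup most snippets assume is below; the exact import paths, the Amgr alias and the credential defaults are assumptions, not taken from the source.

# Assumed module-level setup for these snippets (a sketch, not from the source)
import os
import json
import time
import shutil

import pika
import threading     as mt
import radical.utils as ru

from unittest        import mock
from threading       import Thread, Event
from multiprocessing import Process

from radical.entk import Pipeline, Stage, Task, states, AppManager
from radical.entk.appman.wfprocessor import WFprocessor   # assumed module path
from radical.entk.execman.rp         import TaskManager   # assumed module path

Amgr = AppManager                                  # alias used by several snippets
pwd  = os.path.dirname(os.path.abspath(__file__))  # as in Example No. 26

# RabbitMQ endpoint and credentials, read from the environment as in
# Example No. 26 (the credential defaults are assumptions)
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port     = int(os.environ.get('RMQ_PORT', '5672'))
username = os.environ.get('RMQ_USERNAME', 'guest')
password = os.environ.get('RMQ_PASSWORD', 'guest')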
Example No. 2
def test_wfp_initialization(s, b, l):

    p  = Pipeline()
    stage = Stage()
    t  = Task()

    t.executable = '/bin/date'
    stage.add_tasks(t)
    p.add_stages(stage)
    rmq_conn_params = pika.ConnectionParameters(host=hostname, port=port)

    wfp = WFprocessor(sid='rp.session.local.0000',
                      workflow=set([p]),
                      pending_queue=['pending'],
                      completed_queue=['completed'],
                      rmq_conn_params=rmq_conn_params,
                      resubmit_failed=True)

    assert len(wfp._uid.split('.')) == 2
    assert 'wfprocessor'            == wfp._uid.split('.')[0]
    assert wfp._pending_queue       == ['pending']
    assert wfp._completed_queue     == ['completed']
    assert wfp._rmq_conn_params     == rmq_conn_params
    assert wfp._wfp_process         is None
    assert wfp._workflow            == set([p])

    if not isinstance(s, str):
        wfp = WFprocessor(sid=s,
                          workflow=set([p]),
                          pending_queue=l,
                          completed_queue=l,
                          rmq_conn_params=rmq_conn_params,
                          resubmit_failed=b)
Example No. 4
    def test_wfp_workflow_incomplete(self, mocked_init, mocked_Logger):

        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)
        wfp._logger = mocked_Logger
        pipe = mock.Mock()
        pipe.lock = mt.Lock()
        pipe.completed = False
        wfp._workflow = set([pipe])
        self.assertTrue(wfp.workflow_incomplete())

        pipe.completed = True
        self.assertFalse(wfp.workflow_incomplete())

        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)
        with self.assertRaises(Exception):
            wfp.workflow_incomplete()
Example No. 5
    def test_check_processor(self, mocked_init):

        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)

        wfp._enqueue_thread = None
        wfp._dequeue_thread = None

        self.assertFalse(wfp.check_processor())

        wfp._enqueue_thread = mock.Mock()
        wfp._enqueue_thread.is_alive = mock.MagicMock(
            side_effect=[False, False, True, True])
        wfp._dequeue_thread = mock.Mock()
        wfp._dequeue_thread.is_alive = mock.MagicMock(
            side_effect=[False, True, False, True])

        self.assertFalse(wfp.check_processor())
        self.assertFalse(wfp.check_processor())
        self.assertFalse(wfp.check_processor())
        self.assertTrue(wfp.check_processor())
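
The side_effect lists above make successive is_alive() calls walk all four enqueue/dequeue combinations, so check_processor() is true only on the fourth call, when both threads report alive. A standalone illustration of this mock sequencing behaviour (hypothetical names):

from unittest import mock

probe = mock.MagicMock(side_effect=[False, True])
assert probe() is False   # the first call returns the first list element
assert probe() is True    # the second call returns the second element
# a third call would raise StopIteration: the side_effect list is exhausted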
Example No. 6
    def test_create_workload(self, mocked_init, mocked_advance, mocked_Logger,
                             mocked_Reporter):
        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)

        wfp._resubmit_failed = False
        pipe = mock.Mock()
        pipe.lock = mt.Lock()
        pipe.state = states.INITIAL
        pipe.completed = False
        pipe.current_stage = 1

        stage = mock.Mock()
        stage.uid = 'stage.0000'
        stage.state = states.SCHEDULING

        task = mock.Mock()
        task.uid = 'task.0000'
        task.state = states.INITIAL

        stage.tasks = [task]
        pipe.stages = [stage]
        wfp._workflow = set([pipe])

        workload, scheduled_stages = wfp._create_workload()

        self.assertEqual(workload, [task])
        self.assertEqual(scheduled_stages, [stage])
Example No. 7
def test_wfp_initialization(s, i, b, l):

    p = Pipeline()
    st = Stage()
    t = Task()
    t.executable = ['/bin/date']
    st.add_tasks(t)
    p.add_stages(st)

    wfp = WFprocessor(sid='rp.session.local.0000',
                      workflow=set([p]),
                      pending_queue=['pending'],
                      completed_queue=['completed'],
                      mq_hostname=hostname,
                      port=port,
                      resubmit_failed=True)

    assert len(wfp._uid.split('.')) == 2
    assert 'wfprocessor' == wfp._uid.split('.')[0]
    assert wfp._pending_queue == ['pending']
    assert wfp._completed_queue == ['completed']
    assert wfp._mq_hostname == hostname
    assert wfp._port == port
    assert wfp._wfp_process is None
    assert wfp._workflow == set([p])

    if not isinstance(s, str):
        wfp = WFprocessor(sid=s,
                          workflow=set([p]),
                          pending_queue=l,
                          completed_queue=l,
                          mq_hostname=s,
                          port=i,
                          resubmit_failed=b)
Example No. 9
    def test_workflow(self, mocked_init, mocked_Logger):

        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)

        wfp._workflow = 'test_workflow'

        self.assertEqual(wfp.workflow, 'test_workflow')
Example No. 10
    def test_execute_post_exec(self, mocked_init, mocked_Logger,
                               mocked_Profiler):

        global_advs = set()

        def _advance_side_effect(obj, obj_type, state):
            nonlocal global_advs
            global_advs.add((obj, obj_type, state))

        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)
        wfp._uid = 'wfp.0000'
        wfp._logger = mocked_Logger
        wfp._prof = mocked_Profiler
        wfp._advance = mock.MagicMock(side_effect=_advance_side_effect)

        pipe = mock.Mock()
        pipe.lock = mt.Lock()
        pipe.state = states.INITIAL
        pipe.uid = 'pipe.0000'
        pipe.completed = False
        pipe._increment_stage = mock.MagicMock(return_value=True)
        pipe.current_stage = 1

        pipe2 = mock.Mock()
        pipe2.lock = mt.Lock()
        pipe2.state = states.INITIAL
        pipe2.uid = 'pipe.0001'
        pipe2.completed = False
        pipe2._increment_stage = mock.MagicMock(return_value=True)
        pipe2.current_stage = 1

        pipe3 = mock.Mock()
        pipe3.lock = mt.Lock()
        pipe3.state = states.INITIAL
        pipe3.uid = 'pipe.0002'
        pipe3.completed = True
        pipe3._increment_stage = mock.MagicMock(return_value=True)
        pipe3.current_stage = 1
        wfp._workflow = set([pipe2, pipe3])

        stage = mock.Mock()
        stage.uid = 'stage.0000'
        stage.state = states.SCHEDULING
        stage.post_exec = mock.MagicMock(
            return_value=['pipe.0001', 'pipe.0002'])

        wfp._execute_post_exec(pipe, stage)
        exp_out = set([(pipe2, 'Pipeline', states.INITIAL),
                       (pipe3, 'Pipeline', states.DONE)])
        self.assertEqual(global_advs, exp_out)
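
The mocked stage.post_exec stands in for a user-supplied callable that EnTK invokes once a stage completes; in the version under test it returns the uids of suspended pipelines to resume, which is why pipe.0001 is re-advanced in its own current state while the already completed pipe.0002 is advanced to DONE. A minimal sketch of the user-level hook (assumed usage, not taken from this test module):

# Assumed user-level usage of the post-execution hook: attach a callable to a
# stage and EnTK calls it when the stage's tasks are done.
s = Stage()
t = Task()
t.executable = '/bin/date'
s.add_tasks(t)

def on_stage_done():
    print('stage finished; adapt the workflow here')

s.post_exec = on_stage_done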
Example No. 11
    def test_execute_workload(self, mocked_init, mocked_dumps, mocked_Logger,
                              mocked_Reporter, mocked_BlockingConnection):

        global_advs = []

        def _advance_side_effect(obj, obj_type, state):
            nonlocal global_advs
            global_advs.append([obj, obj_type, state])

        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)
        wfp._rmq_conn_params = 'test_rmq_params'
        wfp._pending_queue = ['test_queue']
        wfp._logger = mocked_Logger
        wfp._advance = mock.MagicMock(side_effect=_advance_side_effect)

        stage = mock.Mock()
        stage.uid = 'stage.0000'
        stage.state = states.SCHEDULING

        task = mock.Mock()
        task.uid = 'task.0000'
        task.state = states.INITIAL
        workload = [task]
        stage.tasks = [task]
        scheduled_stages = [stage]

        wfp._execute_workload(workload, scheduled_stages)
        self.assertEqual(global_advs[0], [task, 'Task', 'SCHEDULED'])
        self.assertEqual(global_advs[1], [stage, 'Stage', 'SCHEDULED'])
Example No. 12
def test_wfp_check_processor():

    p = Pipeline()
    s = Stage()
    t = Task()

    t.executable = '/bin/date'
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      rmq_conn_params=amgr._rmq_conn_params,
                      resubmit_failed=False)

    wfp.start_processor()
    assert wfp.check_processor()

    wfp.terminate_processor()
    assert not wfp.check_processor()
Example No. 13
def test_wfp_enqueue():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_enqueue_test,
                   name='temp-proc',
                   args=(wfp, ))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED

    assert p.stages[0].state == states.SCHEDULED
    assert p.state == states.SCHEDULING
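
func_for_enqueue_test is defined elsewhere in the test module and omitted from the excerpt. A hypothetical reconstruction under the older process-based API (an assumption, not the original definition):

import time

# Hypothetical helper (an assumption): run inside the spawned Process, start
# the WFprocessor, give its enqueue thread time to push the first stage's
# tasks to the pending queue, then shut down as Example No. 1 does.
def func_for_enqueue_test(wfp):
    wfp.start_processor()
    time.sleep(10)
    wfp._wfp_terminate.set()
    wfp._wfp_process.join()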
Example No. 14
def test_write_session_description():

    amgr = AppManager(hostname=hostname, port=port)
    amgr.resource_desc = {'resource' : 'xsede.stampede',
                          'walltime' : 59,
                          'cpus'     : 128,
                          'gpus'     : 64,
                          'project'  : 'xyz',
                          'queue'    : 'high'}

    workflow      = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr.sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            resubmit_failed=amgr._resubmit_failed,
                            rmq_conn_params=amgr._rmq_conn_params)
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     rmgr=amgr._rmgr,
                                     rmq_conn_params=amgr._rmq_conn_params)

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))
    src  = '%s/sample_data' % pwd

    assert desc == ru.read_json('%s/expected_desc_write_session.json' % src)
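
generate_pipeline is another omitted fixture. Judging from the session tree asserted in Example No. 32, each generated pipeline holds a single-task stage followed by a ten-task stage, so a plausible sketch is:

# Plausible reconstruction of the omitted fixture (an assumption): one
# single-task stage plus one ten-task stage per pipeline, matching the tree
# asserted in Example No. 32.
def generate_pipeline(nid):

    p      = Pipeline()
    p.name = 'p%s' % nid

    s1 = Stage()
    t1 = Task()
    t1.executable = '/bin/date'
    s1.add_tasks(t1)
    p.add_stages(s1)

    s2 = Stage()
    for _ in range(10):
        t2 = Task()
        t2.executable = '/bin/date'
        s2.add_tasks(t2)
    p.add_stages(s2)

    return p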
Example No. 15
def test_wfp_dequeue():

    p = Pipeline()
    s = Stage()
    t = Task()

    t.executable = '/bin/date'
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp.initialize_workflow()

    assert p.state == states.INITIAL
    assert p.stages[0].state == states.INITIAL

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=amgr._hostname, port=amgr._port))
    mq_channel = mq_connection.channel()

    mq_channel.basic_publish(exchange='',
                             routing_key='%s' % amgr._completed_queue[0],
                             body=task_as_dict)

    wfp.start_processor()

    th = mt.Thread(target=func_for_dequeue_test, name='temp-proc', args=(p, ))
    th.start()
    th.join()

    wfp.terminate_processor()

    assert p.state == states.DONE
    assert p.stages[0].state == states.DONE

    for t in p.stages[0].tasks:
        assert t.state == states.DONE
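
func_for_dequeue_test is likewise omitted. Here it receives the pipeline, while Example No. 21 passes the WFprocessor, so the signature evidently changed between versions; a minimal sketch for this variant, assuming it only waits for the dequeue logic to finish the pipeline:

import time

# Hypothetical reconstruction (an assumption): block until the dequeue thread
# has processed the published task and driven the pipeline to completion.
def func_for_dequeue_test(p):
    while not p.completed:
        time.sleep(0.1)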
Example No. 17
    def test_wfp_initialization(self, mocked_generate_id, mocked_getcwd,
                                mocked_Logger, mocked_Profiler,
                                mocked_Reporter):

        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)

        self.assertIsNone(wfp._wfp_process)
        self.assertIsNone(wfp._enqueue_thread)
        self.assertIsNone(wfp._dequeue_thread)
        self.assertIsNone(wfp._enqueue_thread_terminate)
        self.assertIsNone(wfp._dequeue_thread_terminate)
        self.assertEqual(wfp._rmq_ping_interval, 10)
        self.assertEqual(wfp._path, 'test_folder/test_sid')
        self.assertEqual(wfp._workflow, 'workflow')

        self.assertEqual(wfp._sid, 'test_sid')
        self.assertEqual(wfp._pending_queue, 'pending_queue')
        self.assertEqual(wfp._completed_queue, 'completed_queue')
        self.assertFalse(wfp._resubmit_failed)
        self.assertEqual(wfp._rmq_conn_params, 'test_rmq_params')
        self.assertEqual(wfp._uid, 'wfp.0000')

        os.environ['RMQ_PING_INTERVAL'] = '20'
        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=True)

        self.assertIsNone(wfp._wfp_process)
        self.assertIsNone(wfp._enqueue_thread)
        self.assertIsNone(wfp._dequeue_thread)
        self.assertEqual(wfp._rmq_ping_interval, 20)
        self.assertEqual(wfp._path, 'test_folder/test_sid')
        self.assertEqual(wfp._workflow, 'workflow')
        self.assertEqual(wfp._sid, 'test_sid')
        self.assertEqual(wfp._pending_queue, 'pending_queue')
        self.assertEqual(wfp._completed_queue, 'completed_queue')
        self.assertTrue(wfp._resubmit_failed)
        self.assertEqual(wfp._rmq_conn_params, 'test_rmq_params')
        self.assertEqual(wfp._uid, 'wfp.0000')
Example No. 18
    def test_enqueue(self, mocked_init, mocked_Logger, mocked_Profiler,
                     mocked_sleep, mocked_create_workload,
                     mocked_execute_workload):
        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)
        wfp._logger = mocked_Logger
        wfp._prof = mocked_Profiler
        wfp._uid = 'wfp.0000'
        wfp._enqueue_thread_terminate = mock.Mock()
        wfp._enqueue_thread_terminate.is_set = mock.MagicMock(
            side_effect=[False, True])

        wfp._enqueue()

        with self.assertRaises(Exception):
            wfp._enqueue()
Example No. 20
def test_wfp_initialize_workflow():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    wfp = WFprocessor(sid='test',
                      workflow=[p],
                      pending_queue=list(),
                      completed_queue=list(),
                      mq_hostname=hostname,
                      port=port,
                      resubmit_failed=False)

    wfp._initialize_workflow()
    assert p.uid is not None
    assert p.stages[0].uid is not None
    for t in p.stages[0].tasks:
        assert t.uid is not None
Example No. 21
def test_wfp_workflow_incomplete():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    import json
    import pika

    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()
    mq_channel.basic_publish(exchange='',
                             routing_key='%s-completedq-1' % amgr._sid,
                             body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test,
                   name='temp-proc',
                   args=(wfp, ))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
Example No. 22
def test_wfp_initialize_workflow():

    p = Pipeline()
    s = Stage()
    t = Task()

    t.executable = '/bin/date'
    s.add_tasks(t)
    p.add_stages(s)
    rmq_conn_params = pika.ConnectionParameters(host=hostname, port=port)

    wfp = WFprocessor(sid='test',
                      workflow=[p],
                      pending_queue=list(),
                      completed_queue=list(),
                      rmq_conn_params=rmq_conn_params,
                      resubmit_failed=False)

    wfp.initialize_workflow()
    assert p.uid is not None
    assert p.stages[0].uid is not None

    for t in p.stages[0].tasks:
        assert t.uid is not None
Example No. 23
def test_write_workflow():

    try:
        wf = list()
        wf.append(generate_pipeline(1))
        wf.append(generate_pipeline(2))

        amgr = AppManager(hostname=hostname, port=port)
        amgr.workflow = wf
        amgr._wfp = WFprocessor(sid=amgr._sid,
                                workflow=amgr._workflow,
                                pending_queue=amgr._pending_queue,
                                completed_queue=amgr._completed_queue,
                                mq_hostname=amgr._mq_hostname,
                                port=amgr._port,
                                resubmit_failed=amgr._resubmit_failed)
        amgr._wfp._initialize_workflow()
        wf = amgr._wfp.workflow

        write_workflow(wf, 'test')

        data = ru.read_json('test/entk_workflow.json')
        assert len(data) == len(wf) + 1

        stack = data.pop(0)
        assert list(stack.keys()) == ['stack']
        assert set(stack['stack'].keys()) == {'sys', 'radical'}
        assert set(stack['stack']['sys'].keys()) == {'python', 'pythonpath', 'virtualenv'}
        assert set(stack['stack']['radical'].keys()) == {'saga', 'radical.pilot', 'radical.utils', 'radical.entk'}

        p_cnt = 0
        for p in data:
            assert p['uid'] == wf[p_cnt].uid
            assert p['name'] == wf[p_cnt].name
            assert p['state_history'] == wf[p_cnt].state_history
            s_cnt = 0
            for s in p['stages']:
                assert s['uid'] == wf[p_cnt].stages[s_cnt].uid
                assert s['name'] == wf[p_cnt].stages[s_cnt].name
                assert s['state_history'] == wf[p_cnt].stages[s_cnt].state_history
                for t in wf[p_cnt].stages[s_cnt].tasks:
                    assert t.to_dict() in s['tasks']
                s_cnt += 1
            p_cnt += 1

    except Exception:
        shutil.rmtree('test')
        raise
Example No. 24
def test_wfp_enqueue():

    p = Pipeline()
    s = Stage()
    t = Task()

    t.executable = '/bin/date'
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp.initialize_workflow()

    assert p.state == states.INITIAL
    assert p.stages[0].state == states.INITIAL

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    wfp.start_processor()

    th = mt.Thread(target=func_for_enqueue_test, name='temp-proc', args=(p, ))
    th.start()
    th.join()

    wfp.terminate_processor()

    assert p.state == states.SCHEDULING
    assert p.stages[0].state == states.SCHEDULED

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED
Example No. 25
def test_write_session_description():

    amgr = AppManager(hostname=hostname,
                      port=port,
                      username=username,
                      password=password)
    amgr.resource_desc = {
        'resource': 'xsede.stampede',
        'walltime': 59,
        'cpus': 128,
        'gpus': 64,
        'project': 'xyz',
        'queue': 'high'
    }

    workflow = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr.sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            resubmit_failed=amgr._resubmit_failed,
                            rmq_conn_params=amgr._rmq_conn_params)
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     rmgr=amgr._rmgr,
                                     rmq_conn_params=amgr._rmq_conn_params)

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))
    # tasks are originally a set but are saved as a list in the JSON;
    # sort the children for a stable comparison (this does not affect validity)
    for k, v in desc['tree'].items():
        if k.startswith("stage"):
            desc['tree'][k]['children'] = sorted(v['children'])

    src = '%s/sample_data' % pwd

    assert desc == ru.read_json('%s/expected_desc_write_session.json' % src)
Example No. 26
def test_write_session_description():

    hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
    port = int(os.environ.get('RMQ_PORT', 5672))
    amgr = AppManager(hostname=hostname, port=port)
    amgr.resource_desc = {
        'resource': 'xsede.stampede',
        'walltime': 60,
        'cpus': 128,
        'gpus': 64,
        'project': 'xyz',
        'queue': 'high'
    }

    workflow = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr._sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            mq_hostname=amgr._mq_hostname,
                            port=amgr._port,
                            resubmit_failed=amgr._resubmit_failed)
    amgr._wfp._initialize_workflow()
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._resource_manager,
                                     port=amgr._port)

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))
    curdir = os.path.dirname(os.path.abspath(__file__))
    src = '%s/sample_data' % curdir
    assert desc == ru.read_json('%s/expected_desc_write_session.json' % src)
Example No. 27
def test_wfp_workflow_incomplete():

    p = Pipeline()
    s = Stage()
    t = Task()

    t.executable = '/bin/date'
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port, username=username,
                password=password)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      rmq_conn_params=amgr._rmq_conn_params,
                      resubmit_failed=False)

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    task_as_dict  = json.dumps(t.to_dict())
    credentials = pika.PlainCredentials(amgr._username, amgr._password)
    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(
                                          host=amgr._hostname, port=amgr._port,
                                          credentials=credentials))
    mq_channel    = mq_connection.channel()

    mq_channel.basic_publish(exchange    = '',
                             routing_key = '%s' % amgr._completed_queue[0],
                             body        = task_as_dict)

    wfp.start_processor()

    th = mt.Thread(target=func_for_dequeue_test, name='temp-proc', args=(p,))
    th.start()
    th.join()

    wfp.terminate_processor()

    assert not wfp.workflow_incomplete()
Example No. 28
def test_write_workflow():

    wf = list()
    wf.append(generate_pipeline(1))
    wf.append(generate_pipeline(2))

    amgr = AppManager(hostname=hostname, port=port)
    amgr.workflow = wf
    amgr._wfp = WFprocessor(sid=amgr._sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            mq_hostname=amgr._mq_hostname,
                            port=amgr._port,
                            resubmit_failed=amgr._resubmit_failed)
    amgr._wfp._initialize_workflow()
    wf = amgr._wfp.workflow

    write_workflow(wf, 'test')

    data = ru.read_json('test/entk_workflow.json')
    assert len(data) == len(wf)

    p_cnt = 0
    for p in data:
        assert p['uid'] == wf[p_cnt].uid
        assert p['name'] == wf[p_cnt].name
        assert p['state_history'] == wf[p_cnt].state_history
        s_cnt = 0
        for s in p['stages']:
            assert s['uid'] == wf[p_cnt].stages[s_cnt].uid
            assert s['name'] == wf[p_cnt].stages[s_cnt].name
            assert s['state_history'] == wf[p_cnt].stages[s_cnt].state_history
            for t in wf[p_cnt].stages[s_cnt].tasks:
                assert t.to_dict() in s['tasks']
            s_cnt += 1
        p_cnt += 1

    shutil.rmtree('test')
Example No. 29
    def test_terminate_processor(self, mocked_init, mocked_Logger,
                                 mocked_Profiler):

        global_boolean = {}

        def _dequeue_side_effect():
            nonlocal global_boolean
            time.sleep(0.1)
            global_boolean['dequeue'] = True

        def _enqueue_side_effect():
            nonlocal global_boolean
            time.sleep(0.1)
            global_boolean['enqueue'] = True

        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)

        wfp._uid = 'wfp.0000'
        wfp._logger = mocked_Logger
        wfp._prof = mocked_Profiler
        wfp._enqueue_thread = mt.Thread(target=_enqueue_side_effect)
        wfp._dequeue_thread = mt.Thread(target=_dequeue_side_effect)
        wfp._enqueue_thread_terminate = mt.Event()
        wfp._dequeue_thread_terminate = mt.Event()

        wfp._enqueue_thread.start()
        wfp._dequeue_thread.start()

        wfp.terminate_processor()

        self.assertTrue(wfp._enqueue_thread_terminate.is_set())
        self.assertTrue(wfp._dequeue_thread_terminate.is_set())
        self.assertIsNone(wfp._dequeue_thread)
        self.assertIsNone(wfp._enqueue_thread)
        self.assertTrue(global_boolean['dequeue'])
        self.assertTrue(global_boolean['enqueue'])
Example No. 30
    def test_start_processor(self, mocked_init, mocked_Logger,
                             mocked_Profiler):

        global_boolean = {}

        def _dequeue_side_effect():
            nonlocal global_boolean
            global_boolean['dequeue'] = True

        def _enqueue_side_effect():
            nonlocal global_boolean
            global_boolean['enqueue'] = True

        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)

        wfp._uid = 'wfp.0000'
        wfp._logger = mocked_Logger
        wfp._prof = mocked_Profiler
        wfp._enqueue_thread = None
        wfp._dequeue_thread = None
        wfp._enqueue_thread_terminate = None
        wfp._dequeue_thread_terminate = None
        wfp._dequeue = mock.MagicMock(side_effect=_dequeue_side_effect)
        wfp._enqueue = mock.MagicMock(side_effect=_enqueue_side_effect)

        wfp.start_processor()
        time.sleep(1)
        try:
            self.assertIsInstance(wfp._enqueue_thread_terminate, mt.Event)
            self.assertIsInstance(wfp._dequeue_thread_terminate, mt.Event)
            self.assertIsInstance(wfp._dequeue_thread, mt.Thread)
            self.assertIsInstance(wfp._enqueue_thread, mt.Thread)
            self.assertTrue(global_boolean['dequeue'])
            self.assertTrue(global_boolean['enqueue'])
        finally:
            if wfp._dequeue_thread.is_alive():
                wfp._dequeue_thread.join()
            if wfp._enqueue_thread.is_alive():
                wfp._enqueue_thread.join()
Example No. 31
    def test_update_dequeued_task(self, mocked_init, mocked_advance,
                                  mocked_Logger, mocked_Profiler):
        global_advs = list()

        def _advance_side_effect(obj, obj_type, state):
            nonlocal global_advs
            global_advs.append([obj, obj_type, state])

        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)

        wfp._uid = 'wfp.0000'
        wfp._logger = mocked_Logger
        wfp._prof = mocked_Profiler
        wfp._resubmit_failed = False
        wfp._advance = mock.MagicMock(side_effect=_advance_side_effect)

        pipe = mock.Mock()
        pipe.uid = 'pipe.0000'
        pipe.lock = mt.Lock()
        pipe.state = states.INITIAL
        pipe.completed = False
        pipe.current_stage = 1
        pipe._increment_stage = mock.MagicMock(return_value=2)

        stage = mock.Mock()
        stage.uid = 'stage.0000'
        stage.state = states.SCHEDULING
        stage._check_stage_complete = mock.MagicMock(return_value=True)
        stage.post_exec = None

        task = mock.Mock()
        task.uid = 'task.0000'
        task.parent_pipeline = {'uid': 'pipe.0000'}
        task.parent_stage = {'uid': 'stage.0000'}
        task.state = states.INITIAL
        task.exit_code = 0

        stage.tasks = [task]
        pipe.stages = [stage]
        wfp._workflow = set([pipe])

        # Test for issue #271
        wfp._update_dequeued_task(task)
        self.assertEqual(global_advs[0], [task, 'Task', states.DONE])
        self.assertEqual(global_advs[1], [stage, 'Stage', states.DONE])

        task.state = states.INITIAL
        task.exit_code = None

        wfp._update_dequeued_task(task)
        self.assertEqual(global_advs[2], [task, 'Task', states.INITIAL])
        self.assertEqual(global_advs[3], [stage, 'Stage', states.DONE])

        task.exit_code = 1

        wfp._update_dequeued_task(task)
        self.assertEqual(global_advs[4], [task, 'Task', states.FAILED])
        self.assertEqual(global_advs[5], [stage, 'Stage', states.DONE])
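
The three calls above pin down how _update_dequeued_task interprets a dequeued task's exit code. Paraphrased from the assertions (a summary, not EnTK source):

# Mapping established by the assertions above (resubmission disabled):
def interpret_exit_code(task_state, exit_code):
    if exit_code == 0:
        return states.DONE    # the task completed successfully
    if exit_code is None:
        return task_state     # no exit code: re-advance in the current state
    return states.FAILED      # non-zero exit code
# In every case the now-complete stage is advanced to states.DONE.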
Example No. 32
def test_write_session_description():

    hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
    port = int(os.environ.get('RMQ_PORT', 5672))
    amgr = AppManager(hostname=hostname, port=port)
    amgr.resource_desc = {
        'resource': 'xsede.stampede',
        'walltime': 60,
        'cpus': 128,
        'gpus': 64,
        'project': 'xyz',
        'queue': 'high'
    }

    workflow = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr._sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            mq_hostname=amgr._mq_hostname,
                            port=amgr._port,
                            resubmit_failed=amgr._resubmit_failed)
    amgr._wfp._initialize_workflow()
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._resource_manager,
                                     port=amgr._port)

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))

    assert desc == {
        'config': {},
        'entities': {
            'appmanager': {
                'event_model': {},
                'state_model': None,
                'state_values': None
            },
            'pipeline': {
                'event_model': {},
                'state_model': {
                    'CANCELED': 9,
                    'DESCRIBED': 1,
                    'DONE': 9,
                    'FAILED': 9,
                    'SCHEDULING': 2
                },
                'state_values': {
                    '1': 'DESCRIBED',
                    '2': 'SCHEDULING',
                    '9': ['DONE', 'CANCELED', 'FAILED']
                }
            },
            'stage': {
                'event_model': {},
                'state_model': {
                    'CANCELED': 9,
                    'DESCRIBED': 1,
                    'DONE': 9,
                    'FAILED': 9,
                    'SCHEDULED': 3,
                    'SCHEDULING': 2
                },
                'state_values': {
                    '1': 'DESCRIBED',
                    '2': 'SCHEDULING',
                    '3': 'SCHEDULED',
                    '9': ['FAILED', 'CANCELED', 'DONE']
                }
            },
            'task': {
                'event_model': {},
                'state_model': {
                    'CANCELED': 9,
                    'DEQUEUED': 8,
                    'DEQUEUEING': 7,
                    'DESCRIBED': 1,
                    'DONE': 9,
                    'EXECUTED': 6,
                    'FAILED': 9,
                    'SCHEDULED': 3,
                    'SCHEDULING': 2,
                    'SUBMITTED': 5,
                    'SUBMITTING': 4
                },
                'state_values': {
                    '1': 'DESCRIBED',
                    '2': 'SCHEDULING',
                    '3': 'SCHEDULED',
                    '4': 'SUBMITTING',
                    '5': 'SUBMITTED',
                    '6': 'EXECUTED',
                    '7': 'DEQUEUEING',
                    '8': 'DEQUEUED',
                    '9': ['DONE', 'CANCELED', 'FAILED']
                }
            }
        },
        'tree': {
            'appmanager.0000': {
                'cfg': {},
                'children': [
                    'wfprocessor.0000', 'resource_manager.0000',
                    'task_manager.0000', 'pipeline.0000', 'pipeline.0001'
                ],
                'etype': 'appmanager',
                'has': [
                    'pipeline', 'wfprocessor', 'resource_manager',
                    'task_manager'
                ],
                'uid': 'appmanager.0000'
            },
            'pipeline.0000': {
                'cfg': {},
                'children': ['stage.0000', 'stage.0001'],
                'etype': 'pipeline',
                'has': ['stage'],
                'uid': 'pipeline.0000'
            },
            'pipeline.0001': {
                'cfg': {},
                'children': ['stage.0002', 'stage.0003'],
                'etype': 'pipeline',
                'has': ['stage'],
                'uid': 'pipeline.0001'
            },
            'resource_manager.0000': {
                'cfg': {},
                'children': [],
                'etype': 'resource_manager',
                'has': [],
                'uid': 'resource_manager.0000'
            },
            'stage.0000': {
                'cfg': {},
                'children': ['task.0000'],
                'etype': 'stage',
                'has': ['task'],
                'uid': 'stage.0000'
            },
            'stage.0001': {
                'cfg': {},
                'children': [
                    'task.0001', 'task.0002', 'task.0003', 'task.0004',
                    'task.0005', 'task.0006', 'task.0007', 'task.0008',
                    'task.0009', 'task.0010'
                ],
                'etype': 'stage',
                'has': ['task'],
                'uid': 'stage.0001'
            },
            'stage.0002': {
                'cfg': {},
                'children': ['task.0011'],
                'etype': 'stage',
                'has': ['task'],
                'uid': 'stage.0002'
            },
            'stage.0003': {
                'cfg': {},
                'children': [
                    'task.0012', 'task.0013', 'task.0014', 'task.0015',
                    'task.0016', 'task.0017', 'task.0018', 'task.0019',
                    'task.0020', 'task.0021'
                ],
                'etype': 'stage',
                'has': ['task'],
                'uid': 'stage.0003'
            },
            'task.0000': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0000'
            },
            'task.0001': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0001'
            },
            'task.0002': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0002'
            },
            'task.0003': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0003'
            },
            'task.0004': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0004'
            },
            'task.0005': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0005'
            },
            'task.0006': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0006'
            },
            'task.0007': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0007'
            },
            'task.0008': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0008'
            },
            'task.0009': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0009'
            },
            'task.0010': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0010'
            },
            'task.0011': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0011'
            },
            'task.0012': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0012'
            },
            'task.0013': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0013'
            },
            'task.0014': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0014'
            },
            'task.0015': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0015'
            },
            'task.0016': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0016'
            },
            'task.0017': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0017'
            },
            'task.0018': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0018'
            },
            'task.0019': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0019'
            },
            'task.0020': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0020'
            },
            'task.0021': {
                'cfg': {},
                'children': [],
                'etype': 'task',
                'has': [],
                'uid': 'task.0021'
            },
            'task_manager.0000': {
                'cfg': {},
                'children': [],
                'etype': 'task_manager',
                'has': [],
                'uid': 'task_manager.0000'
            },
            'wfprocessor.0000': {
                'cfg': {},
                'children': [],
                'etype': 'wfprocessor',
                'has': [],
                'uid': 'wfprocessor.0000'
            }
        }
    }

    shutil.rmtree(amgr._sid)
Example No. 33
def test_write_workflows():

    # --------------------------------------------------------------------------
    def check_stack(stack):

        assert 'sys'           in stack
        assert 'radical'       in stack

        assert 'python'        in stack['sys']
        assert 'pythonpath'    in stack['sys']
        assert 'virtualenv'    in stack['sys']

        assert 'radical.utils' in stack['radical']
        assert 'radical.saga'  in stack['radical']
        assert 'radical.pilot' in stack['radical']
        assert 'radical.entk'  in stack['radical']

    # --------------------------------------------------------------------------
    def check_wf(wf, check):

        for p_idx,p in enumerate(wf['pipes']):

            assert p['uid']           == check[p_idx].uid
            assert p['name']          == check[p_idx].name
            assert p['state_history'] == check[p_idx].state_history

            for s_idx,s in enumerate(p['stages']):

                assert s['uid']           == check[p_idx].stages[s_idx].uid
                assert s['name']          == check[p_idx].stages[s_idx].name
                assert s['state_history'] == check[p_idx].stages[s_idx].state_history

                for t in check[p_idx].stages[s_idx].tasks:
                    assert t.to_dict() in s['tasks']

    # --------------------------------------------------------------------------
    try:
        wf = list()
        wf.append(generate_pipeline(1))
        wf.append(generate_pipeline(2))

        amgr          = AppManager(hostname=hostname, port=port)
        amgr.workflow = wf
        amgr._wfp     = WFprocessor(sid=amgr._sid,
                                    workflow=amgr._workflow,
                                    pending_queue=amgr._pending_queue,
                                    completed_queue=amgr._completed_queue,
                                    resubmit_failed=amgr._resubmit_failed,
                                    rmq_conn_params=amgr._rmq_conn_params)

        check = amgr.workflow

        # ----------------------------------------------------------------------
        # check json output, with default and custom fname
        for fname in [None, 'wf.json']:
            write_workflows(amgr.workflows, 'test', fname=fname)

            if not fname: fname = 'entk_workflow.json'
            data = ru.read_json('test/%s' % fname)

            check_stack(data['stack'])
            check_wf(data['workflows'][0], check)

            assert len(data['workflows']) == 1

            shutil.rmtree('test')


        # ----------------------------------------------------------------------
        # check with data return
        data = write_workflows(amgr.workflows, 'test', fwrite=False)

        check_stack(data['stack'])
        check_wf(data['workflows'][0], check)

        assert len(data['workflows']) == 1

        # ----------------------------------------------------------------------
        # check with two workflows
        amgr.workflow = wf
        amgr._wfp     = WFprocessor(sid=amgr._sid,
                                    workflow=amgr._workflow,
                                    pending_queue=amgr._pending_queue,
                                    completed_queue=amgr._completed_queue,
                                    resubmit_failed=amgr._resubmit_failed,
                                    rmq_conn_params=amgr._rmq_conn_params)
        check = amgr.workflows

        data = write_workflows(amgr.workflows, 'test', fwrite=False)

        check_stack(data['stack'])
        check_wf(data['workflows'][0], check[0])
        check_wf(data['workflows'][1], check[1])

        assert len(data['workflows']) == 2

        shutil.rmtree('test')

    finally:
        try:    shutil.rmtree('test')
        except: pass
Example No. 34
    def test_advance(self, mocked_init, mocked_Logger, mocked_Reporter):
        wfp = WFprocessor(sid='test_sid',
                          workflow='workflow',
                          pending_queue='pending_queue',
                          completed_queue='completed_queue',
                          rmq_conn_params='test_rmq_params',
                          resubmit_failed=False)

        global_profs = []

        def _log(log_entry, uid, state, msg):
            nonlocal global_profs
            global_profs.append([log_entry, uid, state, msg])

        wfp._logger = mocked_Logger
        wfp._report = mocked_Reporter
        wfp._prof = mock.Mock()
        wfp._prof.prof = mock.MagicMock(side_effect=_log)
        wfp._uid = 'wfp.0000'
        obj = mock.Mock()
        obj.parent_stage = {'uid': 'test_stage'}
        obj.parent_pipeline = {'uid': 'test_pipe'}
        obj.uid = 'test_object'
        obj.state = 'test_state'
        wfp._advance(obj, 'Task', None)
        self.assertEqual(global_profs[0],
                         ['advance', 'test_object', None, 'test_stage'])
        global_profs = []
        wfp._advance(obj, 'Stage', 'new_state')
        self.assertEqual(global_profs[0],
                         ['advance', 'test_object', 'new_state', 'test_pipe'])
        global_profs = []
        wfp._advance(obj, 'Pipe', 'new_state')
        self.assertEqual(global_profs[0],
                         ['advance', 'test_object', 'new_state', None])