# ------------------------------------------------------------------------------
# Shared imports and module-level test configuration. The import paths assume
# the radical.entk package layout these tests target; hostname, port, and the
# optional RMQ credentials follow the RMQ_* environment-variable pattern used
# by the tests below.

import os
import shutil
import threading as mt

from unittest import mock

import radical.utils           as ru
import radical.entk.exceptions as ree

from radical.entk                    import AppManager, Pipeline, Stage, Task
from radical.entk.appman.appmanager  import AppManager as Amgr
from radical.entk.appman.wfprocessor import WFprocessor
from radical.entk.execman.rp         import TaskManager
from radical.entk.utils             import write_session_description, \
                                           write_workflow, write_workflows

hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port     = int(os.environ.get('RMQ_PORT', 5672))
username = os.environ.get('RMQ_USERNAME', 'guest')
password = os.environ.get('RMQ_PASSWORD', 'guest')
pwd      = os.path.dirname(os.path.abspath(__file__))


# ------------------------------------------------------------------------------
def test_write_session_description():

    amgr = AppManager(hostname=hostname, port=port)
    amgr.resource_desc = {
        'resource': 'xsede.stampede',
        'walltime': 59,
        'cpus'    : 128,
        'gpus'    : 64,
        'project' : 'xyz',
        'queue'   : 'high'
    }

    # generate_pipeline is sketched right after this test
    workflow      = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr.sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            resubmit_failed=amgr._resubmit_failed,
                            rmq_conn_params=amgr._rmq_conn_params)
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     rmgr=amgr._rmgr,
                                     rmq_conn_params=amgr._rmq_conn_params)

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))
    src  = '%s/sample_data' % pwd
    assert desc == ru.read_json('%s/expected_desc_write_session.json' % src)
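
# ------------------------------------------------------------------------------
# The tests in this module rely on a `generate_pipeline` fixture that is not
# shown in this section. Below is a minimal sketch, assuming (from the session
# tree asserted further down, where stages 0 and 2 hold one task and stages 1
# and 3 hold ten) that each pipeline carries two stages, the first with one
# task and the second with ten. The executable is a placeholder.
def generate_pipeline(nid):

    p      = Pipeline()
    p.name = 'p%s' % nid

    # first stage: a single task
    s1      = Stage()
    s1.name = 's1'

    t1            = Task()
    t1.name       = 't1'
    t1.executable = '/bin/date'          # placeholder executable
    s1.add_tasks(t1)
    p.add_stages(s1)

    # second stage: ten tasks
    s2      = Stage()
    s2.name = 's2'

    for i in range(10):
        t            = Task()
        t.name       = 't%s' % (i + 2)
        t.executable = '/bin/date'       # placeholder executable
        s2.add_tasks(t)
    p.add_stages(s2)

    return p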
# ------------------------------------------------------------------------------
def test_write_workflow():

    try:
        wf = list()
        wf.append(generate_pipeline(1))
        wf.append(generate_pipeline(2))

        amgr = AppManager(hostname=hostname, port=port)
        amgr.workflow = wf
        amgr._wfp = WFprocessor(sid=amgr._sid,
                                workflow=amgr._workflow,
                                pending_queue=amgr._pending_queue,
                                completed_queue=amgr._completed_queue,
                                mq_hostname=amgr._mq_hostname,
                                port=amgr._port,
                                resubmit_failed=amgr._resubmit_failed)
        amgr._wfp._initialize_workflow()
        wf = amgr._wfp.workflow

        write_workflow(wf, 'test')

        data = ru.read_json('test/entk_workflow.json')

        # the dump holds one entry per pipeline, plus the stack description
        assert len(data) == len(wf) + 1

        # `dict.keys()` returns a view in Python 3, so compare sorted lists
        stack = data.pop(0)
        assert sorted(stack.keys())          == ['stack']
        assert sorted(stack['stack'].keys()) == sorted(['sys', 'radical'])
        assert sorted(stack['stack']['sys'].keys()) == \
               sorted(['python', 'pythonpath', 'virtualenv'])
        assert sorted(stack['stack']['radical'].keys()) == \
               sorted(['saga', 'radical.pilot', 'radical.utils',
                       'radical.entk'])

        p_cnt = 0
        for p in data:
            assert p['uid']           == wf[p_cnt].uid
            assert p['name']          == wf[p_cnt].name
            assert p['state_history'] == wf[p_cnt].state_history

            s_cnt = 0
            for s in p['stages']:
                assert s['uid']           == wf[p_cnt].stages[s_cnt].uid
                assert s['name']          == wf[p_cnt].stages[s_cnt].name
                assert s['state_history'] == \
                       wf[p_cnt].stages[s_cnt].state_history

                for t in wf[p_cnt].stages[s_cnt].tasks:
                    assert t.to_dict() in s['tasks']
                s_cnt += 1
            p_cnt += 1

    finally:
        # clean up the dump directory whether or not the asserts passed
        shutil.rmtree('test')
# ------------------------------------------------------------------------------
def test_write_session_description():

    amgr = AppManager(hostname=hostname, port=port,
                      username=username, password=password)
    amgr.resource_desc = {
        'resource': 'xsede.stampede',
        'walltime': 59,
        'cpus'    : 128,
        'gpus'    : 64,
        'project' : 'xyz',
        'queue'   : 'high'
    }

    workflow      = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr.sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            resubmit_failed=amgr._resubmit_failed,
                            rmq_conn_params=amgr._rmq_conn_params)
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     rmgr=amgr._rmgr,
                                     rmq_conn_params=amgr._rmq_conn_params)

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))

    # tasks are held in a set but serialized to JSON as a list; sort the
    # children for a stable comparison (this does not affect validity)
    for k, v in desc['tree'].items():
        if k.startswith('stage'):
            desc['tree'][k]['children'] = sorted(v['children'])

    src = '%s/sample_data' % pwd
    assert desc == ru.read_json('%s/expected_desc_write_session.json' % src)
# ------------------------------------------------------------------------------
def test_write_session_description():

    hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
    port     = int(os.environ.get('RMQ_PORT', 5672))

    amgr = AppManager(hostname=hostname, port=port)
    amgr.resource_desc = {
        'resource': 'xsede.stampede',
        'walltime': 60,
        'cpus'    : 128,
        'gpus'    : 64,
        'project' : 'xyz',
        'queue'   : 'high'
    }

    workflow      = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr._sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            mq_hostname=amgr._mq_hostname,
                            port=amgr._port,
                            resubmit_failed=amgr._resubmit_failed)
    amgr._wfp._initialize_workflow()
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._resource_manager,
                                     port=amgr._port)

    # os.mkdir(amgr._sid)

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))

    curdir = os.path.dirname(os.path.abspath(__file__))
    src    = '%s/sample_data' % curdir
    assert desc == ru.read_json('%s/expected_desc_write_session.json' % src)
# ------------------------------------------------------------------------------
def test_write_workflow():

    wf = list()
    wf.append(generate_pipeline(1))
    wf.append(generate_pipeline(2))

    amgr = AppManager(hostname=hostname, port=port)
    amgr.workflow = wf
    amgr._wfp = WFprocessor(sid=amgr._sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            mq_hostname=amgr._mq_hostname,
                            port=amgr._port,
                            resubmit_failed=amgr._resubmit_failed)
    amgr._wfp._initialize_workflow()
    wf = amgr._wfp.workflow

    write_workflow(wf, 'test')

    data = ru.read_json('test/entk_workflow.json')
    assert len(data) == len(wf)

    p_cnt = 0
    for p in data:
        assert p['uid']           == wf[p_cnt].uid
        assert p['name']          == wf[p_cnt].name
        assert p['state_history'] == wf[p_cnt].state_history

        s_cnt = 0
        for s in p['stages']:
            assert s['uid']           == wf[p_cnt].stages[s_cnt].uid
            assert s['name']          == wf[p_cnt].stages[s_cnt].name
            assert s['state_history'] == wf[p_cnt].stages[s_cnt].state_history

            for t in wf[p_cnt].stages[s_cnt].tasks:
                assert t.to_dict() in s['tasks']
            s_cnt += 1
        p_cnt += 1

    shutil.rmtree('test')
# ------------------------------------------------------------------------------
# NOTE: this is a method of a unittest.TestCase subclass; the mocked_*
# parameters imply a stack of mock.patch decorators (omitted here), one per
# argument -- see the sketch after this test.
def test_run_workflow(self, mocked_init, mocked_ResourceManager,
                      mocked_WFprocessor, mocked_TaskManager,
                      mocked_Profiler):

    # the task-manager heartbeat is mocked to fail, so _run_workflow() keeps
    # restarting the task manager until the reattempt limit is exceeded
    mocked_TaskManager.check_heartbeat     = mock.MagicMock(return_value=False)
    mocked_TaskManager.terminate_heartbeat = mock.MagicMock(return_value=True)
    mocked_TaskManager.terminate_manager   = mock.MagicMock(return_value=True)
    mocked_TaskManager.start_manager       = mock.MagicMock(return_value=True)
    mocked_TaskManager.start_heartbeat     = mock.MagicMock(return_value=True)

    mocked_ResourceManager.get_resource_allocation_state = \
                                        mock.MagicMock(return_value='RUNNING')
    mocked_ResourceManager.get_completed_states = \
                                        mock.MagicMock(return_value=['DONE'])

    mocked_WFprocessor.workflow_incomplete = mock.MagicMock(return_value=True)
    mocked_WFprocessor.check_processor     = mock.MagicMock(return_value=True)

    appman         = Amgr()
    appman._uid    = 'appman.0000'
    appman._logger = ru.Logger(name='radical.entk.taskmanager',
                               ns='radical.entk')
    appman._prof   = mocked_Profiler

    # a single, never-completing pipeline keeps the run loop spinning
    pipe           = mock.Mock()
    pipe.lock      = mt.Lock()
    pipe.completed = False
    pipe.uid       = 'pipe.0000'

    appman._workflow     = set([pipe])
    appman._cur_attempt  = 0
    appman._reattempts   = 3
    appman._rmgr         = mocked_ResourceManager
    appman._wfp          = mocked_WFprocessor
    appman._task_manager = mocked_TaskManager

    appman._sync_thread          = mock.Mock()
    appman._sync_thread.is_alive = mock.MagicMock(return_value=True)

    with self.assertRaises(ree.EnTKError):
        appman._run_workflow()
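
# ------------------------------------------------------------------------------
# A minimal sketch of the decorator stack the signature above implies. The
# patch targets are assumptions (the real targets depend on where appmanager
# binds these names); what matters is the ordering: mock.patch decorators hand
# their mocks to the test bottom-up, so the decorator closest to the `def`
# supplies the first argument after `self` (here: mocked_init).
#
#   @mock.patch('radical.utils.Profiler')
#   @mock.patch('radical.entk.appman.appmanager.TaskManager')
#   @mock.patch('radical.entk.appman.appmanager.WFprocessor')
#   @mock.patch('radical.entk.appman.appmanager.ResourceManager')
#   @mock.patch.object(Amgr, '__init__', return_value=None)
#   def test_run_workflow(self, mocked_init, mocked_ResourceManager,
#                         mocked_WFprocessor, mocked_TaskManager,
#                         mocked_Profiler):
#       ...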
# ------------------------------------------------------------------------------
def test_write_session_description():

    hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
    port     = int(os.environ.get('RMQ_PORT', 5672))

    amgr = AppManager(hostname=hostname, port=port)
    amgr.resource_desc = {
        'resource': 'xsede.stampede',
        'walltime': 60,
        'cpus'    : 128,
        'gpus'    : 64,
        'project' : 'xyz',
        'queue'   : 'high'
    }

    workflow      = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr._sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            mq_hostname=amgr._mq_hostname,
                            port=amgr._port,
                            resubmit_failed=amgr._resubmit_failed)
    amgr._wfp._initialize_workflow()
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._resource_manager,
                                     port=amgr._port)

    # os.mkdir(amgr._sid)

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))

    # expected entity descriptions: state models and values per entity type
    expected_entities = {
        'appmanager': {
            'event_model' : {},
            'state_model' : None,
            'state_values': None
        },
        'pipeline': {
            'event_model' : {},
            'state_model' : {'CANCELED': 9, 'DESCRIBED': 1, 'DONE': 9,
                             'FAILED': 9, 'SCHEDULING': 2},
            'state_values': {'1': 'DESCRIBED', '2': 'SCHEDULING',
                             '9': ['DONE', 'CANCELED', 'FAILED']}
        },
        'stage': {
            'event_model' : {},
            'state_model' : {'CANCELED': 9, 'DESCRIBED': 1, 'DONE': 9,
                             'FAILED': 9, 'SCHEDULED': 3, 'SCHEDULING': 2},
            'state_values': {'1': 'DESCRIBED', '2': 'SCHEDULING',
                             '3': 'SCHEDULED',
                             '9': ['FAILED', 'CANCELED', 'DONE']}
        },
        'task': {
            'event_model' : {},
            'state_model' : {'CANCELED': 9, 'DEQUEUED': 8, 'DEQUEUEING': 7,
                             'DESCRIBED': 1, 'DONE': 9, 'EXECUTED': 6,
                             'FAILED': 9, 'SCHEDULED': 3, 'SCHEDULING': 2,
                             'SUBMITTED': 5, 'SUBMITTING': 4},
            'state_values': {'1': 'DESCRIBED', '2': 'SCHEDULING',
                             '3': 'SCHEDULED', '4': 'SUBMITTING',
                             '5': 'SUBMITTED', '6': 'EXECUTED',
                             '7': 'DEQUEUEING', '8': 'DEQUEUED',
                             '9': ['DONE', 'CANCELED', 'FAILED']}
        }
    }

    # expected session tree: the appmanager owns two pipelines of two stages
    # each; stages 0 and 2 hold one task, stages 1 and 3 hold ten tasks
    expected_tree = {
        'appmanager.0000': {
            'cfg'     : {},
            'children': ['wfprocessor.0000', 'resource_manager.0000',
                         'task_manager.0000', 'pipeline.0000',
                         'pipeline.0001'],
            'etype'   : 'appmanager',
            'has'     : ['pipeline', 'wfprocessor', 'resource_manager',
                         'task_manager'],
            'uid'     : 'appmanager.0000'},
        'pipeline.0000': {
            'cfg'     : {},
            'children': ['stage.0000', 'stage.0001'],
            'etype'   : 'pipeline',
            'has'     : ['stage'],
            'uid'     : 'pipeline.0000'},
        'pipeline.0001': {
            'cfg'     : {},
            'children': ['stage.0002', 'stage.0003'],
            'etype'   : 'pipeline',
            'has'     : ['stage'],
            'uid'     : 'pipeline.0001'},
        'resource_manager.0000': {
            'cfg'     : {},
            'children': [],
            'etype'   : 'resource_manager',
            'has'     : [],
            'uid'     : 'resource_manager.0000'},
        'stage.0000': {
            'cfg'     : {},
            'children': ['task.0000'],
            'etype'   : 'stage',
            'has'     : ['task'],
            'uid'     : 'stage.0000'},
        'stage.0001': {
            'cfg'     : {},
            'children': ['task.%04d' % i for i in range(1, 11)],
            'etype'   : 'stage',
            'has'     : ['task'],
            'uid'     : 'stage.0001'},
        'stage.0002': {
            'cfg'     : {},
            'children': ['task.0011'],
            'etype'   : 'stage',
            'has'     : ['task'],
            'uid'     : 'stage.0002'},
        'stage.0003': {
            'cfg'     : {},
            'children': ['task.%04d' % i for i in range(12, 22)],
            'etype'   : 'stage',
            'has'     : ['task'],
            'uid'     : 'stage.0003'},
        'task_manager.0000': {
            'cfg'     : {},
            'children': [],
            'etype'   : 'task_manager',
            'has'     : [],
            'uid'     : 'task_manager.0000'},
        'wfprocessor.0000': {
            'cfg'     : {},
            'children': [],
            'etype'   : 'wfprocessor',
            'has'     : [],
            'uid'     : 'wfprocessor.0000'}
    }

    # all 22 task entries (task.0000 .. task.0021) share the same shape
    for i in range(22):
        uid = 'task.%04d' % i
        expected_tree[uid] = {'cfg': {}, 'children': [], 'etype': 'task',
                              'has': [], 'uid': uid}

    assert desc == {'config'  : {},
                    'entities': expected_entities,
                    'tree'    : expected_tree}

    shutil.rmtree(amgr._sid)
# ------------------------------------------------------------------------------
def test_write_workflows():

    # --------------------------------------------------------------------------
    def check_stack(stack):

        assert 'sys'     in stack
        assert 'radical' in stack

        assert 'python'     in stack['sys']
        assert 'pythonpath' in stack['sys']
        assert 'virtualenv' in stack['sys']

        assert 'radical.utils' in stack['radical']
        assert 'radical.saga'  in stack['radical']
        assert 'radical.pilot' in stack['radical']
        assert 'radical.entk'  in stack['radical']

    # --------------------------------------------------------------------------
    def check_wf(wf, check):

        for p_idx, p in enumerate(wf['pipes']):

            assert p['uid']           == check[p_idx].uid
            assert p['name']          == check[p_idx].name
            assert p['state_history'] == check[p_idx].state_history

            for s_idx, s in enumerate(p['stages']):

                assert s['uid']           == check[p_idx].stages[s_idx].uid
                assert s['name']          == check[p_idx].stages[s_idx].name
                assert s['state_history'] == \
                       check[p_idx].stages[s_idx].state_history

                for t in check[p_idx].stages[s_idx].tasks:
                    assert t.to_dict() in s['tasks']

    # --------------------------------------------------------------------------
    try:
        wf = list()
        wf.append(generate_pipeline(1))
        wf.append(generate_pipeline(2))

        amgr = AppManager(hostname=hostname, port=port)
        amgr.workflow = wf
        amgr._wfp = WFprocessor(sid=amgr._sid,
                                workflow=amgr._workflow,
                                pending_queue=amgr._pending_queue,
                                completed_queue=amgr._completed_queue,
                                resubmit_failed=amgr._resubmit_failed,
                                rmq_conn_params=amgr._rmq_conn_params)
        check = amgr.workflow

        # ----------------------------------------------------------------------
        # check json output, with default and custom fname
        for fname in [None, 'wf.json']:

            write_workflows(amgr.workflows, 'test', fname=fname)

            if not fname:
                fname = 'entk_workflow.json'

            data = ru.read_json('test/%s' % fname)

            check_stack(data['stack'])
            check_wf(data['workflows'][0], check)
            assert len(data['workflows']) == 1

            shutil.rmtree('test')

        # ----------------------------------------------------------------------
        # check with data return
        data = write_workflows(amgr.workflows, 'test', fwrite=False)

        check_stack(data['stack'])
        check_wf(data['workflows'][0], check)
        assert len(data['workflows']) == 1

        # ----------------------------------------------------------------------
        # check with two workflows
        amgr.workflow = wf
        amgr._wfp = WFprocessor(sid=amgr._sid,
                                workflow=amgr._workflow,
                                pending_queue=amgr._pending_queue,
                                completed_queue=amgr._completed_queue,
                                resubmit_failed=amgr._resubmit_failed,
                                rmq_conn_params=amgr._rmq_conn_params)
        check = amgr.workflows
        data  = write_workflows(amgr.workflows, 'test', fwrite=False)

        check_stack(data['stack'])
        check_wf(data['workflows'][0], check[0])
        check_wf(data['workflows'][1], check[1])
        assert len(data['workflows']) == 2

        shutil.rmtree('test')

    finally:
        # make sure the dump directory is gone even on failure
        try:
            shutil.rmtree('test')
        except OSError:
            pass
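
# ------------------------------------------------------------------------------
# A minimal sketch of how these tests are typically driven, assuming a local
# RabbitMQ broker; the RMQ_* variables mirror the defaults read at the top of
# this module:
#
#   RMQ_HOSTNAME=localhost RMQ_PORT=5672 pytest -v <this test module>
# ------------------------------------------------------------------------------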