def test_wfp_check_processor():
    """check_processor() is True only between start and terminate."""

    task = Task()
    task.executable = '/bin/date'

    stage = Stage()
    stage.add_tasks(task)

    pipe = Pipeline()
    pipe.add_stages(stage)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[pipe],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      rmq_conn_params=amgr._rmq_conn_params,
                      resubmit_failed=False)

    # alive after start ...
    wfp.start_processor()
    assert wfp.check_processor()

    # ... and gone after terminate
    wfp.terminate_processor()
    assert not wfp.check_processor()
def test_amgr_resource_terminate():
    """resource_terminate() must run cleanly with an active task manager."""

    res_dict = {'resource': 'xsede.supermic',
                'walltime': 30,
                'cpus': 20,
                'project': 'TG-MCB090174'}

    from radical.entk.execman.rp import TaskManager

    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)
    amgr.resource_desc = res_dict

    amgr._setup_mqs()
    amgr._rmq_cleanup = True

    # attach a minimal task manager so terminate has something to shut down
    amgr._task_manager = TaskManager(sid='test',
                                     pending_queue=list(),
                                     completed_queue=list(),
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._resource_manager,
                                     port=amgr._port)

    amgr.resource_terminate()
def test_amgr_cleanup_mqs():
    """All session queues must be gone after Amgr._cleanup_mqs()."""

    amgr = Amgr(hostname=hostname, port=port)
    sid  = amgr._sid

    amgr._setup_mqs()
    amgr._cleanup_mqs()

    conn = pika.BlockingConnection(
               pika.ConnectionParameters(host=hostname, port=port))

    qs = ['%s-tmgr-to-sync' % sid,
          '%s-cb-to-sync' % sid,
          '%s-enq-to-sync' % sid,
          '%s-deq-to-sync' % sid,
          '%s-sync-to-tmgr' % sid,
          '%s-sync-to-cb' % sid,
          '%s-sync-to-enq' % sid,
          '%s-sync-to-deq' % sid,
          '%s-pendingq-1' % sid,
          '%s-completedq-1' % sid]

    for q in qs:
        # purging a deleted queue must close the channel with ChannelClosed;
        # a fresh channel is needed per attempt since the error kills it
        with pytest.raises(pika.exceptions.ChannelClosed):
            conn.channel().queue_purge(q)
def test_wfp_check_processor():
    """check_processor() tracks the processor's start/terminate lifecycle."""

    task = Task()
    task.executable = ['/bin/date']

    stage = Stage()
    stage.add_tasks(task)

    pipe = Pipeline()
    pipe.add_stages(stage)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[pipe],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    # processor reports alive while running, dead afterwards
    wfp.start_processor()
    assert wfp.check_processor()

    wfp.terminate_processor()
    assert not wfp.check_processor()
def test_amgr_cleanup_mqs():
    """_cleanup_mqs() must delete every queue created for the session."""

    amgr = Amgr(hostname=hostname, port=port)
    sid = amgr._sid
    amgr._setup_mqs()
    amgr._cleanup_mqs()

    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=hostname, port=port))

    queue_names = ['%s-tmgr-to-sync' % sid, '%s-cb-to-sync' % sid,
                   '%s-enq-to-sync' % sid, '%s-deq-to-sync' % sid,
                   '%s-sync-to-tmgr' % sid, '%s-sync-to-cb' % sid,
                   '%s-sync-to-enq' % sid, '%s-sync-to-deq' % sid,
                   '%s-pendingq-1' % sid, '%s-completedq-1' % sid]

    for name in queue_names:
        # ChannelClosed invalidates the channel, so open a new one each time
        with pytest.raises(pika.exceptions.ChannelClosed):
            channel = mq_connection.channel()
            channel.queue_purge(name)
def test_amgr_terminate():
    """terminate() must shut the app manager down with a live task manager."""

    from radical.entk.execman.rp import TaskManager

    res_dict = {'resource': 'xsede.supermic',
                'walltime': 30,
                'cpus': 20,
                'project': 'TG-MCB090174'}

    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)
    amgr.resource_desc = res_dict

    amgr._setup_mqs()
    amgr._rmq_cleanup = True

    # give terminate() a task manager to tear down
    amgr._task_manager = TaskManager(sid='test',
                                     pending_queue=list(),
                                     completed_queue=list(),
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._resource_manager,
                                     port=amgr._port)

    amgr.terminate()
def test_wfp_start_processor():
    """start_processor() forks the wfp process; worker threads stay unset in the parent."""

    task = Task()
    task.executable = ['/bin/date']
    stage = Stage()
    stage.add_tasks(task)
    pipe = Pipeline()
    pipe.add_stages(stage)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[pipe],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    assert wfp.start_processor()

    # the enqueue/dequeue threads live in the child process, so the
    # parent-side handles stay unset and no terminate flag is raised
    assert not wfp._enqueue_thread
    assert not wfp._dequeue_thread
    assert not wfp._enqueue_thread_terminate.is_set()
    assert not wfp._dequeue_thread_terminate.is_set()
    assert not wfp._wfp_terminate.is_set()
    assert wfp._wfp_process.is_alive()

    # clean shutdown
    wfp._wfp_terminate.set()
    wfp._wfp_process.join()
def test_amgr_synchronizer():
    """The synchronizer drives all tasks of a pipeline to COMPLETED."""

    amgr = Amgr(hostname=host, port=port, username=username,
                password=password)
    amgr._setup_mqs()

    pipe  = Pipeline()
    stage = Stage()

    # Create and add 10 tasks to the stage
    for cnt in range(10):
        task = Task()
        task.executable = 'some-executable-%s' % cnt
        stage.add_tasks(task)

    pipe.add_stages(stage)
    pipe._validate()

    amgr.workflow = [pipe]

    sid  = 'test.0016'
    rmgr = BaseRmgr({}, sid, None, {})
    tmgr = BaseTmgr(sid=sid,
                    pending_queue=['pending-1'],
                    completed_queue=['completed-1'],
                    rmgr=rmgr,
                    rmq_conn_params=amgr._rmq_conn_params,
                    rts=None)

    amgr._rmgr = rmgr
    rmgr._task_manager = tmgr

    for task in pipe.stages[0].tasks:
        assert task.state == states.INITIAL
    assert pipe.stages[0].state == states.INITIAL
    assert pipe.state == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = mt.Event()
    sync_thread = mt.Thread(target=amgr._synchronizer,
                            name='synchronizer-thread')
    sync_thread.start()

    # push state updates from a separate process
    proc = mp.Process(target=func_for_synchronizer_test,
                      name='temp-proc',
                      args=(amgr._sid, pipe, tmgr))
    proc.start()
    proc.join()

    # Wait for AppManager to finish the message exchange
    # no need to set *)terminate_sync* but a timeout instead
    # amgr._terminate_sync.set()
    sync_thread.join(15)

    for task in pipe.stages[0].tasks:
        assert task.state == states.COMPLETED
def test_amgr_setup_mqs():
    """_setup_mqs() creates the session queues and records them on disk.

    The queue names read back from the bookkeeping file must exactly
    match the set of queues created for the session.
    """

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    assert len(amgr._pending_queue) == 1
    assert len(amgr._completed_queue) == 1

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=amgr._hostname, port=amgr._port))
    mq_channel = mq_connection.channel()

    qs = ['%s-tmgr-to-sync' % amgr._sid,
          '%s-cb-to-sync' % amgr._sid,
          '%s-sync-to-tmgr' % amgr._sid,
          '%s-sync-to-cb' % amgr._sid,
          '%s-pendingq-1' % amgr._sid,
          '%s-completedq-1' % amgr._sid]

    # clean up the broker state created by _setup_mqs()
    for q in qs:
        mq_channel.queue_delete(queue=q)

    # FIX: replaced the in-place enumerate/strip mutation loop with a
    # comprehension -- same result, idiomatic form
    with open('.%s.txt' % amgr._sid, 'r') as fp:
        lines = [line.strip() for line in fp.readlines()]

    assert set(qs) == set(lines)
def test_sid_in_mqs():
    """Every sync queue created by _setup_mqs() carries the session id.

    Consuming from each expected queue must succeed; a missing queue
    raises, which is surfaced as EnTKError.
    """

    appman = Amgr(hostname=hostname, port=port)
    appman._setup_mqs()
    sid = appman._sid

    qs = ['%s-tmgr-to-sync' % sid,
          '%s-cb-to-sync' % sid,
          '%s-enq-to-sync' % sid,
          '%s-deq-to-sync' % sid,
          '%s-sync-to-tmgr' % sid,
          '%s-sync-to-cb' % sid,
          '%s-sync-to-enq' % sid,
          '%s-sync-to-deq' % sid]

    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=hostname, port=port))
    mq_channel = mq_connection.channel()

    def callback():
        # FIX: `print True` is Python-2-only syntax; the parenthesized
        # form is valid on both Python 2 and 3.
        # NOTE(review): pika delivers (ch, method, properties, body) to
        # consumer callbacks, so this zero-arg signature would fail if a
        # message were ever delivered -- confirm it is meant as a stub.
        print(True)

    for q in qs:
        try:
            # registering a consumer on a missing queue raises
            mq_channel.basic_consume(callback, queue=q, no_ack=True)
        except Exception as ex:
            raise EnTKError(ex)
def test_sid_in_mqs():
    """All session-id-prefixed sync queues must exist after _setup_mqs().

    basic_consume on a nonexistent queue raises; any such failure is
    re-raised as EnTKError.
    """

    appman = Amgr(hostname=hostname, port=port)
    appman._setup_mqs()
    sid = appman._sid

    qs = ['%s-tmgr-to-sync' % sid,
          '%s-cb-to-sync' % sid,
          '%s-enq-to-sync' % sid,
          '%s-deq-to-sync' % sid,
          '%s-sync-to-tmgr' % sid,
          '%s-sync-to-cb' % sid,
          '%s-sync-to-enq' % sid,
          '%s-sync-to-deq' % sid]

    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=hostname, port=port))
    mq_channel = mq_connection.channel()

    def callback():
        # FIX: `print True` (Python-2-only statement form) replaced with
        # `print(True)`, valid on both Python 2 and 3.
        # NOTE(review): pika consumer callbacks receive four arguments;
        # this stub would fail if invoked -- confirm it is never called.
        print(True)

    for q in qs:
        try:
            mq_channel.basic_consume(callback, queue=q, no_ack=True)
        except Exception as ex:
            raise EnTKError(ex)
def test_sid_in_mqs():
    """Consuming from each session-prefixed sync queue must succeed."""
    # FIXME: what is tested / asserted here?

    appman = Amgr(hostname=host, port=port)
    sid = appman._sid
    appman._setup_mqs()

    queue_names = ['%s-tmgr-to-sync' % sid,
                   '%s-cb-to-sync' % sid,
                   '%s-sync-to-tmgr' % sid,
                   '%s-sync-to-cb' % sid]

    conn = pika.BlockingConnection(pika.ConnectionParameters(
        host=host, port=port))
    channel = conn.channel()

    def callback():
        pass

    for name in queue_names:
        try:
            # raises if the queue does not exist -> wrapped as EnTKError
            channel.basic_consume(callback, queue=name, no_ack=True)
        except Exception as ex:
            raise EnTKError(ex)
def test_wfp_workflow_incomplete():
    """workflow_incomplete() flips to False once all tasks are dequeued as DONE."""

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()
    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    # FIX: the original had `p.stages[0].state == states.SCHEDULING` and
    # `p.state == states.SCHEDULED` here -- bare comparisons with no
    # effect (dead statements). Removed; turning them into assignments
    # would exercise the state machine differently, so that intent is
    # left flagged for review rather than guessed at.

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    import json
    import pika

    # publish the (single) completed task so the dequeuer can drain it
    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()
    mq_channel.basic_publish(exchange='',
                             routing_key='%s-completedq-1' % amgr._sid,
                             body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer,
                         name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test,
                   name='temp-proc',
                   args=(wfp, ))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
def test_wfp_dequeue():
    """Dequeueing a completed task drives task, stage and pipeline to DONE."""

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = '/bin/date'
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp.initialize_workflow()

    assert p.state == states.INITIAL
    assert p.stages[0].state == states.INITIAL
    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    # FIX: removed `p.state == states.SCHEDULED` and
    # `p.stages[0].state == states.SCHEDULING` -- comparison statements
    # with no effect (dead code). Converting them to assignments would
    # change which state transitions the test exercises, so the intent
    # is flagged for review instead of silently changed.

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    # publish the completed task onto the completed queue
    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=amgr._hostname, port=amgr._port))
    mq_channel = mq_connection.channel()
    mq_channel.basic_publish(exchange='',
                             routing_key='%s' % amgr._completed_queue[0],
                             body=task_as_dict)

    wfp.start_processor()

    th = mt.Thread(target=func_for_dequeue_test,
                   name='temp-proc',
                   args=(p, ))
    th.start()
    th.join()

    wfp.terminate_processor()

    assert p.state == states.DONE
    assert p.stages[0].state == states.DONE
    for t in p.stages[0].tasks:
        assert t.state == states.DONE
def test_wfp_workflow_incomplete():
    """workflow_incomplete() becomes False after the dequeuer drains all tasks."""

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()
    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    # FIX: dropped `p.stages[0].state == states.SCHEDULING` and
    # `p.state == states.SCHEDULED` -- comparisons used as statements,
    # which have no effect. Removed as dead code; the intended
    # assignments would alter the exercised state transitions, so that
    # change is left flagged for review.

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    import json
    import pika

    # feed the completed task to the completed queue for the dequeuer
    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()
    mq_channel.basic_publish(exchange='',
                             routing_key='%s-completedq-1' % amgr._sid,
                             body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer,
                         name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test,
                   name='temp-proc',
                   args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
def test_amgr_synchronizer():
    """Synchronizer moves tasks (and stage, pipeline) to SCHEDULING."""

    logger   = ru.get_logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')
    amgr     = Amgr(hostname=hostname, port=port)

    # NOTE(review): this connection/channel pair is never used below --
    # kept only to preserve the original broker-connection side effect
    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=hostname, port=port))
    mq_channel = mq_connection.channel()

    amgr._setup_mqs()

    pipe  = Pipeline()
    stage = Stage()

    # Create and add 100 tasks to the stage
    for cnt in range(100):
        task = Task()
        task.executable = ['some-executable-%s' % cnt]
        stage.add_tasks(task)

    pipe.add_stages(stage)
    pipe._assign_uid(amgr._sid)
    pipe._validate()

    amgr.workflow = [pipe]

    for task in pipe.stages[0].tasks:
        assert task.state == states.INITIAL
    assert pipe.stages[0].state == states.INITIAL
    assert pipe.state == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer,
                         name='synchronizer-thread')
    sync_thread.start()

    # push task state updates from a separate process
    proc = Process(target=func_for_synchronizer_test,
                   name='temp-proc',
                   args=(amgr._sid, pipe, logger, profiler))
    proc.start()
    proc.join()

    for task in pipe.stages[0].tasks:
        assert task.state == states.SCHEDULING
    assert pipe.stages[0].state == states.SCHEDULING
    assert pipe.state == states.SCHEDULING

    amgr._terminate_sync.set()
    sync_thread.join()
def test_amgr_synchronizer():
    """All tasks, their stage and the pipeline must reach SCHEDULING."""

    logger   = ru.Logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    p = Pipeline()
    s = Stage()

    # build a stage of 100 tasks
    for idx in range(100):
        t = Task()
        t.executable = ['some-executable-%s' % idx]
        s.add_tasks(t)
    p.add_stages(s)

    p._assign_uid(amgr._sid)
    p._validate()
    amgr.workflow = [p]

    # everything starts out INITIAL
    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL
    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    # run the synchronizer in a background thread ...
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer,
                         name='synchronizer-thread')
    sync_thread.start()

    # ... while a child process publishes state updates
    proc = Process(target=func_for_synchronizer_test,
                   name='temp-proc',
                   args=(amgr._sid, p, logger, profiler))
    proc.start()
    proc.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULING
    assert p.stages[0].state == states.SCHEDULING
    assert p.state == states.SCHEDULING

    amgr._terminate_sync.set()
    sync_thread.join()
def test_wfp_enqueue():
    """Enqueueing moves tasks from INITIAL to SCHEDULED."""

    pipe  = Pipeline()
    stage = Stage()
    task  = Task()
    task.executable = ['/bin/date']
    stage.add_tasks(task)
    pipe.add_stages(stage)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[pipe],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)
    wfp._initialize_workflow()

    amgr.workflow = [pipe]
    profiler = ru.Profiler(name='radical.entk.temp')

    for task in pipe.stages[0].tasks:
        assert task.state == states.INITIAL
    assert pipe.stages[0].state == states.INITIAL
    assert pipe.state == states.INITIAL

    # synchronizer thread consumes the state updates the enqueuer emits
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer,
                         name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_enqueue_test,
                   name='temp-proc',
                   args=(wfp, ))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    for task in pipe.stages[0].tasks:
        assert task.state == states.SCHEDULED
    assert pipe.stages[0].state == states.SCHEDULED
    assert pipe.state == states.SCHEDULING
def test_amgr_setup_mqs():
    """_setup_mqs() returns True and records the sync queues on disk.

    The bookkeeping file lists more queues than the sync set (it also
    holds the pending/completed queues), hence the strict-subset check.
    """

    amgr = Amgr(hostname=hostname, port=port)

    # FIX: `== True` replaced by `is True` -- same strictness for a
    # boolean return, idiomatic identity check
    assert amgr._setup_mqs() is True

    assert len(amgr._pending_queue) == 1
    assert len(amgr._completed_queue) == 1

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()

    qs = ['%s-tmgr-to-sync' % amgr._sid,
          '%s-cb-to-sync' % amgr._sid,
          '%s-enq-to-sync' % amgr._sid,
          '%s-deq-to-sync' % amgr._sid,
          '%s-sync-to-tmgr' % amgr._sid,
          '%s-sync-to-cb' % amgr._sid,
          '%s-sync-to-enq' % amgr._sid,
          '%s-sync-to-deq' % amgr._sid]

    # clean up the broker queues created above
    for q in qs:
        mq_channel.queue_delete(queue=q)

    # FIX: in-place enumerate/strip loop replaced with a comprehension
    with open('.%s.txt' % amgr._sid, 'r') as fp:
        lines = [line.strip() for line in fp.readlines()]

    assert set(qs) < set(lines)
def test_amgr_setup_mqs():
    """_setup_mqs() succeeds and the on-disk queue list contains the sync queues.

    Strict subset: the file also lists the pending/completed queues.
    """

    amgr = Amgr(hostname=hostname, port=port)

    # FIX: `== True` replaced by `is True` -- identical strictness,
    # idiomatic for a boolean return
    assert amgr._setup_mqs() is True

    assert len(amgr._pending_queue) == 1
    assert len(amgr._completed_queue) == 1

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()

    qs = ['%s-tmgr-to-sync' % amgr._sid,
          '%s-cb-to-sync' % amgr._sid,
          '%s-enq-to-sync' % amgr._sid,
          '%s-deq-to-sync' % amgr._sid,
          '%s-sync-to-tmgr' % amgr._sid,
          '%s-sync-to-cb' % amgr._sid,
          '%s-sync-to-enq' % amgr._sid,
          '%s-sync-to-deq' % amgr._sid]

    for q in qs:
        mq_channel.queue_delete(queue=q)

    # FIX: `for i in range(len(lines))` index-mutation anti-pattern
    # replaced with a comprehension
    with open('.%s.txt' % amgr._sid, 'r') as fp:
        lines = [line.strip() for line in fp.readlines()]

    assert set(qs) < set(lines)
def test_wfp_enqueue():
    """After enqueueing, tasks and stage are SCHEDULED, pipeline is SCHEDULING."""

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)
    wfp._initialize_workflow()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    # precondition: everything INITIAL
    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL
    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer,
                         name='synchronizer-thread')
    sync_thread.start()

    # drive the enqueue path in a child process
    proc = Process(target=func_for_enqueue_test,
                   name='temp-proc',
                   args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED
    assert p.stages[0].state == states.SCHEDULED
    assert p.state == states.SCHEDULING
def test_wfp_enqueue():
    """Enqueueing schedules tasks/stage; the pipeline stays in SCHEDULING."""

    task = Task()
    task.executable = '/bin/date'
    stage = Stage()
    stage.add_tasks(task)
    pipe = Pipeline()
    pipe.add_stages(stage)

    amgr = Amgr(hostname=hostname, port=port, username=username,
                password=password)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[pipe],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      rmq_conn_params=amgr._rmq_conn_params,
                      resubmit_failed=False)

    # uids are assigned on construction
    assert pipe.uid is not None
    assert pipe.stages[0].uid is not None
    for task in pipe.stages[0].tasks:
        assert task.uid is not None

    # everything starts INITIAL
    assert pipe.state == states.INITIAL
    assert pipe.stages[0].state == states.INITIAL
    for task in pipe.stages[0].tasks:
        assert task.state == states.INITIAL

    wfp.start_processor()

    th = mt.Thread(target=func_for_enqueue_test,
                   name='temp-proc',
                   args=(pipe,))
    th.start()
    th.join()

    wfp.terminate_processor()

    assert pipe.state == states.SCHEDULING
    assert pipe.stages[0].state == states.SCHEDULED
    for task in pipe.stages[0].tasks:
        assert task.state == states.SCHEDULED
def test_wfp_workflow_incomplete():
    """workflow_incomplete() is False once every task has been dequeued."""

    task = Task()
    task.executable = '/bin/date'
    stage = Stage()
    stage.add_tasks(task)
    pipe = Pipeline()
    pipe.add_stages(stage)

    amgr = Amgr(hostname=hostname, port=port, username=username,
                password=password)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[pipe],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      rmq_conn_params=amgr._rmq_conn_params,
                      resubmit_failed=False)

    # mark all tasks completed up front
    for task in pipe.stages[0].tasks:
        task.state = states.COMPLETED

    # publish the completed task onto the completed queue
    task_as_dict = json.dumps(task.to_dict())
    credentials = pika.PlainCredentials(amgr._username, amgr._password)
    conn = pika.BlockingConnection(pika.ConnectionParameters(
        host=amgr._hostname, port=amgr._port, credentials=credentials))
    channel = conn.channel()
    channel.basic_publish(exchange='',
                          routing_key='%s' % amgr._completed_queue[0],
                          body=task_as_dict)

    wfp.start_processor()

    th = mt.Thread(target=func_for_dequeue_test,
                   name='temp-proc',
                   args=(pipe,))
    th.start()
    th.join()

    wfp.terminate_processor()

    assert not wfp.workflow_incomplete()
def test_amgr_setup_mqs():
    """_setup_mqs() creates one pending and one completed queue (plus sync queues)."""

    amgr = Amgr(hostname=host, port=port)
    amgr._setup_mqs()

    assert len(amgr._pending_queue) == 1
    assert len(amgr._completed_queue) == 1

    conn = pika.BlockingConnection(pika.ConnectionParameters(
        host=amgr._hostname, port=amgr._port))
    channel = conn.channel()

    # clean up: delete every queue the session created
    for q in ['%s-tmgr-to-sync' % amgr._sid,
              '%s-cb-to-sync' % amgr._sid,
              '%s-sync-to-tmgr' % amgr._sid,
              '%s-sync-to-cb' % amgr._sid,
              '%s-pendingq-1' % amgr._sid,
              '%s-completedq-1' % amgr._sid]:
        channel.queue_delete(queue=q)
def test_amgr_cleanup_mqs():
    """After _cleanup_mqs() no session queue may survive (authenticated broker)."""

    amgr = Amgr(hostname=host, port=port, username=username,
                password=password)
    sid = amgr._sid

    amgr._setup_mqs()
    amgr._cleanup_mqs()

    credentials = pika.PlainCredentials(username, password)
    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=host, port=port, credentials=credentials))

    qs = ['%s-tmgr-to-sync' % sid,
          '%s-cb-to-sync' % sid,
          '%s-sync-to-tmgr' % sid,
          '%s-sync-to-cb' % sid,
          '%s-pendingq-1' % sid,
          '%s-completedq-1' % sid]

    for queue in qs:
        # purging a deleted queue closes the channel, so open a fresh
        # channel for each purge attempt
        with pytest.raises(pika.exceptions.ChannelClosed):
            mq_connection.channel().queue_purge(queue)