def test_amgr_resource_terminate():
    """Exercise Amgr.resource_terminate() with a live task manager attached."""

    from radical.entk.execman.rp import TaskManager

    # Resource request used for this test run.
    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)
    amgr.resource_desc = res_dict

    # Bring up the messaging queues and mark them for cleanup on teardown.
    amgr._setup_mqs()
    amgr._rmq_cleanup = True

    # Attach a task manager so resource_terminate() has one to shut down.
    amgr._task_manager = TaskManager(sid='test',
                                     pending_queue=[],
                                     completed_queue=[],
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._resource_manager,
                                     port=amgr._port)

    amgr.resource_terminate()
def test_issue_26():
    """Two successive runs on one AppManager must produce consecutive
    stage and task uids (regression test for issue #26)."""

    def uid_index(entity):
        # Numeric suffix of an entity uid, e.g. 'stage.0003' -> 3.
        return int(entity.uid.split('.')[-1])

    appman = AppManager(hostname=hostname, port=port, username=username,
                        password=password, autoterminate=False)
    appman.resource_desc = {'resource': 'local.localhost',
                            'walltime': 10,
                            'cpus': 1,
                            'project': ''}

    p1 = create_pipeline()
    p2 = create_pipeline()

    appman.workflow = [p1]
    appman.run()

    appman.workflow = [p2]
    appman.run()

    appman.resource_terminate()

    # Stage uids of the second run continue where the first run stopped.
    lhs = uid_index(p1.stages[0]) + 1
    rhs = uid_index(p2.stages[0])
    assert lhs == rhs

    # Same continuity property for every task pair across the two runs.
    for t in p1.stages[0].tasks:
        for tt in p2.stages[0].tasks:
            assert uid_index(t) + 1 == uid_index(tt)
def test_amgr_resource_terminate():
    """resource_terminate() must cleanly wind down an Amgr that has both
    message queues and a task manager running."""

    from radical.entk.execman.rp import TaskManager

    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)

    # Describe the target resource directly on the manager.
    amgr.resource_desc = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    amgr._setup_mqs()
    amgr._rmq_cleanup = True

    # A task manager is required for the terminate path under test.
    amgr._task_manager = TaskManager(
        sid='test',
        pending_queue=list(),
        completed_queue=list(),
        mq_hostname=amgr._mq_hostname,
        rmgr=amgr._resource_manager,
        port=amgr._port)

    amgr.resource_terminate()
def test_issue_26(): def create_pipeline(): p = Pipeline() s = Stage() t1 = Task() t1.name = 'simulation' t1.executable = ['/bin/echo'] t1.arguments = ['hello'] t1.copy_input_data = [] t1.copy_output_data = [] s.add_tasks(t1) p.add_stages(s) return p res_dict = { 'resource': 'local.localhost', 'walltime': 10, 'cpus': 1, 'project': '' } os.environ['RADICAL_PILOT_DBURL'] = MLAB appman = AppManager(hostname=hostname, port=port, autoterminate=False) appman.resource_desc = res_dict p1 = create_pipeline() appman.workflow = [p1] appman.run() print p1.uid, p1.stages[0].uid p2 = create_pipeline() appman.workflow = [p2] appman.run() print p2.uid, p2.stages[0].uid appman.resource_terminate() lhs = int(p1.stages[0].uid.split('.')[-1]) + 1 rhs = int(p2.stages[0].uid.split('.')[-1]) assert lhs == rhs for t in p1.stages[0].tasks: for tt in p2.stages[0].tasks: lhs = int(t.uid.split('.')[-1]) + 1 rhs = int(tt.uid.split('.')[-1]) assert lhs == rhs
def test_issue_26(): def create_pipeline(): p = Pipeline() s = Stage() t1 = Task() t1.name = 'simulation' t1.executable = ['/bin/echo'] t1.arguments = ['hello'] t1.copy_input_data = [] t1.copy_output_data = [] s.add_tasks(t1) p.add_stages(s) return p res_dict = { 'resource': 'local.localhost', 'walltime': 5, 'cpus': 1, 'project': '' } os.environ['RADICAL_PILOT_DBURL'] = MLAB appman = AppManager(hostname=hostname, port=port, autoterminate=False) appman.resource_desc = res_dict p1 = create_pipeline() appman.workflow = [p1] appman.run() print p1.uid, p1.stages[0].uid p2 = create_pipeline() appman.workflow = [p2] appman.run() print p2.uid, p2.stages[0].uid appman.resource_terminate() lhs = int(p1.stages[0].uid.split('.')[-1]) + 1 rhs = int(p2.stages[0].uid.split('.')[-1]) assert lhs == rhs for t in p1.stages[0].tasks: for tt in p2.stages[0].tasks: lhs = int(t.uid.split('.')[-1]) + 1 rhs = int(tt.uid.split('.')[-1]) assert lhs == rhs
class Runner(object): def __init__(self): self._cores = 0 self._protocols = list() self._hostname = None self._port = None self.ids = None self.app_manager = None self.total_replicas = 0 # Profiler for Runner self._uid = ru.generate_id('radical.yank.workflow_runner') self._logger = ru.get_logger('radical.yank.workflow_runner') self._prof = ru.Profiler(name=self._uid) self._prof.prof('create workflow_runner obj', uid=self._uid) self._root_directories = list() self.ids = dict() def add_protocol(self, protocol): self._protocols.append(protocol) @property def cores(self): return self._cores @cores.setter def cores(self, val): if isinstance(val, int): self._cores = val else: raise TypeError() def rabbitmq_config(self, hostname='localhost', port=5672): self._hostname = hostname self._port = port def run(self, strong_scaled=1, autoterminate=True, queue='high', walltime=1440): pipelines = set() input_data = list() for protocol in self._protocols: gen_pipeline = protocol.generate_pipeline() pipelines.add(gen_pipeline) input_data.extend(protocol.input_data) self.ids[protocol.id()] = gen_pipeline # protocol.id is the uuid, gen_pipeline.uid is the pipeline self.total_replicas += protocol.replicas self._cores = self._cores * self.total_replicas print 'Running on', self._cores, 'cores.' res_dict = {'resource': 'ncsa.bw_aprun', 'walltime': walltime, 'cores': int(self._cores*strong_scaled), 'project': 'bamm', 'queue': queue, 'access_schema': 'gsissh'} # Create Resource Manager object with the above resource description resource_manager = ResourceManager(res_dict) resource_manager.shared_data = input_data # Create Application Manager self.app_manager = AppManager(hostname=self._hostname, port=self._port, autoterminate=autoterminate) self.app_manager.resource_manager = resource_manager self.app_manager.assign_workflow(pipelines) self._prof.prof('execution_run') print 'Running...' 
self.app_manager.run() # this method is blocking until all pipelines show state = completed def rerun(self, protocol=None, terminate=True, previous_pipeline=None): if self.ids.get(previous_pipeline.id(), None) is not None: pipelines = set() gen_pipeline = protocol.generate_pipeline(previous_pipeline=self.ids[previous_pipeline.id()]) pipelines.add(gen_pipeline) self.ids[protocol.id()] = gen_pipeline self.app_manager.assign_workflow(pipelines) self.app_manager.run() if terminate: self.app_manager.resource_terminate() else: print "ERROR: previous protocol instance is not found"
#'queue': 'workq', #'project': 'TG-MCB090174', 'project': 'bamm', } rman = ResourceManager(res_dict) # Create Application Manager appman = AppManager(autoterminate=False, port=33004) # Assign resource manager to the Application Manager appman.resource_manager = rman p = init_cycle() # Assign the workflow as a set of Pipelines to the Application Manager appman.assign_workflow(set([p])) # Run the Application Manager appman.run() # for k in range (Cycles): # p = cycle(k) #print p.uid # Assign the workflow as a set of Pipelines to the Application Manager # appman.assign_workflow(set([p])) # Run the Application Manager # appman.run() appman.resource_terminate()
# Resource manager built from the resource description defined above.
rman = ResourceManager(res_dict)

# Application manager: keep the allocation alive after each run
# (autoterminate=False) so further workflows could be submitted.
appman = AppManager(autoterminate=False, port=33004)
appman.resource_manager = rman

# Build the initial-cycle pipeline and hand it over as the workflow.
p = init_cycle()
appman.assign_workflow(set([p]))

# Execute the workflow (blocks until completion).
appman.run()

# Follow-up cycles, currently disabled:
# for k in range (Cycles):
#     p = cycle(k)
#     # print p.uid
#     appman.assign_workflow(set([p]))
#     appman.run()

# Explicitly release the resource allocation.
appman.resource_terminate()