def test_stage_post_exec():

    global p1

    p1.name = 'p1'

    s = Stage()
    s.name = 's1'

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = condition

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 30,
        'cpus': 1,
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(rts='radical.pilot', hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
def generate_machine_learning_stage(self) -> Stage:
    stage = Stage()
    stage.name = self.MACHINE_LEARNING_STAGE_NAME
    cfg = self.cfg.machine_learning_stage
    stage_api = self.api.machine_learning_stage

    task_idx = 0
    output_path = stage_api.task_dir(self.stage_idx, task_idx, mkdir=True)
    assert output_path is not None

    # Update base parameters
    cfg.task_config.experiment_directory = self.cfg.experiment_directory
    cfg.task_config.stage_idx = self.stage_idx
    cfg.task_config.task_idx = task_idx
    cfg.task_config.node_local_path = self.cfg.node_local_path
    cfg.task_config.output_path = output_path
    cfg.task_config.model_tag = stage_api.unique_name(output_path)
    if self.stage_idx > 0:
        # Machine learning should use model selection API
        cfg.task_config.init_weights_path = None

    # Write yaml configuration
    cfg_path = stage_api.config_path(self.stage_idx, task_idx)
    cfg.task_config.dump_yaml(cfg_path)

    task = generate_task(cfg)
    task.arguments += ["-c", cfg_path.as_posix()]
    stage.add_tasks(task)

    return stage
def generate_pipeline():

    global CUR_TASKS, CUR_CORES, duration, MAX_NEW_STAGE

    def func_condition():
        global CUR_NEW_STAGE, MAX_NEW_STAGE
        if CUR_NEW_STAGE < MAX_NEW_STAGE:
            return True
        return False

    def func_on_true():
        global CUR_NEW_STAGE
        CUR_NEW_STAGE += 1
        for t in p.stages[CUR_NEW_STAGE].tasks:
            cores = randint(1, 16)
            t.arguments = ['-c', str(cores), '-t', str(duration)]

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE + 1):

        # Create a Stage object
        s1 = Stage()

        for i in range(CUR_TASKS):
            t1 = Task()
            t1.pre_exec = ['export PATH=/u/sciteam/balasubr/modules/stress-ng-0.09.34:$PATH']
            t1.executable = ['stress-ng']
            t1.arguments = ['-c', str(CUR_CORES), '-t', str(duration)]
            t1.cpu_reqs = {
                'processes': 1,
                'process_type': '',
                'threads_per_process': CUR_CORES,
                'thread_type': ''
            }

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        s1.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
def generate_pipeline(name, stages):

    # Create a Pipeline object
    p = Pipeline()
    p.name = name

    for s_cnt in range(stages):

        # Create a Stage object
        s = Stage()
        s.name = 'Stage %s' % s_cnt

        for t_cnt in range(5):

            # Create a Task object
            t = Task()
            t.name = 'my-task'          # Assign a name to the task (optional)
            t.executable = '/bin/echo'  # Assign executable to the task
            # Assign arguments for the task executable
            t.arguments = ['I am task %s in %s in %s' % (t_cnt, s_cnt, name)]

            # Add the Task to the Stage
            s.add_tasks(t)

        # Add Stage to the Pipeline
        p.add_stages(s)

    return p
def describe_MD_stages():

    # Docking stage
    s1 = Stage()
    s1.name = 'Docking.%d' % CUR_NEW_STAGE

    # Docking task
    t1 = Task()
    t1.executable = ['sleep']
    t1.arguments = ['3']

    # Add the Docking task to the Docking Stage
    s1.add_tasks(t1)

    # MD stage
    s2 = Stage()
    s2.name = 'Simulation.%d' % CUR_NEW_STAGE

    # Each Task() is an OpenMM executable that will run on a single GPU.
    # Set sleep time for local testing
    for i in range(6):
        t2 = Task()
        t2.executable = ['sleep']
        t2.arguments = ['5']

        # Add the MD task to the Simulation Stage
        s2.add_tasks(t2)

    # Add post-exec to the Stage
    s2.post_exec = func_condition

    return [s1, s2]
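# Hedged sketch (assumption, not from the original source): `func_condition` is
# referenced above but defined elsewhere. With the callable-style post_exec used
# here, EnTK simply invokes the callable after the stage finishes and the callable
# itself decides whether to extend the pipeline -- the same pattern post_stage()
# uses further below. CUR_NEW_STAGE, MAX_NEW_STAGE and `p` are assumed module-level
# names from the surrounding script.
def func_condition():
    global CUR_NEW_STAGE
    if CUR_NEW_STAGE < MAX_NEW_STAGE:
        CUR_NEW_STAGE += 1
        # Append another Docking + Simulation round to the running pipeline
        p.add_stages(describe_MD_stages())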
def add_ex_stg(rid, cycle):

    # Exchange stage
    ex_tsk = Task()
    ex_stg = Stage()
    ex_tsk.name = 'extsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle)

    # Link the mdinfo file of every waiting replica into the exchange task
    # (the loop variable is renamed so it does not shadow the `rid` argument)
    for r in range(len(waiting_replicas)):
        ex_tsk.link_input_data += ['%s/mdinfo-{replica}-{cycle}'.format(replica=r, cycle=cycle) % replica_sandbox]

    ex_tsk.arguments = ['t_ex_gibbs.py', len(waiting_replicas)]  # This needs to be fixed
    ex_tsk.executable = ['python']
    ex_tsk.cpu_reqs = {
        'processes': 1,
        'process_type': '',
        'threads_per_process': 1,
        'thread_type': None
    }
    ex_tsk.pre_exec = ['export dummy_variable=19']

    ex_stg.add_tasks(ex_tsk)
    ex_stg.post_exec = {
        'condition': post_ex,
        'on_true': terminate_replicas,
        'on_false': continue_md
    }

    return ex_stg
def add_md_stg(rid, cycle):

    # MD stage
    md_tsk = Task()
    md_stg = Stage()
    md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle)

    md_tsk.link_input_data += ['%s/inpcrd' % replica_sandbox,
                               '%s/prmtop' % replica_sandbox,
                               '%s/mdin-{replica}-{cycle}'.format(replica=rid, cycle=0) % replica_sandbox]
    md_tsk.arguments = ['-O',
                        '-i', 'mdin-{replica}-{cycle}'.format(replica=rid, cycle=0),
                        '-p', 'prmtop',
                        '-c', 'inpcrd',
                        '-o', 'out',
                        '-r', '%s/restrt-{replica}-{cycle}'.format(replica=rid, cycle=cycle) % replica_sandbox,
                        '-x', 'mdcrd',
                        '-inf', '%s/mdinfo-{replica}-{cycle}'.format(replica=rid, cycle=cycle) % replica_sandbox]
    md_tsk.executable = ['/home/scm177/mantel/AMBER/amber14/bin/sander']
    md_tsk.cpu_reqs = {
        'processes': replica_cores,
        'process_type': '',
        'threads_per_process': 1,
        'thread_type': None
    }
    md_tsk.pre_exec = ['export dummy_variable=19', 'echo $SHARED']

    md_stg.add_tasks(md_tsk)
    md_stg.post_exec = {
        'condition': md_post,
        'on_true': suspend,
        'on_false': exchange_stg
    }

    return md_stg
def post_stage():
    if not os.path.exists(f'{run_dir}/aggregator/stop.aggregator'):
        nstages = len(p.stages)

        s = Stage()
        s.name = f"{nstages}"

        t = Task()
        t.cpu_reqs = {
            'processes': 1,
            'process_type': None,
            'threads_per_process': 4,
            'thread_type': 'OpenMP'
        }
        t.gpu_reqs = {
            'processes': 0,
            'process_type': None,
            'threads_per_process': 0,
            'thread_type': None
        }
        t.name = f" {i}_{nstages} "
        t.executable = PYTHON
        t.arguments = [
            f'{current_dir}/simulation.py',
            f'{run_dir}/simulations/all/{i}_{nstages}',
            ADIOS_XML
        ]

        subprocess.getstatusoutput(
            f'ln -s {run_dir}/simulations/all/{i}_{nstages} {run_dir}/simulations/new/{i}_{nstages}'
        )

        s.add_tasks(t)
        s.post_exec = post_stage
        p.add_stages(s)
def test_stage_task_addition(self, mocked_init):

    s = Stage()
    s._p_pipeline = {'uid': None, 'name': None}
    s._uid = 'stage.0000'
    s._name = None
    s._tasks = set()

    t1 = mock.MagicMock(spec=Task)
    t2 = mock.MagicMock(spec=Task)
    s.add_tasks(set([t1, t2]))

    self.assertIsInstance(s.tasks, set)
    self.assertEqual(s._task_count, 2)
    self.assertIn(t1, s.tasks)
    self.assertIn(t2, s.tasks)

    s = Stage()
    s._uid = 'stage.0000'
    s._name = None
    s._p_pipeline = {'uid': None, 'name': None}
    s._tasks = set()

    t1 = mock.MagicMock(spec=Task)
    t2 = mock.MagicMock(spec=Task)
    s.add_tasks([t1, t2])

    self.assertIsInstance(s.tasks, set)
    self.assertEqual(s._task_count, 2)
    self.assertIn(t1, s.tasks)
    self.assertIn(t2, s.tasks)
def generate_ml_stage(self) -> Stage:
    stage = Stage()
    stage.name = "learning"
    cfg = self.cfg.ml_stage

    task = Task()
    task.cpu_reqs = cfg.cpu_reqs.dict()
    task.gpu_reqs = cfg.gpu_reqs.dict()
    task.pre_exec = cfg.pre_exec
    task.executable = cfg.executable
    task.arguments = cfg.arguments

    # Update base parameters
    cfg.run_config.input_path = self.aggregated_data_path(self.cur_iteration)
    cfg.run_config.output_path = self.model_path(self.cur_iteration)

    if self.cur_iteration > 0:
        cfg.run_config.init_weights_path = self.latest_ml_checkpoint_path(
            self.cur_iteration - 1)

    cfg_path = self.experiment_dirs["ml_runs"].joinpath(
        f"ml_{self.cur_iteration:03d}.yaml")
    cfg.run_config.dump_yaml(cfg_path)
    task.arguments += ["-c", cfg_path]
    stage.add_tasks(task)

    return stage
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # Create Task objects which concatenate two input files into 'output.txt'
    for x in range(10):
        t1 = Task()
        t1.executable = 'cat'
        t1.arguments = ['file1.txt', 'file2.txt', '>', 'output.txt']
        t1.copy_input_data = ['$SHARED/file1.txt', '$SHARED/file2.txt']
        t1.download_output_data = [
            'output.txt > %s/output_%s.txt' % (cur_dir, x + 1)
        ]

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def test_amgr_synchronizer():

    amgr = Amgr(hostname=host, port=port, username=username, password=password)
    amgr._setup_mqs()

    p = Pipeline()
    s = Stage()

    # Create and add 10 tasks to the stage
    for cnt in range(10):
        t = Task()
        t.executable = 'some-executable-%s' % cnt
        s.add_tasks(t)

    p.add_stages(s)
    p._validate()

    amgr.workflow = [p]

    sid = 'test.0016'
    rmgr = BaseRmgr({}, sid, None, {})
    tmgr = BaseTmgr(sid=sid,
                    pending_queue=['pending-1'],
                    completed_queue=['completed-1'],
                    rmgr=rmgr,
                    rmq_conn_params=amgr._rmq_conn_params,
                    rts=None)

    amgr._rmgr = rmgr
    rmgr._task_manager = tmgr

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = mt.Event()
    sync_thread = mt.Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    # Run the test function in a separate process
    proc = mp.Process(target=func_for_synchronizer_test, name='temp-proc',
                      args=(amgr._sid, p, tmgr))
    proc.start()
    proc.join()

    # Wait for AppManager to finish the message exchange:
    # no need to set *_terminate_sync*, use a timeout instead
    # amgr._terminate_sync.set()
    sync_thread.join(15)

    for t in p.stages[0].tasks:
        assert t.state == states.COMPLETED
def test_stage_exceptions(self, mocked_generate_id, l, i, b, se):
    """
    ***Purpose***: Test if correct exceptions are raised when attributes are
    assigned unacceptable values.
    """
    s = Stage()
    data_type = [l, i, b, se]

    for data in data_type:

        if not isinstance(data, str):
            with self.assertRaises(TypeError):
                s.name = data

        # if isinstance(data, str):
        #     with self.assertRaises(ValueError):
        #         s.name = data

        with self.assertRaises(TypeError):
            s.tasks = data

        with self.assertRaises(TypeError):
            s.add_tasks(data)
def test_wfp_check_processor():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = '/bin/date'
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      rmq_conn_params=amgr._rmq_conn_params,
                      resubmit_failed=False)

    wfp.start_processor()
    assert wfp.check_processor()

    wfp.terminate_processor()
    assert not wfp.check_processor()
def generate_aggregating_stage(self) -> Stage:
    stage = Stage()
    stage.name = "aggregating"
    cfg = self.cfg.aggregation_stage

    # Aggregation task
    task = Task()
    task.cpu_reqs = cfg.cpu_reqs.dict()
    task.pre_exec = cfg.pre_exec
    task.executable = cfg.executable
    task.arguments = cfg.arguments

    # Update base parameters
    cfg.run_config.experiment_directory = self.cfg.experiment_directory
    cfg.run_config.output_path = self.aggregated_data_path(self.cur_iteration)

    cfg_path = self.experiment_dirs["aggregation_runs"].joinpath(
        f"aggregation_{self.cur_iteration:03d}.yaml")
    cfg.run_config.dump_yaml(cfg_path)
    task.arguments += ["-c", cfg_path]
    stage.add_tasks(task)

    return stage
def test_wfp_start_processor():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    assert wfp.start_processor()
    assert not wfp._enqueue_thread
    assert not wfp._dequeue_thread
    assert not wfp._enqueue_thread_terminate.is_set()
    assert not wfp._dequeue_thread_terminate.is_set()
    assert not wfp._wfp_terminate.is_set()
    assert wfp._wfp_process.is_alive()

    wfp._wfp_terminate.set()
    wfp._wfp_process.join()
def generate_agent_stage(self) -> Stage:
    stage = Stage()
    stage.name = self.AGENT_STAGE_NAME
    cfg = self.cfg.agent_stage
    stage_api = self.api.agent_stage

    task_idx = 0
    output_path = stage_api.task_dir(self.stage_idx, task_idx, mkdir=True)
    assert output_path is not None

    # Update base parameters
    cfg.task_config.experiment_directory = self.cfg.experiment_directory
    cfg.task_config.stage_idx = self.stage_idx
    cfg.task_config.task_idx = task_idx
    cfg.task_config.node_local_path = self.cfg.node_local_path
    cfg.task_config.output_path = output_path

    # Write yaml configuration
    cfg_path = stage_api.config_path(self.stage_idx, task_idx)
    cfg.task_config.dump_yaml(cfg_path)

    task = generate_task(cfg)
    task.arguments += ["-c", cfg_path.as_posix()]
    stage.add_tasks(task)

    return stage
def test_wfp_check_processor():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp.start_processor()
    assert wfp.check_processor()

    wfp.terminate_processor()
    assert not wfp.check_processor()
def generate_aggregating_task(self):
    """
    Function to concatenate the MD trajectory (h5 contact map)
    """
    p = Pipeline()
    p.name = 'aggregating'

    s2 = Stage()
    s2.name = 'aggregating'

    # Aggregation task
    t2 = Task()

    # https://github.com/radical-collaboration/hyperspace/blob/MD/microscope/experiments/MD_to_CVAE/MD_to_CVAE.py
    t2.pre_exec = []
    t2.pre_exec += ['. /sw/summit/python/2.7/anaconda2/5.3.0/etc/profile.d/conda.sh']
    t2.pre_exec += ['conda activate %s' % conda_path]
    t2.pre_exec += ['cd %s' % agg_path]
    t2.executable = ['%s/bin/python' % conda_path]  # MD_to_CVAE.py
    t2.arguments = [
        '%s/MD_to_CVAE.py' % agg_path,
        '--sim_path', md_path,
        '--train_frames', 100000]

    # Assign hardware requirements to the task
    t2.cpu_reqs = {
        'processes': 1,
        'process_type': None,
        'threads_per_process': 4,
        'thread_type': 'OpenMP'
    }

    # Add the aggregation task to the aggregating stage
    s2.add_tasks(t2)
    p.add_stages(s2)

    return p
def func_on_true():

    global CUR_NEW_STAGE, CUR_TASKS, CUR_CORES, duration

    CUR_NEW_STAGE += 1

    s = Stage()

    for i in range(CUR_TASKS):
        t = Task()
        t.pre_exec = [
            'export PATH=/u/sciteam/balasubr/modules/stress-ng-0.09.34:$PATH'
        ]
        t.executable = ['stress-ng']
        t.arguments = ['-c', str(CUR_CORES), '-t', str(duration)]
        t.cpu_reqs = {
            'processes': 1,
            'process_type': '',
            'threads_per_process': CUR_CORES,
            'thread_type': ''
        }

        # Add the Task to the Stage
        s.add_tasks(t)

    # Add post-exec to the Stage
    s.post_exec = {
        'condition': func_condition,
        'on_true': func_on_true,
        'on_false': func_on_false
    }

    p.add_stages(s)
def generate_aggregating_stage():
    """
    Function to concatenate the MD trajectory (h5 contact map)
    """
    s2 = Stage()
    s2.name = 'aggregating'

    # Aggregation task
    t2 = Task()

    # https://github.com/radical-collaboration/hyperspace/blob/MD/microscope/experiments/MD_to_CVAE/MD_to_CVAE.py
    t2.pre_exec = []
    # t2.pre_exec += ['. /sw/summit/python/2.7/anaconda2/5.3.0/etc/profile.d/conda.sh']
    # t2.pre_exec += ['conda activate %s' % conda_path]
    t2.pre_exec += ['module unload python']
    t2.pre_exec += ['module load ibm-wml-ce']
    t2.pre_exec += ['cd %s' % agg_path]
    # t2.executable = ['%s/bin/python' % conda_path]  # MD_to_CVAE.py
    t2.executable = [
        '/sw/summit/ibm-wml-ce/anaconda-base/envs/ibm-wml-ce-1.7.0-2/bin/python'
    ]
    t2.arguments = ['%s/MD_to_CVAE.py' % agg_path, '--sim_path', md_path]

    # Add the aggregation task to the aggregating stage
    s2.add_tasks(t2)

    return s2
def test_stage_post_exec():

    global p1

    p1.name = 'p1'

    s = Stage()
    s.name = 's1'

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = {
        'condition': condition,
        'on_true': on_true,
        'on_false': on_false
    }

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 30,
        'cpus': 1,
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(rts='radical.pilot', hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
def generate_outlier_detection_stage(self) -> Stage:
    stage = Stage()
    stage.name = "outlier_detection"
    cfg = self.cfg.od_stage

    task = Task()
    task.cpu_reqs = cfg.cpu_reqs.dict()
    task.gpu_reqs = cfg.gpu_reqs.dict()
    task.pre_exec = cfg.pre_exec
    task.executable = cfg.executable
    task.arguments = cfg.arguments

    self.outlier_pdbs_path(self.cur_iteration).mkdir()

    # Update base parameters
    cfg.run_config.experiment_directory = self.cfg.experiment_directory
    cfg.run_config.input_path = self.aggregated_data_path(self.cur_iteration)
    cfg.run_config.output_path = self.outlier_pdbs_path(self.cur_iteration)
    cfg.run_config.weights_path = self.latest_ml_checkpoint_path(self.cur_iteration)
    cfg.run_config.restart_points_path = self.restart_points_path(self.cur_iteration)

    cfg_path = self.experiment_dirs["od_runs"].joinpath(
        f"od_{self.cur_iteration:03d}.yaml")
    cfg.run_config.dump_yaml(cfg_path)
    task.arguments += ["-c", cfg_path]
    stage.add_tasks(task)

    return stage
def test_stage_task_addition():

    s = Stage()
    t1 = Task()
    t1.executable = ['/bin/date']
    t2 = Task()
    t2.executable = ['/bin/date']
    s.add_tasks(set([t1, t2]))

    assert type(s.tasks) == set
    assert s._task_count == 2
    assert t1 in s.tasks
    assert t2 in s.tasks

    s = Stage()
    t1 = Task()
    t1.executable = ['/bin/date']
    t2 = Task()
    t2.executable = ['/bin/date']
    s.add_tasks([t1, t2])

    assert type(s.tasks) == set
    assert s._task_count == 2
    assert t1 in s.tasks
    assert t2 in s.tasks
def generate_aggregating_stage():
    """
    Function to concatenate the MD trajectory (h5 contact map)
    """
    s2 = Stage()
    s2.name = 'aggregating'

    # Aggregation task
    t2 = Task()

    # https://github.com/radical-collaboration/hyperspace/blob/MD/microscope/experiments/MD_to_CVAE/MD_to_CVAE.py
    t2.pre_exec = []
    t2.pre_exec += [
        '. /sw/summit/python/2.7/anaconda2/5.3.0/etc/profile.d/conda.sh'
    ]
    t2.pre_exec += ['conda activate rp.copy']
    t2.pre_exec += [
        'cd /gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/MD_to_CVAE'
    ]
    t2.executable = ['/ccs/home/hrlee/.conda/envs/rp.copy/bin/python']  # MD_to_CVAE.py
    t2.arguments = [
        '/gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/MD_to_CVAE/MD_to_CVAE.py',
        '-f',
        '/gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/MD_exps/fs-pep'
    ]

    # Add the aggregation task to the aggregating stage
    s2.add_tasks(t2)

    return s2
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()
    p.name = 'p1'

    # Create a Stage object
    s1 = Stage()
    s1.name = 's1'
    s1_task_uids = []

    for cnt in range(128):

        # Create a Task object
        t1 = Task()
        t1.name = 't%s' % (cnt + 1)

        # To make a python script executable:
        # 1) add the "shebang" as the first line: #!/usr/bin/env python
        # 2) chmod +x SerialCode.py
        # The executable always has to be on the target machine
        t1.executable = '~/SerialCode.py'

        # Add the Task to the Stage
        s1.add_tasks(t1)
        s1_task_uids.append(t1.name)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def test_wfp_initialization(s, b, l):

    p = Pipeline()
    stage = Stage()
    t = Task()
    t.executable = '/bin/date'
    stage.add_tasks(t)
    p.add_stages(stage)

    rmq_conn_params = pika.ConnectionParameters(host=hostname, port=port)

    wfp = WFprocessor(sid='rp.session.local.0000',
                      workflow=set([p]),
                      pending_queue=['pending'],
                      completed_queue=['completed'],
                      rmq_conn_params=rmq_conn_params,
                      resubmit_failed=True)

    assert len(wfp._uid.split('.')) == 2
    assert 'wfprocessor' == wfp._uid.split('.')[0]
    assert wfp._pending_queue == ['pending']
    assert wfp._completed_queue == ['completed']
    assert wfp._rmq_conn_params == rmq_conn_params
    assert wfp._wfp_process is None
    assert wfp._workflow == set([p])

    if not isinstance(s, str):
        wfp = WFprocessor(sid=s,
                          workflow=set([p]),
                          pending_queue=l,
                          completed_queue=l,
                          rmq_conn_params=rmq_conn_params,
                          resubmit_failed=b)
def generate_pipeline(name, stages):

    # Create a Pipeline object
    p = Pipeline()
    p.name = name

    for s_cnt in range(stages):

        # Create a Stage object
        s = Stage()
        s.name = 'Stage %s' % s_cnt

        for t_cnt in range(5):

            # Create a Task object
            t = Task()
            t.name = 'my-task'          # Assign a name to the task (optional)
            t.executable = '/bin/echo'  # Assign executable to the task
            # Assign arguments for the task executable
            t.arguments = ['I am task %s in %s in %s' % (t_cnt, s_cnt, name)]

            # Add the Task to the Stage
            s.add_tasks(t)

        # Add Stage to the Pipeline
        p.add_stages(s)

    return p
def test_stage_task_addition():

    s = Stage()
    t1 = Task()
    t1.executable = '/bin/date'
    t2 = Task()
    t2.executable = '/bin/date'
    s.add_tasks(set([t1, t2]))

    assert type(s.tasks) == set
    assert s._task_count == 2
    assert t1 in s.tasks
    assert t2 in s.tasks

    s = Stage()
    t1 = Task()
    t1.executable = '/bin/date'
    t2 = Task()
    t2.executable = '/bin/date'
    s.add_tasks([t1, t2])

    assert type(s.tasks) == set
    assert s._task_count == 2
    assert t1 in s.tasks
    assert t2 in s.tasks
def generate_pipeline(nid):

    p = Pipeline()
    s1 = Stage()
    s2 = Stage()
    t1 = Task()

    p.name = 'p%s' % nid
    s1.name = 's1'
    s2.name = 's2'
    t1.name = 't1'

    t1.executable = '/bin/echo'
    t1.arguments = ['hello']

    s1.add_tasks(t1)
    p.add_stages(s1)

    for cnt in range(10):
        tn = Task()
        tn.name = 't%s' % (cnt + 1)
        tn.executable = '/bin/echo'
        tn.arguments = ['world']

        # Copy data from the task in first stage to the current task's location
        tn.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/output.txt' %
                              (p.name, s1.name, t1.name)]

        s2.add_tasks(tn)

    p.add_stages(s2)

    return p
def create_inversion_dict_stage(cmt_file_db, param_path, task_counter):
    """Creates stage for the creation of the inversion files. This stage is
    tiny, but required before the actual inversion.

    :param cmt_file_db:
    :param param_path:
    :param task_counter:
    :return:
    """

    # Get database parameter path
    databaseparam_path = os.path.join(param_path,
                                      "Database/DatabaseParameters.yml")

    # Load Parameters
    DB_params = read_yaml_file(databaseparam_path)

    # Earthquake specific database parameters: Dir and Cid
    Cdir, Cid = get_Centry_path(DB_params["databasedir"], cmt_file_db)

    # Function
    inv_dict_func = os.path.join(bin_path, "write_inversion_dicts.py")

    # Create Process Paths Stage (CPP)
    # Create a Stage object
    inv_dict_stage = Stage()
    inv_dict_stage.name = "Creating"

    # Create Task
    inv_dict_task = Task()

    # This way the task gets the name of the path file
    inv_dict_task.name = "Inversion-Dictionaries"

    # Conda activate
    inv_dict_task.pre_exec = [DB_params["conda-activate"]]

    inv_dict_task.executable = [DB_params["bin-python"]]  # Assign exec to the task

    inv_dict_task.arguments = [inv_dict_func,
                               "-f", cmt_file_db,
                               "-p", param_path]

    # In the future maybe to database dir as a total log?
    inv_dict_task.stdout = os.path.join(
        "%s" % Cdir, "logs",
        "stdout.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4),
                                           inv_dict_task.name))

    inv_dict_task.stderr = os.path.join(
        "%s" % Cdir, "logs",
        "stderr.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4),
                                           inv_dict_task.name))

    inv_dict_stage.add_tasks(inv_dict_task)

    task_counter += 1

    return inv_dict_stage, task_counter
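# Hedged usage sketch (not from the original source): the (stage, counter) pair returned
# above lends itself to incremental pipeline assembly. `pipeline`, `cmt_file_db` and
# `param_path` are assumed to exist in the calling scope.
task_counter = 0
stage, task_counter = create_inversion_dict_stage(cmt_file_db, param_path, task_counter)
pipeline.add_stages(stage)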
def generate_stage(self):
    s = Stage()
    s.name = self.name
    s.add_tasks({self.generate_task(**x) for x in self._ensemble_product()})
    return s
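# Hedged usage sketch (not from the original source): a generator like generate_stage()
# is typically called per ensemble and its stage appended to a Pipeline before the
# workflow is handed to an AppManager, as the other snippets here do. `ensemble` is a
# hypothetical object providing name and generate_stage().
p = Pipeline()
p.name = ensemble.name
p.add_stages(ensemble.generate_stage())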
def main():

    cmd = "{0} 'ls {1}'".format(ssh, dir_)
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, _ = p.communicate()

    out = out.decode('utf-8').strip().split(linesep)

    fullpaths = [op.join(dir_, p) for p in out]
    print(fullpaths)

    # Start radical entk pipeline
    p = Pipeline()

    for i in range(iterations):

        s = Stage()

        for fp in fullpaths:

            t = Task()
            t.name = 'Incrementation {}'.format(i)
            t.pre_exec = [
                'source /home/vhayot/miniconda3/etc/profile.d/conda.sh',
                'conda activate radenv'
            ]
            t.executable = 'python /home/vhayot/inc.py'

            if i == 0:
                t.arguments = [fp, out_dir, i]
            else:
                # Note: assuming all data is accessible through a shared dir,
                # although radical entk functions without a shared fs
                t.arguments = [
                    op.join(out_dir, "it-{0}-{1}".format(i - 1, op.basename(fp))),
                    out_dir,
                    i
                ]

            s.add_tasks(t)

        # Create a new stage every time there's a dependency
        p.add_stages(s)

    appman = AppManager(hostname=hostname, port=port)

    appman.resource_desc = {
        'resource': 'xsede.bridges',
        'walltime': 20,
        'cpus': 5,
        'project': 'mc3bggp',
        'schema': 'gsissh'
    }

    appman.workflow = set([p])

    appman.run()
def test_wfp_workflow_incomplete():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()
    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    # Mark the pipeline and its first stage as being scheduled
    p.stages[0].state = states.SCHEDULING
    p.state = states.SCHEDULED

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    import json
    import pika

    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()
    mq_channel.basic_publish(exchange='',
                             routing_key='%s-completedq-1' % amgr._sid,
                             body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
def test_state_order():
    """
    **Purpose**: Test if the Pipeline, Stage and Task are assigned their
    states in the correct order
    """

    def create_single_task():
        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/date']
        t1.copy_input_data = []
        t1.copy_output_data = []
        return t1

    p1 = Pipeline()
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'

    appman = Amgr(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    appman.workflow = [p1]
    appman.run()

    p_state_hist = p1.state_history
    assert p_state_hist == ['DESCRIBED', 'SCHEDULING', 'DONE']

    s_state_hist = p1.stages[0].state_history
    assert s_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'DONE']

    tasks = p1.stages[0].tasks

    for t in tasks:
        t_state_hist = t.state_history
        assert t_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED',
                                'SUBMITTING', 'SUBMITTED', 'EXECUTED',
                                'DEQUEUEING', 'DEQUEUED', 'DONE']
def get_pipeline(shared_fs=False, size=1):

    p = Pipeline()
    p.name = 'p'

    n = 4

    s1 = Stage()
    s1.name = 's1'
    for x in range(n):
        t = Task()
        t.name = 't%s' % x

        # dd if=/dev/random bs=<byte size of a chunk> count=<number of chunks> of=<output file name>
        t.executable = 'dd'

        if not shared_fs:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1',
                           'of=$NODE_LFS_PATH/s1_t%s.txt' % x]
        else:
            t.arguments = ['if=/dev/urandom', 'bs=%sM' % size, 'count=1',
                           'of=/home/vivek91/s1_t%s.txt' % x]

        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 1024

        s1.add_tasks(t)

    p.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'
    for x in range(n):
        t = Task()
        t.executable = ['dd']

        if not shared_fs:
            t.arguments = ['if=$NODE_LFS_PATH/s1_t%s.txt' % x, 'bs=%sM' % size,
                           'count=1', 'of=$NODE_LFS_PATH/s2_t%s.txt' % x]
        else:
            t.arguments = ['if=/home/vivek91/s1_t%s.txt' % x, 'bs=%sM' % size,
                           'count=1', 'of=/home/vivek91/s2_t%s.txt' % x]

        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.tag = 't%s' % x

        s2.add_tasks(t)

    p.add_stages(s2)

    return p
def test_wfp_workflow_incomplete():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()
    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    # Mark the pipeline and its first stage as being scheduled
    p.stages[0].state = states.SCHEDULING
    p.state = states.SCHEDULED

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    import json
    import pika

    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()
    mq_channel.basic_publish(exchange='',
                             routing_key='%s-completedq-1' % amgr._sid,
                             body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
def test_stage_check_complete():

    s = Stage()
    t1 = Task()
    t1.executable = ['/bin/date']
    t2 = Task()
    t2.executable = ['/bin/date']
    s.add_tasks([t1, t2])

    assert s._check_stage_complete() == False

    s._set_tasks_state(states.DONE)
    assert s._check_stage_complete() == True
def test_amgr_synchronizer():

    logger = ru.Logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')
    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    p = Pipeline()
    s = Stage()

    # Create and add 100 tasks to the stage
    for cnt in range(100):
        t = Task()
        t.executable = ['some-executable-%s' % cnt]
        s.add_tasks(t)

    p.add_stages(s)
    p._assign_uid(amgr._sid)
    p._validate()

    amgr.workflow = [p]

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    # Run the test function in a separate process
    proc = Process(target=func_for_synchronizer_test, name='temp-proc',
                   args=(amgr._sid, p, logger, profiler))
    proc.start()
    proc.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULING

    assert p.stages[0].state == states.SCHEDULING
    assert p.state == states.SCHEDULING

    amgr._terminate_sync.set()
    sync_thread.join()
def generate_pipeline():

    def func_condition():
        global CUR_NEW_STAGE, MAX_NEW_STAGE
        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            return True
        return False

    def func_on_true():
        global CUR_NEW_STAGE
        CUR_NEW_STAGE += 1
        shuffle(p.stages[CUR_NEW_STAGE:])

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE + 1):

        # Create a Stage object
        s1 = Stage()

        for i in range(CUR_TASKS):
            t1 = Task()
            t1.executable = '/bin/sleep'
            t1.arguments = ['30']

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        s1.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
def test_stage_set_tasks_state():

    s = Stage()
    t1 = Task()
    t1.executable = ['/bin/date']
    t2 = Task()
    t2.executable = ['/bin/date']
    s.add_tasks([t1, t2])

    with pytest.raises(ValueError):
        s._set_tasks_state(2)

    s._set_tasks_state(states.DONE)
    assert t1.state == states.DONE
    assert t2.state == states.DONE
def on_true():

    global NUM_TASKS, CUR_STAGE

    NUM_TASKS *= 2

    s = Stage()
    s.name = 's%s' % CUR_STAGE

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = condition

    p1.add_stages(s)
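# Hedged sketch (assumption, not from the original source): the `condition` callable
# referenced above and in test_stage_post_exec() is defined elsewhere. With the
# callable-style post_exec used here, a companion consistent with on_true() might look
# like this; MAX_STAGE is a hypothetical module-level limit.
def condition():
    global CUR_STAGE
    if CUR_STAGE < MAX_STAGE:
        CUR_STAGE += 1
        on_true()  # double NUM_TASKS and append the next stage to p1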
def create_pipeline():

    p = Pipeline()
    s = Stage()

    t1 = Task()
    t1.name = 'simulation'
    t1.executable = ['sleep']
    t1.arguments = ['10']

    s.add_tasks(t1)
    p.add_stages(s)

    return p
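# Hedged usage sketch (not from the original source): running the pipeline returned by
# create_pipeline() follows the same AppManager pattern used by the other snippets in
# this collection. `hostname` and `port` are assumed to point at the RabbitMQ instance
# EnTK uses.
appman = AppManager(hostname=hostname, port=port)
appman.resource_desc = {
    'resource': 'local.localhost',
    'walltime': 10,
    'cpus': 1
}
appman.workflow = [create_pipeline()]
appman.run()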
def test_wfp_enqueue():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_enqueue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED

    assert p.stages[0].state == states.SCHEDULED
    assert p.state == states.SCHEDULING
def create_pipeline():

    p = Pipeline()
    s = Stage()

    t1 = Task()
    t1.name = 'simulation'
    t1.executable = ['/bin/echo']
    t1.arguments = ['hello']
    t1.copy_input_data = []
    t1.copy_output_data = []

    s.add_tasks(t1)
    p.add_stages(s)

    return p
def generate_pipeline():

    def func_condition():
        p.suspend()
        print('Suspending pipeline %s for 10 seconds' % p.uid)
        sleep(10)
        return True

    def func_on_true():
        print('Resuming pipeline %s' % p.uid)
        p.resume()

    def func_on_false():
        pass

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    for i in range(10):
        t1 = Task()
        t1.executable = '/bin/sleep'
        t1.arguments = ['30']

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add post-exec to the Stage
    s1.post_exec = {
        'condition': func_condition,
        'on_true': func_on_true,
        'on_false': func_on_false
    }

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def test_integration_local():
    """
    **Purpose**: Run an EnTK application on localhost
    """

    def create_single_task():
        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/echo']
        t1.arguments = ['hello']
        t1.copy_input_data = []
        t1.copy_output_data = []
        return t1

    p1 = Pipeline()
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    appman.workflow = [p1]
    appman.run()
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # Create a Task object which sleeps for 300 seconds
    t1 = Task()
    t1.executable = ['/bin/sleep']
    t1.arguments = ['300']

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # Create a Task object which moves the uploaded file 'temp' to /tmp/
    t1 = Task()
    t1.executable = ['mv']
    t1.arguments = ['temp', '/tmp/']
    t1.upload_input_data = ['%s/temp' % cur_dir]

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def test_wfp_initialize_workflow():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    wfp = WFprocessor(sid='test',
                      workflow=[p],
                      pending_queue=list(),
                      completed_queue=list(),
                      mq_hostname=hostname,
                      port=port,
                      resubmit_failed=False)

    wfp._initialize_workflow()
    assert p.uid is not None
    assert p.stages[0].uid is not None
    for t in p.stages[0].tasks:
        assert t.uid is not None
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # Create Task objects which concatenate two input files into 'output.txt'
    for x in range(10):
        t1 = Task()
        t1.executable = 'cat'
        t1.arguments = ['file1.txt', 'file2.txt', '>', 'output.txt']
        t1.copy_input_data = ['$SHARED/file1.txt', '$SHARED/file2.txt']
        t1.download_output_data = ['output.txt > %s/output_%s.txt' % (cur_dir, x + 1)]

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()
    p.name = 'p1'

    # Create a Stage object
    s1 = Stage()
    s1.name = 's1'

    # Create 4K tasks to ensure we don't hit any RMQ connection drops
    for _ in range(4096):
        t1 = Task()
        t1.executable = ['/bin/echo']
        t1.arguments = ['"Hello World"']

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()
    p.name = 'p1'

    # Create a Stage object
    s1 = Stage()
    s1.name = 's1'

    # Create a Task object which deliberately fails (/bin/false exits non-zero)
    t1 = Task()
    t1.name = 't1'
    t1.executable = ['/bin/false']
    # t1.arguments = ['"Hello World"', '>>', 'temp.txt']

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def test_stage_exceptions(t, l, i, b, se):
    """
    ***Purpose***: Test if correct exceptions are raised when attributes are
    assigned unacceptable values.
    """
    s = Stage()
    data_type = [t, l, i, b, se]

    for data in data_type:

        print('Using: %s, %s' % (data, type(data)))

        if not isinstance(data, str):
            with pytest.raises(TypeError):
                s.name = data

        with pytest.raises(TypeError):
            s.tasks = data

        with pytest.raises(TypeError):
            s.add_tasks(data)
def test_stage_pass_uid():

    s = Stage()
    s._uid = 's'
    s.name = 's1'
    s.parent_pipeline['uid'] = 'p'
    s.parent_pipeline['name'] = 'p1'

    t1 = Task()
    t2 = Task()
    s.add_tasks([t1, t2])

    s._pass_uid()

    assert t1.parent_stage['uid'] == s.uid
    assert t1.parent_stage['name'] == s.name
    assert t1.parent_pipeline['uid'] == s.parent_pipeline['uid']
    assert t1.parent_pipeline['name'] == s.parent_pipeline['name']

    assert t2.parent_stage['uid'] == s.uid
    assert t2.parent_stage['name'] == s.name
    assert t2.parent_pipeline['uid'] == s.parent_pipeline['uid']
    assert t2.parent_pipeline['name'] == s.parent_pipeline['name']
def func_on_true():

    global CUR_NEW_STAGE

    CUR_NEW_STAGE += 1

    s = Stage()
    for i in range(10):
        t = Task()
        t.executable = '/bin/sleep'
        t.arguments = ['30']
        s.add_tasks(t)

    # Add post-exec to the Stage
    s.post_exec = {
        'condition': func_condition,
        'on_true': func_on_true,
        'on_false': func_on_false
    }

    p.add_stages(s)
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # Create a Task object which creates a file named 'output.txt' of size 1 MB
    t1 = Task()
    t1.executable = '/bin/bash'
    t1.arguments = ['-l', '-c', 'base64 /dev/urandom | head -c 1000000 > output.txt']

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    # Create another Stage object to hold character count tasks
    s2 = Stage()

    # Create a Task object
    t2 = Task()
    t2.executable = '/bin/bash'
    t2.arguments = ['-l', '-c', 'grep -o . output.txt | sort | uniq -c > ccount.txt']

    # Copy data from the task in the first stage to the current task's location
    t2.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/output.txt' % (p.uid, s1.uid, t1.uid)]

    # Add the Task to the Stage
    s2.add_tasks(t2)

    # Add Stage to the Pipeline
    p.add_stages(s2)

    # Create another Stage object to hold checksum tasks
    s3 = Stage()

    # Create a Task object
    t3 = Task()
    t3.executable = '/bin/bash'
    t3.arguments = ['-l', '-c', 'sha1sum ccount.txt > chksum.txt']

    # Copy data from the task in the second stage to the current task's location
    t3.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/ccount.txt' % (p.uid, s2.uid, t2.uid)]

    # Download the output of the current task to the current location
    t3.download_output_data = ['chksum.txt > chksum_%s.txt' % cnt]

    # Add the Task to the Stage
    s3.add_tasks(t3)

    # Add Stage to the Pipeline
    p.add_stages(s3)

    return p
def generate_pipeline():

    def func_condition():
        global CUR_NEW_STAGE, MAX_NEW_STAGE
        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            return True
        return False

    def func_on_true():
        global CUR_NEW_STAGE
        CUR_NEW_STAGE += 1

        s = Stage()
        for i in range(10):
            t = Task()
            t.executable = '/bin/sleep'
            t.arguments = ['30']
            s.add_tasks(t)

        # Add post-exec to the Stage
        s.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        p.add_stages(s)

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    for i in range(10):
        t1 = Task()
        t1.executable = ['sleep']
        t1.arguments = ['30']

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add post-exec to the Stage
    s1.post_exec = {
        'condition': func_condition,
        'on_true': func_on_true,
        'on_false': func_on_false
    }

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
stage_uids = list()
task_uids = dict()

Stages = 3
Replicas = 4

for N_Stg in range(Stages):
    stg = Stage()  # initialization
    task_uids['Stage_%s' % N_Stg] = list()

    if N_Stg == 0:
        for n0 in range(Replicas):
            t = Task()
            t.executable = ['/usr/local/packages/gromacs/5.1.4/INTEL-140-MVAPICH2-2.0/bin/gmx_mpi_d']  # MD Engine
            t.upload_input_data = ['in.gro', 'in.top', 'FNF.itp', 'martini_v2.2.itp', 'in.mdp']
            t.pre_exec = ['module load gromacs',
                          '/usr/local/packages/gromacs/5.1.4/INTEL-140-MVAPICH2-2.0/bin/gmx_mpi_d grompp -f in.mdp -c in.gro -o in.tpr -p in.top']
            t.arguments = ['mdrun', '-s', 'in.tpr', '-deffnm', 'out']
            t.cores = 32
            stg.add_tasks(t)
            task_uids['Stage_%s' % N_Stg].append(t.uid)

        p.add_stages(stg)
        stage_uids.append(stg.uid)

    else:
        for n0 in range(Replicas):
            t = Task()
            t.executable = ['/usr/local/packages/gromacs/5.1.4/INTEL-140-MVAPICH2-2.0/bin/gmx_mpi_d']  # MD Engine
            t.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/out.gro > in.gro' % (p.uid, stage_uids[N_Stg - 1], task_uids['Stage_%s' % (N_Stg - 1)][n0]),
                                 '$Pipeline_%s_Stage_%s_Task_%s/in.top' % (p.uid, stage_uids[N_Stg - 1], task_uids['Stage_%s' % (N_Stg - 1)][n0]),
                                 '$Pipeline_%s_Stage_%s_Task_%s/FNF.itp' % (p.uid, stage_uids[N_Stg - 1], task_uids['Stage_%s' % (N_Stg - 1)][n0]),
                                 '$Pipeline_%s_Stage_%s_Task_%s/martini_v2.2.itp' % (p.uid, stage_uids[N_Stg - 1], task_uids['Stage_%s' % (N_Stg - 1)][n0]),
                                 '$Pipeline_%s_Stage_%s_Task_%s/in.mdp' % (p.uid, stage_uids[N_Stg - 1], task_uids['Stage_%s' % (N_Stg - 1)][n0])]
            t.pre_exec = ['module load gromacs',
                          '/usr/local/packages/gromacs/5.1.4/INTEL-140-MVAPICH2-2.0/bin/gmx_mpi_d grompp -f in.mdp -c in.gro -o in.tpr -p in.top']
            t.arguments = ['mdrun', '-s', 'in.tpr', '-deffnm', 'out']
            t.cores = 32
if __name__ == '__main__':

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s = Stage()

    # Create a Task object
    t = Task()
    t.name = 'my-first-task'       # Assign a name to the task (optional, do not use ',' or '_')
    t.executable = '/bin/echo'     # Assign executable to the task
    t.arguments = ['Hello World']  # Assign arguments for the task executable

    # Add Task to the Stage
    s.add_tasks(t)

    # Add Stage to the Pipeline
    p.add_stages(s)

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary describing the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {
        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 1
    }
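    # Hedged continuation (assumption): the snippet is truncated here. The other examples
    # in this collection go on to assign the resource description and the workflow to the
    # AppManager and then call run(), e.g.:
    #
    #     appman.resource_desc = res_dict
    #     appman.workflow = [p]
    #     appman.run()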