def post_stage(): if (not os.path.exists(f'{run_dir}/aggregator/stop.aggregator')): nstages = len(p.stages) s = Stage() s.name = f"{nstages}" t = Task() t.cpu_reqs = { 'processes': 1, 'process_type': None, 'threads_per_process': 4, 'thread_type': 'OpenMP' } t.gpu_reqs = { 'processes': 0, 'process_type': None, 'threads_per_process': 0, 'thread_type': None } t.name = f"{i}_{nstages}" t.executable = PYTHON t.arguments = [ f'{current_dir}/simulation.py', f'{run_dir}/simulations/all/{i}_{nstages}', ADIOS_XML ] subprocess.getstatusoutput( f'ln -s {run_dir}/simulations/all/{i}_{nstages} {run_dir}/simulations/new/{i}_{nstages}' ) s.add_tasks(t) s.post_exec = post_stage p.add_stages(s)
def generate_pipeline(name, stages): # Create a Pipeline object p = Pipeline() p.name = name for s_cnt in range(stages): # Create a Stage object s = Stage() s.name = 'Stage %s' % s_cnt for t_cnt in range(5): # Create a Task object t = Task() t.name = 'my-task' # Assign a name to the task (optional) t.executable = ['/bin/echo'] # Assign executable to the task # Assign arguments for the task executable t.arguments = ['I am task %s in %s in %s' % (t_cnt, s_cnt, name)] # Add the Task to the Stage s.add_tasks(t) # Add Stage to the Pipeline p.add_stages(s) return p
def generate_pipeline(name, stages): # Create a Pipeline object p = Pipeline() p.name = name for s_cnt in range(stages): # Create a Stage object s = Stage() s.name = 'Stage %s'%s_cnt for t_cnt in range(5): # Create a Task object t = Task() t.name = 'my-task' # Assign a name to the task (optional) t.executable = '/bin/echo' # Assign executable to the task # Assign arguments for the task executable t.arguments = ['I am task %s in %s in %s'%(t_cnt, s_cnt, name)] # Add the Task to the Stage s.add_tasks(t) # Add Stage to the Pipeline p.add_stages(s) return p
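The two generate_pipeline variants above differ only in how the task executable is assigned (a list vs. a plain string); both forms appear throughout this collection. A minimal driver for such a pipeline, sketched from the AppManager usage shown in later snippets, might look like the following; the RabbitMQ endpoint and the local resource description are assumptions, not part of the original code.

from radical.entk import AppManager

appman = AppManager(hostname='localhost', port=5672)     # assumed RabbitMQ endpoint
appman.resource_desc = {'resource': 'local.localhost',   # assumed: run locally
                        'walltime': 10,
                        'cpus': 1}
appman.workflow = set([generate_pipeline('pipe1', 2),    # two small pipelines as an example
                       generate_pipeline('pipe2', 3)])
appman.run()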
def add_ex_stg(rid, cycle): #ex stg here ex_tsk = Task() ex_stg = Stage() ex_tsk.name = 'extsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle) for r in range(len(waiting_replicas)): ex_tsk.link_input_data += ['%s/mdinfo-{replica}-{cycle}'.format(replica=r, cycle=cycle)%replica_sandbox] ex_tsk.arguments = ['t_ex_gibbs.py', len(waiting_replicas)] #This needs to be fixed ex_tsk.executable = ['python'] ex_tsk.cpu_reqs = { 'processes': 1, 'process_type': '', 'threads_per_process': 1, 'thread_type': None } ex_tsk.pre_exec = ['export dummy_variable=19'] ex_stg.add_tasks(ex_tsk) ex_stg.post_exec = { 'condition': post_ex, 'on_true': terminate_replicas, 'on_false': continue_md } return ex_stg
def add_md_stg(rid,cycle): #md stg h md_tsk = Task() md_stg = Stage() md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle) md_tsk.link_input_data += ['%s/inpcrd' %replica_sandbox, '%s/prmtop' %replica_sandbox, '%s/mdin-{replica}-{cycle}'.format(replica=rid, cycle=0) %replica_sandbox] md_tsk.arguments = ['-O', '-i', 'mdin-{replica}-{cycle}'.format(replica=rid, cycle=0), '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out', '-r', '%s/restrt-{replica}-{cycle}'.format(replica=rid, cycle=cycle) %replica_sandbox, '-x', 'mdcrd', '-inf', '%s/mdinfo-{replica}-{cycle}'.format(replica=rid, cycle=cycle) %replica_sandbox] md_tsk.executable = ['/home/scm177/mantel/AMBER/amber14/bin/sander'] md_tsk.cpu_reqs = { 'processes': replica_cores, 'process_type': '', 'threads_per_process': 1, 'thread_type': None } md_tsk.pre_exec = ['export dummy_variable=19', 'echo $SHARED'] md_stg.add_tasks(md_tsk) md_stg.post_exec = { 'condition': md_post, 'on_true': suspend, 'on_false': exchange_stg } return md_stg
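add_ex_stg and add_md_stg use the dictionary form of stage.post_exec ('condition'/'on_true'/'on_false'), an older EnTK adaptivity API; the callbacks they reference (post_ex, md_post, suspend, exchange_stg, terminate_replicas, continue_md) are defined elsewhere in the application. The stubs below only sketch the expected shapes, with assumed bookkeeping variables, and are not the real implementations.

def md_post():
    # 'condition' callback: must return True or False
    return len(waiting_replicas) >= exchange_size   # exchange_size is an assumed global

def suspend():
    # 'on_true' callback: e.g. park this replica until enough partners are waiting
    pass

def exchange_stg():
    # 'on_false' callback: e.g. extend the pipeline with the next exchange/MD stage
    pass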
def create_inversion_dict_stage(cmt_file_db, param_path, task_counter): """Creates stage for the creation of the inversion files. This stage is tiny, but required before the actual inversion. :param cmt_file_db: :param param_path: :param task_counter: :return: """ # Get database parameter path databaseparam_path = os.path.join(param_path, "Database/DatabaseParameters.yml") # Load Parameters DB_params = read_yaml_file(databaseparam_path) # Earthquake specific database parameters: Dir and Cid Cdir, Cid = get_Centry_path(DB_params["databasedir"], cmt_file_db) # Function inv_dict_func = os.path.join(bin_path, "write_inversion_dicts.py") # Create Process Paths Stage (CPP) # Create a Stage object inv_dict_stage = Stage() inv_dict_stage.name = "Creating" # Create Task inv_dict_task = Task() # This way the task gets the name of the path file inv_dict_task.name = "Inversion-Dictionaries" inv_dict_task.pre_exec = [ # Conda activate DB_params["conda-activate"] ] inv_dict_task.executable = [DB_params["bin-python"]] # Assign exec # to the task inv_dict_task.arguments = [ inv_dict_func, "-f", cmt_file_db, "-p", param_path ] # In the future maybe to database dir as a total log? inv_dict_task.stdout = os.path.join( "%s" % Cdir, "logs", "stdout.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), inv_dict_task.name)) inv_dict_task.stderr = os.path.join( "%s" % Cdir, "logs", "stderr.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), inv_dict_task.name)) inv_dict_stage.add_tasks(inv_dict_task) task_counter += 1 return inv_dict_stage, task_counter
def generate_ML_pipeline(): p = Pipeline() p.name = 'ML' s1 = Stage() s1.name = 'Generator-ML' # the generator/ML Pipeline will consist of 1 Stage, 2 Tasks Task 1 : # Generator; Task 2: ConvNet/Active Learning Model # NOTE: Generator and ML/AL are alive across the whole workflow execution. # For local testing, sleep time is longer than the total execution time of # the MD pipelines. t1 = Task() t1.name = "generator" t1.pre_exec = [ # 'module load python/2.7.15-anaconda2-5.3.0', # 'module load cuda/9.1.85', # 'module load gcc/6.4.0', # 'source activate snakes' ] # t1.executable = ['python'] # t1.arguments = ['/ccs/home/jdakka/tf.py'] t1.executable = ['sleep'] t1.arguments = ['5'] s1.add_tasks(t1) t2 = Task() t2.name = "ml-al" t2.pre_exec = [ # 'module load python/2.7.15-anaconda2-5.3.0', # 'module load cuda/9.1.85', # 'module load gcc/6.4.0', # 'source activate snakes' ] # t2.executable = ['python'] # t2.arguments = ['/ccs/home/jdakka/tf.py'] t2.executable = ['sleep'] t2.arguments = ['10'] s1.add_tasks(t2) # Add Stage to the Pipeline p.add_stages(s1) return p
def generate_pipeline(nid): # Create a Pipeline object p = Pipeline() p.name = 'p%s' % nid # Create a Stage object s1 = Stage() s1.name = 's1' # Create a Task object which echoes 'hello' and captures it in 'output.txt' t1 = Task() t1.name = 't2' t1.executable = ['/bin/echo'] t1.arguments = ['hello'] t1.stdout = 'output.txt' # capture stdout so the second stage has a file to copy # Add the Task to the Stage s1.add_tasks(t1) # Add Stage to the Pipeline p.add_stages(s1) # Create another Stage object to hold the second set of tasks s2 = Stage() s2.name = 's2' s2_task_uids = [] for cnt in range(10): # Create a Task object t2 = Task() t2.name = 't%s' % (cnt + 1) t2.executable = ['/bin/echo'] t2.arguments = ['world'] # Copy data from the task in the first stage to the current task's location t2.copy_input_data = [ '$Pipeline_%s_Stage_%s_Task_%s/output.txt' % (p.name, s1.name, t1.name)] # Add the Task to the Stage s2.add_tasks(t2) s2_task_uids.append(t2.name) # Add Stage to the Pipeline p.add_stages(s2) return p
def create_single_task(): t1 = Task() t1.name = 'simulation' t1.executable = '/bin/date' t1.copy_input_data = [] t1.copy_output_data = [] return t1
def main(): cmd = "{0} 'ls {1}'".format(ssh, dir_) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out, _ = p.communicate() out = out.decode('utf-8').strip().split(linesep) fullpaths = [op.join(dir_, p) for p in out] print(fullpaths) # Start radical entk pipeline p = Pipeline() for i in range(iterations): s = Stage() for fp in fullpaths: t = Task() t.name = 'Incrementation {}'.format(i) t.pre_exec = [ 'source /home/vhayot/miniconda3/etc/profile.d/conda.sh', 'conda activate radenv' ] t.executable = 'python /home/vhayot/inc.py' if i == 0: t.arguments = [fp, out_dir, i] else: # Note: assuming all data is accessible through shared dir # radical entk functions without sharedfs, however t.arguments = [ op.join(out_dir, "it-{0}-{1}".format(i - 1, op.basename(fp))), out_dir, i ] s.add_tasks(t) # Create a new stage everytime there's a dependency p.add_stages(s) appman = AppManager(hostname=hostname, port=port) appman.resource_desc = { 'resource': 'xsede.bridges', 'walltime': 20, 'cpus': 5, 'project': 'mc3bggp', 'schema': 'gsissh' } appman.workflow = set([p]) appman.run()
def constructTask(url): response = requests.get(url) response = response.json() t = Task() t.name = str(response['name']) t.executable = [str(response['executable'])] t.arguments = [str(response['arguments'])] return t
def create_single_task(): t1 = Task() t1.name = 'simulation' t1.executable = ['/bin/date'] t1.copy_input_data = [] t1.copy_output_data = [] return t1
def generate_pipeline(): # Create a Pipeline object p = Pipeline() p.name = 'p1' # Create a Stage object s1 = Stage() s1.name = 's1' # Create a Task object which writes 'Hello World' to 'temp.txt' via stdout t1 = Task() t1.name = 't1' t1.executable = ['/bin/echo'] t1.arguments = ['"Hello World"'] t1.stdout = 'temp.txt' # Add the Task to the Stage s1.add_tasks(t1) # Add Stage to the Pipeline p.add_stages(s1) # Create a Stage object s2 = Stage() s2.name = 's2' # Create a Task object which concatenates the first task's 'temp.txt' into 'output.txt' t2 = Task() t2.name = 't2' t2.executable = ['/bin/cat'] t2.arguments = [ '$Pipeline_%s_Stage_%s_Task_%s/temp.txt' % (p.name, s1.name, t1.name) ] t2.stdout = 'output.txt' t2.download_output_data = ['output.txt'] # Add the Task to the Stage s2.add_tasks(t2) # Add Stage to the Pipeline p.add_stages(s2) return p
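The '$Pipeline_<name>_Stage_<name>_Task_<name>' strings used here (and in many snippets below) are EnTK data placeholders: at runtime EnTK substitutes the sandbox path of the referenced task, so a later task can read or copy files produced by an earlier one. The resolved path below is purely illustrative; the actual location depends on the RADICAL-Pilot session.

ref = '$Pipeline_%s_Stage_%s_Task_%s/temp.txt' % ('p1', 's1', 't1')
# resolves at run time to something like (illustrative only):
#   <session sandbox>/pilot.0000/unit.000001/temp.txt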
def create_single_task(): t1 = Task() t1.name = 'simulation' t1.executable = ['/bin/echo'] t1.arguments = ['hello'] t1.copy_input_data = [] t1.copy_output_data = [] return t1
def create_single_task(): t1 = Task() t1.name = 'dummy_task' t1.executable = ['placeholder'] t1.arguments = ['a','b','c'] t1.copy_input_data = [] t1.copy_output_data = [] return t1
def create_single_task(): t1 = Task() t1.name = 'simulation' t1.executable = ['gmx mdrun'] t1.arguments = ['a', 'b', 'c'] t1.copy_input_data = [] t1.copy_output_data = [] return t1
def get_pipeline(shared_fs=False, size=1): p = Pipeline() p.name = 'p' n = 4 s1 = Stage() s1.name = 's1' for x in range(n): t = Task() t.name = 't%s'%x # dd if=/dev/random bs=<byte size of a chunk> count=<number of chunks> of=<output file name> t.executable = 'dd' if not shared_fs: t.arguments = ['if=/dev/urandom','bs=%sM'%size, 'count=1', 'of=$NODE_LFS_PATH/s1_t%s.txt'%x] else: t.arguments = ['if=/dev/urandom','bs=%sM'%size, 'count=1', 'of=/home/vivek91/s1_t%s.txt'%x] t.cpu_reqs['processes'] = 1 t.cpu_reqs['threads_per_process'] = 24 t.cpu_reqs['thread_type'] = '' t.cpu_reqs['process_type'] = '' t.lfs_per_process = 1024 s1.add_tasks(t) p.add_stages(s1) s2 = Stage() s2.name = 's2' for x in range(n): t = Task() t.executable = ['dd'] if not shared_fs: t.arguments = ['if=$NODE_LFS_PATH/s1_t%s.txt'%x,'bs=%sM'%size, 'count=1', 'of=$NODE_LFS_PATH/s2_t%s.txt'%x] else: t.arguments = ['if=/home/vivek91/s1_t%s.txt'%x,'bs=%sM'%size, 'count=1', 'of=/home/vivek91/s2_t%s.txt'%x] t.cpu_reqs['processes'] = 1 t.cpu_reqs['threads_per_process'] = 24 t.cpu_reqs['thread_type'] = '' t.cpu_reqs['process_type'] = '' t.tag = 't%s'%x s2.add_tasks(t) p.add_stages(s2) return p
def get_pipeline(shared_fs=False, size=1): p = Pipeline() p.name = 'p' n = 4 s1 = Stage() s1.name = 's1' for x in range(n): t = Task() t.name = 't%s'%x # dd if=/dev/random bs=<byte size of a chunk> count=<number of chunks> of=<output file name> t.executable = ['dd'] if not shared_fs: t.arguments = ['if=/dev/urandom','bs=%sM'%size, 'count=1', 'of=$NODE_LFS_PATH/s1_t%s.txt'%x] else: t.arguments = ['if=/dev/urandom','bs=%sM'%size, 'count=1', 'of=/home/vivek91/s1_t%s.txt'%x] t.cpu_reqs['processes'] = 1 t.cpu_reqs['threads_per_process'] = 24 t.cpu_reqs['thread_type'] = '' t.cpu_reqs['process_type'] = '' t.lfs_per_process = 1024 s1.add_tasks(t) p.add_stages(s1) s2 = Stage() s2.name = 's2' for x in range(n): t = Task() t.executable = ['dd'] if not shared_fs: t.arguments = ['if=$NODE_LFS_PATH/s1_t%s.txt'%x,'bs=%sM'%size, 'count=1', 'of=$NODE_LFS_PATH/s2_t%s.txt'%x] else: t.arguments = ['if=/home/vivek91/s1_t%s.txt'%x,'bs=%sM'%size, 'count=1', 'of=/home/vivek91/s2_t%s.txt'%x] t.cpu_reqs['processes'] = 1 t.cpu_reqs['threads_per_process'] = 24 t.cpu_reqs['thread_type'] = '' t.cpu_reqs['process_type'] = '' t.tag = 't%s'%x s2.add_tasks(t) p.add_stages(s2) return p
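Both get_pipeline variants write their intermediate files to node-local storage ($NODE_LFS_PATH) and then set t.tag on each second-stage task to the name of a first-stage task; with EnTK's data-aware scheduling this asks for the tagged task to be placed on the same node, so the locally written s1_t<x>.txt is visible to the second dd. A possible way to run it, modelled on the Blue Waters test later in this collection, is sketched below; the resource values are assumptions.

appman = AppManager(hostname='localhost', port=5672)         # assumed RabbitMQ endpoint
appman.resource_desc = {'resource': 'ncsa.bw_aprun',         # assumed, as in the BW test below
                        'walltime': 10, 'cpus': 128, 'project': 'gk4'}
appman.workflow = [get_pipeline(shared_fs=False, size=1)]    # exercise the node-local path
appman.run()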
def create_inversion_stage(cmt_file_db, param_path, task_counter): """Creates inversion stage. :param cmt_file_db: :param param_path: :return: """ # Get database parameter path databaseparam_path = os.path.join(param_path, "Database/DatabaseParameters.yml") # Load Parameters DB_params = read_yaml_file(databaseparam_path) # Earthquake specific database parameters: Dir and Cid Cdir, Cid = get_Centry_path(DB_params["databasedir"], cmt_file_db) # Function inversion_func = os.path.join(bin_path, "inversion.py") # Create a Stage object inversion_stage = Stage() inversion_stage.name = "CMT3D" # Create Task inversion_task = Task() # This way the task gets the name of the path file inversion_task.name = "Inversion" inversion_task.pre_exec = [ # Conda activate DB_params["conda-activate"] ] inversion_task.executable = DB_params["bin-python"] # Assign exec # to the task inversion_task.arguments = [ inversion_func, "-f", cmt_file_db, "-p", param_path ] # In the future maybe to database dir as a total log? inversion_task.stdout = os.path.join( "%s" % Cdir, "logs", "stdout.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), inversion_task.name)) inversion_task.stderr = os.path.join( "%s" % Cdir, "logs", "stderr.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), inversion_task.name)) inversion_stage.add_tasks(inversion_task) return inversion_stage
def create_task_from_cu(cu, prof=None): """ Purpose: Create a Task based on the Compute Unit. Details: Currently, only the uid, parent_stage and parent_pipeline are retrieved. The exact initial Task (that was converted to a CUD) cannot be recovered as the RP API does not provide the same attributes for a CU as for a CUD. Also, this is not required for the most part. TODO: Add exit code, stdout, stderr and path attributes to a Task. These can be extracted from a CU :arguments: :cu: RP Compute Unit :return: Task """ try: logger.debug('Create Task from CU %s' % cu.name) if prof: prof.prof('task from cu - create', uid=cu.name.split(',')[0].strip()) task = Task() task.uid = cu.name.split(',')[0].strip() task.name = cu.name.split(',')[1].strip() task.parent_stage['uid'] = cu.name.split(',')[2].strip() task.parent_stage['name'] = cu.name.split(',')[3].strip() task.parent_pipeline['uid'] = cu.name.split(',')[4].strip() task.parent_pipeline['name'] = cu.name.split(',')[5].strip() task.rts_uid = cu.uid if cu.exit_code is not None: task.exit_code = cu.exit_code else: if cu.state == rp.DONE: task.exit_code = 0 else: task.exit_code = 1 task.path = ru.Url(cu.sandbox).path if prof: prof.prof('task from cu - done', uid=cu.name.split(',')[0].strip()) logger.debug('Task %s created from CU %s' % (task.uid, cu.name)) return task except Exception as ex: logger.error('Task creation from CU failed, error: %s' % ex) raise
def create_process_path_files(cmt_file_db, param_path, task_counter): """This function creates the path files used for processing both synthetic and observed data in ASDF format, as well as the following windowing procedure. :param cmt_file_db: cmtfile in the database :param param_path: path to parameter file directory :param pipelinedir: path to pipeline directory :return: EnTK Stage """ # Get database parameter path databaseparam_path = os.path.join(param_path, "Database/DatabaseParameters.yml") # Load Parameters DB_params = read_yaml_file(databaseparam_path) # Earthquake specific database parameters: Dir and Cid Cdir, Cid = get_Centry_path(DB_params["databasedir"], cmt_file_db) # Process path function create_process_path_bin = os.path.join(bin_path, "create_path_files.py") # Create Process Paths Stage (CPP) # Create a Stage object cpp = Stage() cpp.name = "CreateProcessPaths" # Create Task cpp_t = Task() cpp_t.name = "CPP-Task" cpp_t.pre_exec = [ # Conda activate DB_params["conda-activate"] ] cpp_t.executable = DB_params["bin-python"] # Assign executable # to the task cpp_t.arguments = [create_process_path_bin, cmt_file_db] # In the future maybe to database dir as a total log? cpp_t.stdout = os.path.join( "%s" % Cdir, "logs", "stdout.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), cpp_t.name)) cpp_t.stderr = os.path.join( "%s" % Cdir, "logs", "stderr.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), cpp_t.name)) task_counter += 1 cpp.add_tasks(cpp_t) return cpp, task_counter
def write_sources(cmt_file_db, param_path, task_counter): """ This function creates a stage that modifies the CMTSOLUTION files before the simulations are run. :param cmt_file_db: cmtfile in the database :param param_path: path to parameter file directory :param task_counter: total task count up until now in pipeline :return: EnTK Stage """ # Get Database parameters databaseparam_path = os.path.join(param_path, "Database/DatabaseParameters.yml") DB_params = read_yaml_file(databaseparam_path) # Earthquake specific database parameters: Dir and Cid Cdir, Cid = get_Centry_path(DB_params["databasedir"], cmt_file_db) # Path to function write_source_func = os.path.join(bin_path, "write_sources.py") # Create a Stage object w_sources = Stage() w_sources.name = "Write-Sources" # Create Task for stage w_sources_t = Task() w_sources_t.name = "Task-Sources" w_sources_t.pre_exec = [ # Conda activate DB_params["conda-activate"] ] w_sources_t.executable = DB_params["bin-python"] # # Assign executable # to the task w_sources_t.arguments = [write_source_func, cmt_file_db] # In the future maybe to database dir as a total log? w_sources_t.stdout = os.path.join( "%s" % Cdir, "logs", "stdout.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), w_sources_t.name)) w_sources_t.stderr = os.path.join( "%s" % Cdir, "logs", "stderr.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), w_sources_t.name)) # Add Task to the Stage w_sources.add_tasks(w_sources_t) task_counter += 1 return w_sources, task_counter
def generate_pipeline(nid): p = Pipeline() p.name = 'p%s' % nid s1 = Stage() s1.name = 's1' t1 = Task() t1.name = 't2' t1.executable = '/bin/echo' t1.arguments = ['hello'] t1.stdout = 'output.txt' # capture stdout so the second stage has a file to copy s1.add_tasks(t1) p.add_stages(s1) s2 = Stage() s2.name = 's2' s2_task_uids = [] for cnt in range(10): t2 = Task() t2.name = 't%s' % (cnt + 1) t2.executable = '/bin/echo' t2.arguments = ['world'] # Copy data from the task in the first stage to the current task's location t2.copy_input_data = [ '$Pipeline_%s_Stage_%s_Task_%s/output.txt' % (p.name, s1.name, t1.name) ] s2.add_tasks(t2) s2_task_uids.append(t2.name) p.add_stages(s2) return p
def create_task_from_cu(cu, prof=None): """ Purpose: Create a Task based on the Compute Unit. Details: Currently, only the uid, parent_stage and parent_pipeline are retrieved. The exact initial Task (that was converted to a CUD) cannot be recovered as the RP API does not provide the same attributes for a CU as for a CUD. Also, this is not required for the most part. TODO: Add exit code, stdout, stderr and path attributes to a Task. These can be extracted from a CU :arguments: :cu: RP Compute Unit :return: Task """ try: logger.debug('Create Task from CU %s' % cu.name) if prof: prof.prof('task from cu - create', uid=cu.name.split(',')[0].strip()) task = Task() task.uid = cu.name.split(',')[0].strip() task.name = cu.name.split(',')[1].strip() task.parent_stage['uid'] = cu.name.split(',')[2].strip() task.parent_stage['name'] = cu.name.split(',')[3].strip() task.parent_pipeline['uid'] = cu.name.split(',')[4].strip() task.parent_pipeline['name'] = cu.name.split(',')[5].strip() task.rts_uid = cu.uid if cu.state == rp.DONE: task.exit_code = 0 else: task.exit_code = 1 task.path = ru.Url(cu.sandbox).path if prof: prof.prof('task from cu - done', uid=cu.name.split(',')[0].strip()) logger.debug('Task %s created from CU %s' % (task.uid, cu.name)) return task except Exception as ex: logger.exception('Task creation from CU failed, error: %s' % ex) raise
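Both create_task_from_cu variants assume the ComputeUnit name is a comma-separated record of EnTK identifiers, which is exactly the format create_cud_from_task produces (see the test further below). An illustration with made-up identifiers:

cu_name = 'task.0000,t1,stage.0000,s1,pipe.0000,p1'   # illustrative values only
uid, name, stage_uid, stage_name, pipe_uid, pipe_name = [f.strip() for f in cu_name.split(',')]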
def specfem_clean_up(cmt_file_db, param_path, task_counter): """ Cleaning up the simulation directories since we don"t need all the files for the future. :param cmt_file_db: cmtfile in the database :param param_path: path to parameter file directory :param pipelinedir: path to pipeline directory :return: EnTK Stage """ # Get Database parameters databaseparam_path = os.path.join(param_path, "Database/DatabaseParameters.yml") # Database parameters. DB_params = read_yaml_file(databaseparam_path) # Earthquake specific database parameters: Dir and Cid Cdir, Cid = get_Centry_path(DB_params["databasedir"], cmt_file_db) # Path to function clean_up_func = os.path.join(bin_path, "clean_up_simdirs.py") # Create a Stage object clean_up = Stage() clean_up.name = "Clean-Up" # Create Task for stage clean_up_t = Task() clean_up_t.name = "Task-Clean-Up" clean_up_t.pre_exec = [ # Conda activate DB_params["conda-activate"] ] clean_up_t.executable = DB_params["bin-python"] # Assign executable # to the task clean_up_t.arguments = [clean_up_func, cmt_file_db] # In the future maybe to database dir as a total log? clean_up_t.stdout = os.path.join( "%s" % Cdir, "logs", "stdout.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), clean_up_t.name)) clean_up_t.stderr = os.path.join( "%s" % Cdir, "logs", "stderr.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), clean_up_t.name)) # Add Task to the Stage clean_up.add_tasks(clean_up_t) return clean_up, task_counter
def data_request(cmt_file_db, param_path, task_counter): """ This function creates the request for the observed data and returns it as an EnTK Stage :param cmt_file_db: cmt_file in the database :param param_path: path to parameter file directory :param task_counter: total task count up until now in pipeline :return: EnTK Stage """ # Get Database parameters databaseparam_path = os.path.join(param_path, "Database/DatabaseParameters.yml") DB_params = read_yaml_file(databaseparam_path) # Earthquake specific database parameters: Dir and Cid Cdir, Cid = get_Centry_path(DB_params["databasedir"], cmt_file_db) # # Path to function request_data_func = os.path.join(bin_path, "request_data.py") # Create a Stage object datarequest = Stage() datarequest_t = Task() datarequest_t.name = "data-request" datarequest_t.pre_exec = [ # Conda activate DB_params["conda-activate"] ] datarequest_t.executable = DB_params["bin-python"] # Assign executable # to the task datarequest_t.arguments = [request_data_func, cmt_file_db] # In the future maybe to database dir as a total log? datarequest_t.stdout = os.path.join( "%s" % Cdir, "logs", "stdout.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), datarequest_t.name)) datarequest_t.stderr = os.path.join( "%s" % Cdir, "logs", "stderr.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), datarequest_t.name)) # Add Task to the Stage datarequest.add_tasks(datarequest_t) # Increase task-counter task_counter += 1 return datarequest, task_counter
def generate_task(self, **ensembles): """ Generate a `radical.entk` task. Parameters ---------- ensembles: dict, OrderedDict Dictionary of the *current* values of variables that are ensembles. All the variables that were declared with `add_ensemble` should be specified here so that a correct task object can be generated. """ [setattr(self, k, w) for k, w in ensembles.items()] if not self.all_variables_defined(): raise ValueError('Some variables are not defined!') task = Task() task.name = ensembles['task_name'] task.pre_exec += self.engine.pre_exec task.executable += str(self.engine.executable) task.arguments += self.engine.arguments task.cpu_reqs = { 'processes': self._processes, 'process_type': 'MPI' if self.engine.uses_mpi else None, 'threads_per_process': self._threads_per_process, 'thread_type': None } task.gpu_reqs = { 'processes': self._gpu_processes, 'process_type': 'MPI' if self.engine.gpu_uses_mpi else None, 'threads_per_process': self._gpu_threads_per_process, 'thread_type': None } task.arguments.extend(self.arguments) task.copy_input_data.extend(self.copied_files) task.copy_input_data.extend(self.system.copied_files) task.post_exec.append('echo "{}" > sim_desc.txt'.format(task.name)) task.link_input_data.extend(self.input_data(**ensembles)) task.link_input_data.extend(self.system.linked_files) task.pre_exec.extend( self._sed.format(n, v, f) for f, vs in self.get_variables().items() for n, v in vs) return task
def sendr(qname, bulk_size, num_tasks): try: tasks = list() for cnt in range(num_tasks): task = Task() task.name = str(cnt) tasks.append(task) connection = pika.BlockingConnection(pika.ConnectionParameters( host=hostname, port=port, heartbeat=0)) channel = connection.channel() cur_task_cnt = 0 f = open('sendr.txt','w') f.write('start: %f\n'%time.time()) while(cur_task_cnt < num_tasks): workload = list() wld_size = 0 # tasks = copy_tasks for task in tasks: workload.append(task.to_dict()) # copy_tasks.remove(task) wld_size+=1 if wld_size == bulk_size: break cur_task_cnt += wld_size wld_as_json = json.dumps(workload) channel.basic_publish( exchange = '', routing_key = qname, body = wld_as_json, # properties=pika.BasicProperties( # delivery_mode = 2, # make message persistent # ) ) f.write('stop: %f\n'%time.time()) except Exception as ex: print('Error in sendr: %s' % ex) print(traceback.format_exc())
def describe_MD_pipeline(): p = Pipeline() p.name = 'MD' # MD stage s1 = Stage() s1.name = 'OpenMM' # Each Task() is an OpenMM executable that will run on a single GPU. # Set sleep time for local testing # for i in range(18): task = Task() task.name = 'md' task.pre_exec = [] # task.pre_exec += ['export MINICONDA=/gpfs/alpine/scratch/jdakka/bip178/miniconda'] # task.pre_exec += ['export PATH=$MINICONDA/bin:$PATH'] # task.pre_exec += ['export LD_LIBRARY_PATH=$MINICONDA/lib:$LD_LIBRARY_PATH'] task.pre_exec += ['module load python/2.7.15-anaconda2-5.3.0'] task.pre_exec += ['module load cuda/9.1.85'] task.pre_exec += ['module load gcc/6.4.0'] task.pre_exec += ['source activate openmm'] task.pre_exec += ['cd /gpfs/alpine/scratch/jdakka/bip178/benchmarks/MD_exps/fs-pep/results_2'] task.executable = '/ccs/home/jdakka/.conda/envs/openmm/bin/python' task.arguments = ['run_openmm.py', '-f', '/gpfs/alpine/scratch/jdakka/bip178/benchmarks/MD_exps/fs-pep/pdb/100-fs-peptide-400K.pdb'] task.cpu_reqs = {'processes': 1, 'process_type': None, 'threads_per_process': 1, 'thread_type': None } task.gpu_reqs = {'processes': 1, 'process_type': None, 'threads_per_process': 1, 'thread_type': 'CUDA' } # Add the MD task to the Docking Stage s1.add_tasks(task) # Add MD stage to the MD Pipeline p.add_stages(s1) return p
def create_pipeline(): p = Pipeline() s = Stage() t1 = Task() t1.name = 'simulation' t1.executable = ['sleep'] t1.arguments = ['10'] s.add_tasks(t1) p.add_stages(s) return p
def create_pipeline(): p = Pipeline() s = Stage() t1 = Task() t1.name = 'simulation' t1.executable = 'sleep' t1.arguments = ['10'] s.add_tasks(t1) p.add_stages(s) return p
def create_pipeline(): p = Pipeline() s = Stage() t = Task() t.name = 'simulation' t.executable = ['/bin/echo'] t.arguments = ['hello'] t.copy_input_data = [] t.copy_output_data = [] s.add_tasks(t) p.add_stages(s) return p
def create_pipeline(): p = Pipeline() s = Stage() t1 = Task() t1.name = 'simulation' t1.executable = ['/bin/echo'] t1.arguments = ['hello'] t1.copy_input_data = [] t1.copy_output_data = [] s.add_tasks(t1) p.add_stages(s) return p
def create_entry(cmt_filename, param_path, task_counter): """This function creates the Entk stage for creation of a database entry. :param cmt_filename: cmt_filename :param param_path: parameter directory :param pipelinedir: Directory of the pipeline :return: EnTK Stage """ # Get Database parameters databaseparam_path = os.path.join(param_path, "Database/DatabaseParameters.yml") DB_params = read_yaml_file(databaseparam_path) # Earthquake specific database parameters: Dir and Cid Cdir, Cid = get_Centry_path(DB_params["databasedir"], cmt_filename) # Create a Stage object database_entry = Stage() t1 = Task() t1.name = "database-entry" t1.pre_exec = PRE_EXECS t1.executable = 'create-entry' # Assign # executable to the task t1.arguments = ['-f %s' % cmt_filename, '-p %s' % param_path] # In the future maybe to database dir as a total log? t1.stdout = os.path.join( "%s" % Cdir, "logs", "stdout.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), t1.name)) t1.stderr = os.path.join( "%s" % Cdir, "logs", "stderr.pipeline_%s.task_%s.%s" % (Cid, str(task_counter).zfill(4), t1.name)) # Increase task-counter task_counter += 1 # Add Task to the Stage database_entry.add_tasks(t1) return database_entry, task_counter
def generate_pipeline(stages, tasks_per_stage=1): p = Pipeline() # Create `stages` stages, each with `tasks_per_stage` tasks for s_cnt in range(stages): s = Stage() s.name = 'stage %s' % (s_cnt + 1) for t_cnt in range(tasks_per_stage): t = Task() t.name = 'task %s' % (t_cnt + 1) t.executable = '/bin/sleep' t.arguments = ['100'] # Add the Task to the Stage s.add_tasks(t) # Add Stage to the Pipeline p.add_stages(s) return p
def generate_pipeline(name, stages): # generate the pipeline of prediction and blob detection # Create a Pipeline object p = Pipeline() p.name = name for s_cnt in range(stages): # Create a Stage object s = Stage() s.name = 'Stage %s'%s_cnt if s_cnt == 1: # assuming the original, undefined 'stage' referred to the loop counter # Create Task 1, training t = Task() t.name = 'my-task1' t.executable = ['sbatch'] # Assign executable to the task # Assign arguments for the task executable t.arguments = ['/Code/trainbatch.bat'] # Add the Task to the Stage s.add_tasks(t) # Add Stage to the Pipeline (the original snippet is truncated here; this minimal closing mirrors the other generate_pipeline examples) p.add_stages(s) return p
def generate_pipeline(): # Create a Pipeline object p = Pipeline() p.name = 'p1' # Create a Stage object s1 = Stage() s1.name = 's1' # Create a Task object which runs /bin/false, i.e. a task that always fails t1 = Task() t1.name = 't1' t1.executable = ['/bin/false'] # t1.arguments = ['"Hello World"','>>','temp.txt'] # Add the Task to the Stage s1.add_tasks(t1) # Add Stage to the Pipeline p.add_stages(s1) return p
def test_amgr_run_mock(): p = Pipeline() s = Stage() t = Task() t.name = 'simulation' t.executable = ['/bin/date'] s.tasks = t p.add_stages(s) res_dict = { 'resource': 'local.localhost', 'walltime': 5, 'cpus': 1, 'project': '' } appman = Amgr(hostname=hostname, port=port, rts="mock") appman.resource_desc = res_dict appman.workflow = [p] appman.run()
def test_task_exceptions(s,l,i,b): """ **Purpose**: Test if all attribute assignments raise exceptions for invalid values """ t = Task() data_type = [s,l,i,b] for data in data_type: if not isinstance(data,str): with pytest.raises(TypeError): t.name = data with pytest.raises(TypeError): t.path = data with pytest.raises(TypeError): t.parent_stage = data with pytest.raises(TypeError): t.parent_pipeline = data with pytest.raises(TypeError): t.stdout = data with pytest.raises(TypeError): t.stderr = data if not isinstance(data,list): with pytest.raises(TypeError): t.pre_exec = data with pytest.raises(TypeError): t.arguments = data with pytest.raises(TypeError): t.post_exec = data with pytest.raises(TypeError): t.upload_input_data = data with pytest.raises(TypeError): t.copy_input_data = data with pytest.raises(TypeError): t.link_input_data = data with pytest.raises(TypeError): t.move_input_data = data with pytest.raises(TypeError): t.copy_output_data = data with pytest.raises(TypeError): t.download_output_data = data with pytest.raises(TypeError): t.move_output_data = data if not isinstance(data, str) and not isinstance(data, list): with pytest.raises(TypeError): t.executable = data if not isinstance(data, str) and not isinstance(data, unicode): with pytest.raises(ValueError): t.cpu_reqs = { 'processes': 1, 'process_type': data, 'threads_per_process': 1, 'thread_type': None } t.cpu_reqs = { 'processes': 1, 'process_type': None, 'threads_per_process': 1, 'thread_type': data } t.gpu_reqs = { 'processes': 1, 'process_type': data, 'threads_per_process': 1, 'thread_type': None } t.gpu_reqs = { 'processes': 1, 'process_type': None, 'threads_per_process': 1, 'thread_type': data } if not isinstance(data, int): with pytest.raises(TypeError): t.cpu_reqs = { 'processes': data, 'process_type': None, 'threads_per_process': 1, 'thread_type': None } t.cpu_reqs = { 'processes': 1, 'process_type': None, 'threads_per_process': data, 'thread_type': None } t.gpu_reqs = { 'processes': data, 'process_type': None, 'threads_per_process': 1, 'thread_type': None } t.gpu_reqs = { 'processes': 1, 'process_type': None, 'threads_per_process': data, 'thread_type': None }
def test_create_cud_from_task(): """ **Purpose**: Test if the 'create_cud_from_task' function generates a RP ComputeUnitDescription with the complete Task description """ pipeline = 'p1' stage = 's1' task = 't1' placeholder_dict = { pipeline: { stage: { task: '/home/vivek/some_file.txt' } } } t1 = Task() t1.name = 't1' t1.pre_exec = ['module load gromacs'] t1.executable = ['grompp'] t1.arguments = ['hello'] t1.cpu_reqs = {'processes': 4, 'process_type': 'MPI', 'threads_per_process': 1, 'thread_type': 'OpenMP' } t1.gpu_reqs = {'processes': 4, 'process_type': 'MPI', 'threads_per_process': 2, 'thread_type': 'OpenMP' } t1.post_exec = ['echo test'] t1.upload_input_data = ['upload_input.dat'] t1.copy_input_data = ['copy_input.dat'] t1.link_input_data = ['link_input.dat'] t1.copy_output_data = ['copy_output.dat'] t1.download_output_data = ['download_output.dat'] p = Pipeline() p.name = 'p1' s = Stage() s.name = 's1' s.tasks = t1 p.stages = s p._assign_uid('test') cud = create_cud_from_task(t1, placeholder_dict) assert cud.name == '%s,%s,%s,%s,%s,%s' % (t1.uid, t1.name, t1.parent_stage['uid'], t1.parent_stage['name'], t1.parent_pipeline['uid'], t1.parent_pipeline['name']) assert cud.pre_exec == t1.pre_exec # rp returns executable as a string regardless of whether assignment was using string or list assert cud.executable == t1.executable assert cud.arguments == t1.arguments assert cud.cpu_processes == t1.cpu_reqs['processes'] assert cud.cpu_threads == t1.cpu_reqs['threads_per_process'] assert cud.cpu_process_type == t1.cpu_reqs['process_type'] assert cud.cpu_thread_type == t1.cpu_reqs['thread_type'] assert cud.gpu_processes == t1.gpu_reqs['processes'] assert cud.gpu_threads == t1.gpu_reqs['threads_per_process'] assert cud.gpu_process_type == t1.gpu_reqs['process_type'] assert cud.gpu_thread_type == t1.gpu_reqs['thread_type'] assert cud.post_exec == t1.post_exec assert {'source': 'upload_input.dat', 'target': 'upload_input.dat'} in cud.input_staging assert {'source': 'copy_input.dat', 'action': rp.COPY, 'target': 'copy_input.dat'} in cud.input_staging assert {'source': 'link_input.dat', 'action': rp.LINK, 'target': 'link_input.dat'} in cud.input_staging assert {'source': 'copy_output.dat', 'action': rp.COPY, 'target': 'copy_output.dat'} in cud.output_staging assert {'source': 'download_output.dat', 'target': 'download_output.dat'} in cud.output_staging
def GeneralCycle(self, Replicas, Replica_Cores, Cycle, MD_Executable, ExchangeMethod): """ All cycles after the initial cycle Pulls up exchange pairs file and generates the new workflow """ self._prof.prof('InitcreateMDwokflow_{0}'.format(Cycle), uid=self._uid) with open('exchangePairs_{0}.dat'.format(Cycle),'r') as f: # Read exchangePairs.dat ExchangeArray = [] for line in f: ExchangeArray.append(int(line.split()[1])) #ExchangeArray.append(line) #print ExchangeArray q = Pipeline() q.name = 'genpipeline{0}'.format(Cycle) #Bookkeeping stage_uids = list() task_uids = list() ## = dict() md_dict = dict() #Create initial MD stage md_stg = Stage() md_stg.name = 'mdstage{0}'.format(Cycle) self._prof.prof('InitMD_{0}'.format(Cycle), uid=self._uid) for r in range (Replicas): md_tsk = AMBERTask(cores=Replica_Cores, MD_Executable=MD_Executable) md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=r,cycle=Cycle) md_tsk.link_input_data = ['%s/restrt > inpcrd'%(self.Book[Cycle-1][ExchangeArray[r]]), '%s/prmtop'%(self.Book[0][r]), #'%s/prmtop'%(self.Tarball_path[0]), '%s/mdin_{0}'.format(r)%(self.Book[0][r])] #'%s/mdin'%(self.Book[0][r])] #'%s/mdin'%(self.Tarball_path[0])] md_tsk.arguments = ['-O', '-i', 'mdin_{0}'.format(r), '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out_{0}'.format(r),'-inf', 'mdinfo_{0}'.format(r)] #md_tsk.arguments = ['-O', '-i', 'mdin', '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out_{0}'.format(r),'-inf', 'mdinfo_{0}'.format(r)] md_dict[r] = '$Pipeline_%s_Stage_%s_Task_%s'%(q.name, md_stg.name, md_tsk.name) self.md_task_list.append(md_tsk) md_stg.add_tasks(md_tsk) q.add_stages(md_stg) ex_stg = Stage() ex_stg.name = 'exstg{0}'.format(Cycle+1) #Create Exchange Task ex_tsk = Task() ex_tsk.name = 'extsk{0}'.format(Cycle+1) ex_tsk.executable = ['python'] ex_tsk.upload_input_data = [ExchangeMethod] for r in range (Replicas): ex_tsk.link_input_data += ['%s/mdinfo_%s'%(md_dict[r],r)] ex_tsk.arguments = ['TempEx.py','{0}'.format(Replicas), '{0}'.format(Cycle+1)] ex_tsk.cores = 1 ex_tsk.mpi = False ex_tsk.download_output_data = ['exchangePairs_{0}.dat'.format(Cycle+1)] # Finds exchange partners, also Generates exchange history trace ex_stg.add_tasks(ex_tsk) #task_uids.append(ex_tsk.uid) self.ex_task_list.append(ex_tsk) q.add_stages(ex_stg) #stage_uids.append(ex_stg.uid) self.Book.append(md_dict) #self._prof.prof('EndEx_{0}'.format(Cycle), uid=self._uid) #print d #print self.Book return q
def generate_pipeline(): # Create a Pipeline object p = Pipeline() p.name = 'p1' # Create a Stage object s1 = Stage() s1.name = 's1' # Create a Task object which creates a file named 'output.txt' of size 1 MB t1 = Task() t1.name = 't1' t1.executable = '/bin/bash' t1.arguments = ['-l', '-c', 'base64 /dev/urandom | head -c 1000000 > output.txt'] # Add the Task to the Stage s1.add_tasks(t1) # Add Stage to the Pipeline p.add_stages(s1) # Create another Stage object to hold character count tasks s2 = Stage() s2.name = 's2' s2_task_uids = [] for cnt in range(30): # Create a Task object t2 = Task() t2.name = 't%s' % (cnt + 1) t2.executable = '/bin/bash' t2.arguments = ['-l', '-c', 'grep -o . output.txt | sort | uniq -c > ccount.txt'] # Copy data from the task in the first stage to the current task's location t2.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/output.txt' % (p.name, s1.name, t1.name)] # Add the Task to the Stage s2.add_tasks(t2) s2_task_uids.append(t2.name) # Add Stage to the Pipeline p.add_stages(s2) # Create another Stage object to hold checksum tasks s3 = Stage() s3.name = 's3' for cnt in range(30): # Create a Task object t3 = Task() t3.name = 't%s' % (cnt + 1) t3.executable = '/bin/bash' t3.arguments = ['-l', '-c', 'sha1sum ccount.txt > chksum.txt'] # Copy data from the task in the first stage to the current task's location t3.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/ccount.txt' % (p.name, s2.name, s2_task_uids[cnt])] # Download the output of the current task to the current location t3.download_output_data = ['chksum.txt > chksum_%s.txt' % cnt] # Add the Task to the Stage s3.add_tasks(t3) # Add Stage to the Pipeline p.add_stages(s3) return p
# If RabbitMQ is running under a Docker container or on another VM, set "RMQ_HOSTNAME" and "RMQ_PORT" in the session where you are running # this script. hostname = os.environ.get('RMQ_HOSTNAME', 'localhost') port = int(os.environ.get('RMQ_PORT', '5672')) # the port must be an integer if __name__ == '__main__': # Create a Pipeline object p = Pipeline() # Create a Stage object s = Stage() # Create a Task object t = Task() t.name = 'my-first-task' # Assign a name to the task (optional, do not use ',' or '_') t.executable = '/bin/echo' # Assign executable to the task t.arguments = ['Hello World'] # Assign arguments for the task executable # Add Task to the Stage s.add_tasks(t) # Add Stage to the Pipeline p.add_stages(s) # Create Application Manager appman = AppManager(hostname=hostname, port=port) # Create a dictionary describing three mandatory keys: # resource, walltime, and cpus # resource is 'local.localhost' to execute locally # (the original snippet is truncated here; the lines below are a typical completion mirroring the other AppManager examples in this collection) res_dict = { 'resource': 'local.localhost', 'walltime': 10, 'cpus': 1 } # Assign resource request description to the Application Manager appman.resource_desc = res_dict # Assign the workflow as a set of Pipelines to the Application Manager appman.workflow = set([p]) # Run the Application Manager appman.run()
def InitCycle(self, Replicas, Replica_Cores, md_executable, ExchangeMethod, timesteps): # "Cycle" = 1 MD stage plus the subsequent exchange computation """ Initial cycle consists of: 1) Create tarball of MD input data 2) Transfer the tarball to pilot sandbox 3) Untar the tarball 4) Run first Cycle """ #Initialize Pipeline #self._prof.prof('InitTar', uid=self._uid) p = Pipeline() p.name = 'initpipeline' md_dict = dict() #Bookkeeping tar_dict = dict() #Bookkeeping ##Write the input files self._prof.prof('InitWriteInputs', uid=self._uid) writeInputs.writeInputs(max_temp=350,min_temp=250,replicas=Replicas,timesteps=timesteps) self._prof.prof('EndWriteInputs', uid=self._uid) self._prof.prof('InitTar', uid=self._uid) #Create Tarball of input data tar = tarfile.open("Input_Files.tar","w") for name in ["prmtop", "inpcrd", "mdin"]: tar.add(name) for r in range (Replicas): tar.add('mdin_{0}'.format(r)) tar.close() #delete all input files outside the tarball for r in range (Replicas): os.remove('mdin_{0}'.format(r)) self._prof.prof('EndTar', uid=self._uid) #Create Untar Stage untar_stg = Stage() untar_stg.name = 'untarStg' #Untar Task untar_tsk = Task() untar_tsk.name = 'untartsk' untar_tsk.executable = ['python'] untar_tsk.upload_input_data = ['untar_input_files.py','Input_Files.tar'] untar_tsk.arguments = ['untar_input_files.py','Input_Files.tar'] untar_tsk.cores = 1 untar_stg.add_tasks(untar_tsk) p.add_stages(untar_stg) tar_dict[0] = '$Pipeline_%s_Stage_%s_Task_%s'%(p.name, untar_stg.name, untar_tsk.name) # First MD stage: needs to be defined separately since workflow is not built from a predetermined order md_stg = Stage() md_stg.name = 'mdstg0' self._prof.prof('InitMD_0', uid=self._uid) # MD tasks for r in range (Replicas): md_tsk = AMBERTask(cores=Replica_Cores, MD_Executable=md_executable) md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=r,cycle=0) md_tsk.link_input_data += [ '%s/inpcrd'%tar_dict[0], '%s/prmtop'%tar_dict[0], '%s/mdin_{0}'.format(r)%tar_dict[0] #Use for full temperature exchange #'%s/mdin'%tar_dict[0] #Testing only ] md_tsk.arguments = ['-O','-p','prmtop', '-i', 'mdin_{0}'.format(r), # Use this for full Temperature Exchange '-c','inpcrd','-o','out_{0}'.format(r), '-inf','mdinfo_{0}'.format(r)] md_dict[r] = '$Pipeline_%s_Stage_%s_Task_%s'%(p.name, md_stg.name, md_tsk.name) md_stg.add_tasks(md_tsk) self.md_task_list.append(md_tsk) #print md_tsk.uid p.add_stages(md_stg) #stage_uids.append(md_stg.uid) # First Exchange Stage ex_stg = Stage() ex_stg.name = 'exstg0' self._prof.prof('InitEx_0', uid=self._uid) #with open('logfile.log', 'a') as logfile: # logfile.write( '%.5f' %time.time() + ',' + 'InitEx0' + '\n') # Create Exchange Task. Exchange task performs a Metropolis Hastings thermodynamic balance condition # check and spits out the exchangePairs.dat file that contains a sorted list of ordered pairs. # Said pairs then exchange configurations by linking output configuration files appropriately. ex_tsk = Task() ex_tsk.name = 'extsk0' ex_tsk.executable = ['python'] ex_tsk.upload_input_data = [ExchangeMethod] for r in range (Replicas): ex_tsk.link_input_data += ['%s/mdinfo_%s'%(md_dict[r],r)] ex_tsk.arguments = ['TempEx.py','{0}'.format(Replicas), '0'] ex_tsk.cores = 1 ex_tsk.mpi = False ex_tsk.download_output_data = ['exchangePairs_0.dat'] ex_stg.add_tasks(ex_tsk) #task_uids.append(ex_tsk.uid) p.add_stages(ex_stg) self.ex_task_list.append(ex_tsk) #self.ex_task_uids.append(ex_tsk.uid) self.Book.append(md_dict) return p
def general_cycle(self, replicas, replica_cores, cycle, python_path, md_executable, exchange_method, pre_exec): """ All cycles after the initial cycle Pulls up exchange pairs file and generates the new workflow """ self._prof.prof('InitcreateMDwokflow_{0}'.format(cycle), uid=self._uid) with open('exchangePairs_{0}.dat'.format(cycle), 'r') as f: # Read exchangePairs.dat exchange_array = [] for line in f: exchange_array.append(int(line.split()[1])) #exchange_array.append(line) #print exchange_array q = Pipeline() q.name = 'genpipeline{0}'.format(cycle) #bookkeeping stage_uids = list() task_uids = list() ## = dict() md_dict = dict() #Create MD stage md_stg = Stage() md_stg.name = 'mdstage{0}'.format(cycle) self._prof.prof('InitMD_{0}'.format(cycle), uid=self._uid) for r in range(replicas): md_tsk = AMBERTask(cores=replica_cores, md_executable=md_executable, pre_exec=pre_exec) md_tsk.name = 'mdtsk-{replica}-{cycle}'.format( replica=r, cycle=cycle) md_tsk.link_input_data = [ '%s/restrt > inpcrd' % (self.book[cycle - 1][exchange_array[r]]), '%s/prmtop' % (self.book[0][r]), '%s/mdin_{0}'.format(r) % (self.book[0][r]) ] ### The Following softlinking scheme is to be used ONLY if node local file system is to be used: not fully supported yet. #md_tsk.link_input_data = ['$NODE_LFS_PATH/rstrt-{replica}-{cycle}'.format(replica=exchange_array[r],cycle=cycle-1) > '$NODE_LFS_PATH/inpcrd', # #'%s/restrt > inpcrd'%(self.book[cycle-1][exchange_array[r]]), # '%s/prmtop'%(self.book[0][r]), # '%s/mdin_{0}'.format(r)%(self.Book[0][r])] md_tsk.arguments = [ '-O', '-i', 'mdin_{0}'.format(r), '-p', 'prmtop', '-c', 'inpcrd', #'-c', 'rstrt-{replica}-{cycle}'.format(replica=r,cycle=cycle-1), '-o', 'out-{replica}-{cycle}'.format(replica=r, cycle=cycle), '-r', 'restrt', #'-r', 'rstrt-{replica}-{cycle}'.format(replica=r,cycle=cycle), '-x', 'mdcrd-{replica}-{cycle}'.format(replica=r, cycle=cycle), '-inf', 'mdinfo_{0}'.format(r) ] #md_tsk.tag = 'mdtsk-{replica}-{cycle}'.format(replica=r,cycle=0) md_dict[r] = '$Pipeline_%s_Stage_%s_Task_%s' % ( q.name, md_stg.name, md_tsk.name) self.md_task_list.append(md_tsk) md_stg.add_tasks(md_tsk) q.add_stages(md_stg) ex_stg = Stage() ex_stg.name = 'exstg{0}'.format(cycle + 1) #Create Exchange Task ex_tsk = Task() ex_tsk.name = 'extsk{0}'.format(cycle + 1) ex_tsk.executable = [python_path]#['/usr/bin/python'] #['/opt/python/bin/python'] ex_tsk.upload_input_data = [exchange_method] for r in range(replicas): ex_tsk.link_input_data += ['%s/mdinfo_%s' % (md_dict[r], r)] ex_tsk.pre_exec = ['mv *.py exchange_method.py'] ex_tsk.arguments = [ 'exchange_method.py', '{0}'.format(replicas), '{0}'.format(cycle + 1) ] ex_tsk.cores = 1 ex_tsk.mpi = False ex_tsk.download_output_data = [ 'exchangePairs_{0}.dat'.format(cycle + 1) ] # Finds exchange partners, also Generates exchange history trace ex_stg.add_tasks(ex_tsk) #task_uids.append(ex_tsk.uid) self.ex_task_list.append(ex_tsk) q.add_stages(ex_stg) #stage_uids.append(ex_stg.uid) self.book.append(md_dict) #self._prof.prof('EndEx_{0}'.format(cycle), uid=self._uid) #print d #print self.book return q
def init_cycle(self, replicas, replica_cores, python_path, md_executable, exchange_method, min_temp, max_temp, timesteps, basename, pre_exec): # "cycle" = 1 MD stage plus the subsequent exchange computation """ Initial cycle consists of: 1) Create tarball of MD input data 2) Transfer the tarball to pilot sandbox 3) Untar the tarball 4) Run first cycle """ #Initialize Pipeline self._prof.prof('InitTar', uid=self._uid) p = Pipeline() p.name = 'initpipeline' md_dict = dict() #bookkeeping tar_dict = dict() #bookkeeping #Write the input files self._prof.prof('InitWriteInputs', uid=self._uid) writeInputs.writeInputs( max_temp=max_temp, min_temp=min_temp, replicas=replicas, timesteps=timesteps, basename=basename) self._prof.prof('EndWriteInputs', uid=self._uid) self._prof.prof('InitTar', uid=self._uid) #Create Tarball of input data tar = tarfile.open("input_files.tar", "w") for name in [ basename + ".prmtop", basename + ".inpcrd", basename + ".mdin" ]: tar.add(name) for r in range(replicas): tar.add('mdin_{0}'.format(r)) tar.close() #delete all input files outside the tarball for r in range(replicas): os.remove('mdin_{0}'.format(r)) self._prof.prof('EndTar', uid=self._uid) #Create Untar Stage repo = git.Repo('.', search_parent_directories=True) aux_function_path = repo.working_tree_dir untar_stg = Stage() untar_stg.name = 'untarStg' #Untar Task untar_tsk = Task() untar_tsk.name = 'untartsk' untar_tsk.executable = ['python'] untar_tsk.upload_input_data = [ str(aux_function_path)+'/repex/untar_input_files.py', 'input_files.tar' ] untar_tsk.arguments = ['untar_input_files.py', 'input_files.tar'] untar_tsk.cpu_reqs = { 'processes': 1, 'process_type': None, 'threads_per_process': 1, 'thread_type': None } # cpu_reqs expects a dict, not an int #untar_tsk.post_exec = [''] untar_stg.add_tasks(untar_tsk) p.add_stages(untar_stg) tar_dict[0] = '$Pipeline_%s_Stage_%s_Task_%s' % ( p.name, untar_stg.name, untar_tsk.name) # First MD stage: needs to be defined separately since workflow is not built from a predetermined order, also equilibration needs to happen first.
md_stg = Stage() md_stg.name = 'mdstg0' self._prof.prof('InitMD_0', uid=self._uid) # MD tasks for r in range(replicas): md_tsk = AMBERTask(cores=replica_cores, md_executable=md_executable, pre_exec=pre_exec) md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=r, cycle=0) md_tsk.link_input_data += [ '%s/inpcrd' % tar_dict[0], '%s/prmtop' % tar_dict[0], '%s/mdin_{0}'.format(r) % tar_dict[0] #Use for full temperature exchange ] md_tsk.arguments = [ '-O', '-p', 'prmtop', '-i', 'mdin_{0}'.format(r), '-c', 'inpcrd', '-o', 'out-{replica}-{cycle}'.format(replica=r, cycle=0), '-r', 'restrt'.format(replica=r, cycle=0), #'-r', 'rstrt-{replica}-{cycle}'.format(replica=r,cycle=0), '-x', 'mdcrd-{replica}-{cycle}'.format(replica=r, cycle=0), #'-o', '$NODE_LFS_PATH/out-{replica}-{cycle}'.format(replica=r,cycle=0), #'-r', '$NODE_LFS_PATH/rstrt-{replica}-{cycle}'.format(replica=r,cycle=0), #'-x', '$NODE_LFS_PATH/mdcrd-{replica}-{cycle}'.format(replica=r,cycle=0), '-inf', 'mdinfo_{0}'.format(r) ] md_dict[r] = '$Pipeline_%s_Stage_%s_Task_%s' % ( p.name, md_stg.name, md_tsk.name) md_stg.add_tasks(md_tsk) self.md_task_list.append(md_tsk) #print md_tsk.uid p.add_stages(md_stg) #stage_uids.append(md_stg.uid) # First Exchange Stage ex_stg = Stage() ex_stg.name = 'exstg0' self._prof.prof('InitEx_0', uid=self._uid) # Create Exchange Task ex_tsk = Task() ex_tsk.name = 'extsk0' #ex_tsk.pre_exec = ['module load python/2.7.10'] ex_tsk.executable = [python_path] ex_tsk.upload_input_data = [exchange_method] for r in range(replicas): ex_tsk.link_input_data += ['%s/mdinfo_%s' % (md_dict[r], r)] ex_tsk.pre_exec = ['mv *.py exchange_method.py'] ex_tsk.arguments = ['exchange_method.py', '{0}'.format(replicas), '0'] ex_tsk.cores = 1 ex_tsk.mpi = False ex_tsk.download_output_data = ['exchangePairs_0.dat'] ex_stg.add_tasks(ex_tsk) #task_uids.append(ex_tsk.uid) p.add_stages(ex_stg) self.ex_task_list.append(ex_tsk) #self.ex_task_uids.append(ex_tsk.uid) self.book.append(md_dict) return p
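init_cycle/general_cycle (and the capitalized InitCycle/GeneralCycle above) each return one Pipeline per replica-exchange cycle; a driver is expected to run the initial pipeline, which downloads exchangePairs_<cycle>.dat, and then build and submit the next cycle from it. The sketch below is only illustrative: the wrapper class name, the loose variables (replicas, n_cycles, ...), and the resource description are all assumptions.

appman = AppManager(hostname='localhost', port=5672)            # assumed RabbitMQ endpoint
appman.resource_desc = {'resource': 'local.localhost',          # assumed resource
                        'walltime': 60, 'cpus': 32}
remd = ReplicaExchange()                                        # assumed wrapper class exposing the methods above
appman.workflow = set([remd.init_cycle(replicas, replica_cores, python_path,
                                       md_executable, exchange_method, min_temp,
                                       max_temp, timesteps, basename, pre_exec)])
appman.run()                                                    # writes exchangePairs_0.dat
for cycle in range(1, n_cycles):                                # n_cycles is assumed
    appman.workflow = set([remd.general_cycle(replicas, replica_cores, cycle,
                                              python_path, md_executable,
                                              exchange_method, pre_exec)])
    appman.run()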
def test_rp_da_scheduler_bw(): """ **Purpose**: Run an EnTK application on localhost """ p1 = Pipeline() p1.name = 'p1' n = 10 s1 = Stage() s1.name = 's1' for x in range(n): t = Task() t.name = 't%s'%x t.executable = ['/bin/hostname'] t.arguments = ['>','hostname.txt'] t.cpu_reqs['processes'] = 1 t.cpu_reqs['threads_per_process'] = 16 t.cpu_reqs['thread_type'] = '' t.cpu_reqs['process_type'] = '' t.lfs_per_process = 10 t.download_output_data = ['hostname.txt > s1_t%s_hostname.txt'%(x)] s1.add_tasks(t) p1.add_stages(s1) s2 = Stage() s2.name = 's2' for x in range(n): t = Task() t.executable = ['/bin/hostname'] t.arguments = ['>','hostname.txt'] t.cpu_reqs['processes'] = 1 t.cpu_reqs['threads_per_process'] = 16 t.cpu_reqs['thread_type'] = '' t.cpu_reqs['process_type'] = '' t.download_output_data = ['hostname.txt > s2_t%s_hostname.txt'%(x)] t.tag = 't%s'%x s2.add_tasks(t) p1.add_stages(s2) res_dict = { 'resource' : 'ncsa.bw_aprun', 'walltime' : 10, 'cpus' : 128, 'project' : 'gk4', 'queue' : 'high' } os.environ['RADICAL_PILOT_DBURL'] = MLAB appman = AppManager(hostname=hostname, port=port) appman.resource_desc = res_dict appman.workflow = [p1] appman.run() for i in range(n): assert open('s1_t%s_hostname.txt'%i,'r').readline().strip() == open('s2_t%s_hostname.txt'%i,'r').readline().strip() txts = glob('%s/*.txt' % os.getcwd()) for f in txts: os.remove(f)
def test_task_to_dict(): """ **Purpose**: Test if the 'to_dict' function of Task class converts all expected attributes of the Task into a dictionary """ t = Task() d = t.to_dict() assert d == { 'uid': None, 'name': None, 'state': states.INITIAL, 'state_history': [states.INITIAL], 'pre_exec': [], 'executable': str(), 'arguments': [], 'post_exec': [], 'cpu_reqs': { 'processes': 1, 'process_type': None, 'threads_per_process': 1, 'thread_type': None }, 'gpu_reqs': { 'processes': 0, 'process_type': None, 'threads_per_process': 0, 'thread_type': None }, 'lfs_per_process': 0, 'upload_input_data': [], 'copy_input_data': [], 'link_input_data': [], 'move_input_data': [], 'copy_output_data': [], 'move_output_data': [], 'download_output_data': [], 'stdout': None, 'stderr': None, 'exit_code': None, 'path': None, 'tag': None, 'parent_stage': {'uid':None, 'name': None}, 'parent_pipeline': {'uid':None, 'name': None}} t = Task() t.uid = 'test.0000' t.name = 'new' t.pre_exec = ['module load abc'] t.executable = ['sleep'] t.arguments = ['10'] t.cpu_reqs['processes'] = 10 t.cpu_reqs['threads_per_process'] = 2 t.gpu_reqs['processes'] = 5 t.gpu_reqs['threads_per_process'] = 3 t.lfs_per_process = 1024 t.upload_input_data = ['test1'] t.copy_input_data = ['test2'] t.link_input_data = ['test3'] t.move_input_data = ['test4'] t.copy_output_data = ['test5'] t.move_output_data = ['test6'] t.download_output_data = ['test7'] t.stdout = 'out' t.stderr = 'err' t.exit_code = 1 t.path = 'a/b/c' t.tag = 'task.0010' t.parent_stage = {'uid': 's1', 'name': 'stage1'} t.parent_pipeline = {'uid': 'p1', 'name': 'pipeline1'} d = t.to_dict() assert d == { 'uid': 'test.0000', 'name': 'new', 'state': states.INITIAL, 'state_history': [states.INITIAL], 'pre_exec': ['module load abc'], 'executable': 'sleep', 'arguments': ['10'], 'post_exec': [], 'cpu_reqs': { 'processes': 10, 'process_type': None, 'threads_per_process': 2, 'thread_type': None }, 'gpu_reqs': { 'processes': 5, 'process_type': None, 'threads_per_process': 3, 'thread_type': None }, 'lfs_per_process': 1024, 'upload_input_data': ['test1'], 'copy_input_data': ['test2'], 'link_input_data': ['test3'], 'move_input_data': ['test4'], 'copy_output_data': ['test5'], 'move_output_data': ['test6'], 'download_output_data': ['test7'], 'stdout': 'out', 'stderr': 'err', 'exit_code': 1, 'path': 'a/b/c', 'tag': 'task.0010', 'parent_stage': {'uid': 's1', 'name': 'stage1'}, 'parent_pipeline': {'uid': 'p1', 'name': 'pipeline1'}} t.executable = 'sleep' d = t.to_dict() assert d == { 'uid': 'test.0000', 'name': 'new', 'state': states.INITIAL, 'state_history': [states.INITIAL], 'pre_exec': ['module load abc'], 'executable': 'sleep', 'arguments': ['10'], 'post_exec': [], 'cpu_reqs': { 'processes': 10, 'process_type': None, 'threads_per_process': 2, 'thread_type': None }, 'gpu_reqs': { 'processes': 5, 'process_type': None, 'threads_per_process': 3, 'thread_type': None }, 'lfs_per_process': 1024, 'upload_input_data': ['test1'], 'copy_input_data': ['test2'], 'link_input_data': ['test3'], 'move_input_data': ['test4'], 'copy_output_data': ['test5'], 'move_output_data': ['test6'], 'download_output_data': ['test7'], 'stdout': 'out', 'stderr': 'err', 'exit_code': 1, 'path': 'a/b/c', 'tag': 'task.0010', 'parent_stage': {'uid': 's1', 'name': 'stage1'}, 'parent_pipeline': {'uid': 'p1', 'name': 'pipeline1'}}