Example #1
def generate_pipeline():

    p = Pipeline()
    s1 = Stage()

    t1 = Task()
    t1.executable = '/bin/sleep'
    t1.arguments = ['3']

    s1.add_tasks(t1)

    p.add_stages(s1)
    s2 = Stage()
    t2 = Task()
    t2.executable = '/bin/sleep'
    t2.arguments = ['3']

    s2.add_tasks(t2)
    p.add_stages(s2)
    s3 = Stage()

    t3 = Task()
    t3.executable = '/bin/sleep'
    t3.arguments = ['3']

    s3.add_tasks(t3)
    p.add_stages(s3)

    return p
    def describe_MD_stages():

        # Docking stage
        s1 = Stage()
        s1.name = 'Docking.%d' % CUR_NEW_STAGE

        # Docking task
        t1 = Task()
        t1.executable = ['sleep']
        t1.arguments = ['3']

        # Add the Docking task to the Docking Stage
        s1.add_tasks(t1)

        # MD stage
        s2 = Stage()
        s2.name = 'Simulation.%d' % CUR_NEW_STAGE

        # Each Task() is an OpenMM executable that will run on a single GPU.
        # Set sleep time for local testing
        for i in range(6):
            t2 = Task()
            t2.executable = ['sleep']
            t2.arguments = ['5']

            # Add the MD task to the Simulation stage
            s2.add_tasks(t2)

        # Add post-exec to the Stage
        s2.post_exec = func_condition

        return [s1, s2]
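
The post_exec assignment above refers to a func_condition callable that is not defined in this snippet; the other examples on this page pair such a condition with on_true/on_false handlers. A minimal sketch of what it might look like, assuming the CUR_NEW_STAGE and MAX_NEW_STAGE counters used in those examples:

def func_condition():
    # Keep extending the workflow until the maximum number of
    # adaptively added stages has been reached (names assumed)
    global CUR_NEW_STAGE, MAX_NEW_STAGE
    return CUR_NEW_STAGE < MAX_NEW_STAGE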
Example #3
def generate_pipeline(nid):

    p       = Pipeline()
    s1      = Stage()
    s2      = Stage()
    t1      = Task()

    p.name  = 'p%s' % nid
    s1.name = 's1'
    s2.name = 's2'
    t1.name = 't1'

    t1.executable = '/bin/echo'
    t1.arguments  = ['hello']

    s1.add_tasks(t1)
    p.add_stages(s1)

    for cnt in range(10):

        tn            = Task()
        tn.name       = 't%s' % (cnt + 1)
        tn.executable = '/bin/echo'
        tn.arguments  = ['world']

        # Copy data from the task in first stage to the current task's location
        tn.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/output.txt'
                              % (p.name, s1.name, t1.name)]
        s2.add_tasks(tn)

    p.add_stages(s2)

    return p
Example #4
def main():

    cmd = "{0} 'ls {1}'".format(ssh, dir_)
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, _ = p.communicate()

    out = out.decode('utf-8').strip().split(linesep)

    fullpaths = [op.join(dir_, p) for p in out]
    print(fullpaths)

    # Start radical entk pipeline

    p = Pipeline()

    for i in range(iterations):

        s = Stage()

        for fp in fullpaths:

            t = Task()
            t.name = 'Incrementation {}'.format(i)
            t.pre_exec = [
                'source /home/vhayot/miniconda3/etc/profile.d/conda.sh',
                'conda activate radenv'
            ]
            t.executable = 'python /home/vhayot/inc.py'

            if i == 0:
                t.arguments = [fp, out_dir, i]
            else:
                # Note: this assumes all data is accessible through a shared directory;
                # RADICAL-EnTK also works without a shared filesystem, however
                t.arguments = [
                    op.join(out_dir,
                            "it-{0}-{1}".format(i - 1, op.basename(fp))),
                    out_dir, i
                ]

            s.add_tasks(t)

        # Create a new stage every time there is a dependency
        p.add_stages(s)

    appman = AppManager(hostname=hostname, port=port)

    appman.resource_desc = {
        'resource': 'xsede.bridges',
        'walltime': 20,
        'cpus': 5,
        'project': 'mc3bggp',
        'schema': 'gsissh'
    }

    appman.workflow = set([p])

    appman.run()
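
Each task above invokes an external script, /home/vhayot/inc.py, which is not part of the example. A hypothetical sketch of such an increment script, inferred only from the arguments passed to it (input file, output directory, iteration index) and from the it-<iteration>-<basename> naming scheme the next iteration expects:

import os.path as op
import re
import sys

if __name__ == '__main__':
    in_file, out_dir, it = sys.argv[1], sys.argv[2], int(sys.argv[3])

    with open(in_file) as f:
        value = int(f.read().strip())

    # Strip any 'it-<n>-' prefix added by a previous iteration to recover the
    # original basename, then write the incremented value for the next iteration
    base = re.sub(r'^it-\d+-', '', op.basename(in_file))
    with open(op.join(out_dir, 'it-{0}-{1}'.format(it, base)), 'w') as f:
        f.write(str(value + 1))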
def get_pipeline(shared_fs=False, size=1):

    p = Pipeline()
    p.name = 'p'

    n = 4

    s1 = Stage()
    s1.name = 's1'
    for x in range(n):
        t = Task()
        t.name = 't%s'%x

        # dd if=/dev/random bs=<byte size of a chunk> count=<number of chunks> of=<output file name>

        t.executable = 'dd'

        if not shared_fs:
            t.arguments = ['if=/dev/urandom','bs=%sM'%size, 'count=1', 'of=$NODE_LFS_PATH/s1_t%s.txt'%x]
        else:
            t.arguments = ['if=/dev/urandom','bs=%sM'%size, 'count=1', 'of=/home/vivek91/s1_t%s.txt'%x]

        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
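        # Request node-local filesystem (LFS) space for this task (value in MB)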
        t.lfs_per_process = 1024

        s1.add_tasks(t)

    p.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'
    for x in range(n):
        t = Task()
        t.executable = ['dd']

        if not shared_fs:
            t.arguments = ['if=$NODE_LFS_PATH/s1_t%s.txt'%x,'bs=%sM'%size, 'count=1', 'of=$NODE_LFS_PATH/s2_t%s.txt'%x]
        else:
            t.arguments = ['if=/home/vivek91/s1_t%s.txt'%x,'bs=%sM'%size, 'count=1', 'of=/home/vivek91/s2_t%s.txt'%x]

        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
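        # Tag this task with the name of the matching s1 task so the scheduler
        # co-locates it on the node holding that task's $NODE_LFS_PATH output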
        t.tag = 't%s'%x

        s2.add_tasks(t)


    p.add_stages(s2)

    return p
Example #6
def get_pipeline(shared_fs=False, size=1):

    p = Pipeline()
    p.name = 'p'

    n = 4

    s1 = Stage()
    s1.name = 's1'
    for x in range(n):
        t = Task()
        t.name = 't%s'%x

        # dd if=/dev/random bs=<byte size of a chunk> count=<number of chunks> of=<output file name>

        t.executable = ['dd']

        if not shared_fs:
            t.arguments = ['if=/dev/urandom','bs=%sM'%size, 'count=1', 'of=$NODE_LFS_PATH/s1_t%s.txt'%x]
        else:
            t.arguments = ['if=/dev/urandom','bs=%sM'%size, 'count=1', 'of=/home/vivek91/s1_t%s.txt'%x]

        t.cpu_reqs['processes'] = 1        
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 1024

        s1.add_tasks(t)

    p.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'
    for x in range(n):
        t = Task()
        t.executable = ['dd']

        if not shared_fs:
            t.arguments = ['if=$NODE_LFS_PATH/s1_t%s.txt'%x,'bs=%sM'%size, 'count=1', 'of=$NODE_LFS_PATH/s2_t%s.txt'%x]
        else:
            t.arguments = ['if=/home/vivek91/s1_t%s.txt'%x,'bs=%sM'%size, 'count=1', 'of=/home/vivek91/s2_t%s.txt'%x]

        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 24
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.tag = 't%s'%x

        s2.add_tasks(t)


    p.add_stages(s2)    

    return p
Example #7
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # Create a Task object which creates a file named 'output.txt' of size 1 MB
    t1 = Task()
    t1.executable = '/bin/bash'
    t1.arguments = ['-l', '-c', 'base64 /dev/urandom | head -c 1000000 > output.txt']

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    # Create another Stage object to hold character count tasks
    s2 = Stage()

    # Create a Task object
    t2 = Task()
    t2.executable = '/bin/bash'
    t2.arguments = ['-l', '-c', 'grep -o . output.txt | sort | uniq -c > ccount.txt']
    # Copy data from the task in the first stage to the current task's location
    t2.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/output.txt' % (p.uid, s1.uid, t1.uid)]

    # Add the Task to the Stage
    s2.add_tasks(t2)

    # Add Stage to the Pipeline
    p.add_stages(s2)

    # Create another Stage object to hold checksum tasks
    s3 = Stage()

    # Create a Task object
    t3 = Task()
    t3.executable = '/bin/bash'
    t3.arguments = ['-l', '-c', 'sha1sum ccount.txt > chksum.txt']
    # Copy data from the task in the first stage to the current task's location
    t3.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/ccount.txt' % (p.uid, s2.uid, t2.uid)]
    # Download the output of the current task to the current location
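    # 'cnt' below is expected to be defined in the enclosing scope (e.g. an iteration counter)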
    t3.download_output_data = ['chksum.txt > chksum_%s.txt' % cnt]

    # Add the Task to the Stage
    s3.add_tasks(t3)

    # Add Stage to the Pipeline
    p.add_stages(s3)

    return p
Example #8
def get_pipeline(tasks):

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage 1
    s1 = Stage()

    # Create a Task object according to the app_name
    t1 = Task()
    t1.pre_exec = ['module load gromacs/5.0/INTEL-140-MVAPICH2-2.0']
    t1.executable = app_coll['grompp']['executable']
    t1.arguments = app_coll['grompp']['arguments']
    t1.cores = app_coll['grompp']['cores']
    t1.link_input_data = [
        '$SHARED/grompp.mdp > grompp.mdp', '$SHARED/input.gro > input.gro',
        '$SHARED/topol.top > topol.top'
    ]

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    # Create a Stage 2
    s2 = Stage()

    for cnt in range(tasks):

        # Create a Task object according to the app_name
        t2 = Task()
        t2.pre_exec = [
            'module load gromacs/5.0/INTEL-140-MVAPICH2-2.0',
            'export OMP_NUM_THREADS=%s' % num_cores
        ]
        t2.executable = app_coll['mdrun']['executable']
        t2.arguments = app_coll['mdrun']['arguments']
        #t2.cores = app_coll['mdrun']['cores']
        t2.cores = num_cores
        t2.copy_input_data = [
            '$Pipeline_%s_Stage_%s_Task_%s/topol.tpr' % (p.uid, s1.uid, t1.uid)
        ]

        # Add the Task to the Stage
        s2.add_tasks(t2)

    # Add Stage to the Pipeline
    p.add_stages(s2)

    return p
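
The pipeline above indexes into an app_coll dictionary and a num_cores variable that are defined outside this snippet. A minimal sketch of what such an application collection might contain; the GROMACS command names and arguments below are assumptions, not part of the original example:

num_cores = 16  # assumed value

app_coll = {
    'grompp': {
        'executable': 'gmx_mpi',
        'arguments': ['grompp', '-f', 'grompp.mdp', '-c', 'input.gro',
                      '-p', 'topol.top', '-o', 'topol.tpr'],
        'cores': 1
    },
    'mdrun': {
        'executable': 'gmx_mpi',
        'arguments': ['mdrun', '-s', 'topol.tpr', '-deffnm', 'md'],
        'cores': num_cores
    }
}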
Example #9
def generate_pipeline(nid):

    # Create a Pipeline object
    p = Pipeline()
    p.name = 'p%s' % nid

    # Create a Stage object
    s1 = Stage()
    s1.name = 's1'

    # Create a Task object that echoes 'hello'
    t1 = Task()
    t1.name = 't2'
    t1.executable = ['/bin/echo']
    t1.arguments = ['hello']

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    # Create another Stage object to hold character count tasks
    s2 = Stage()
    s2.name = 's2'
    s2_task_uids = []

    for cnt in range(10):

        # Create a Task object
        t2 = Task()
        t2.name = 't%s' % (cnt + 1)
        t2.executable = ['/bin/echo']
        t2.arguments = ['world']
        # Copy data from the task in the first stage to the current task's location
        t2.copy_input_data = [
            '$Pipeline_%s_Stage_%s_Task_%s/output.txt' %
            (p.name, s1.name, t1.name)
        ]

        # Add the Task to the Stage
        s2.add_tasks(t2)
        s2_task_uids.append(t2.name)

    # Add Stage to the Pipeline
    p.add_stages(s2)

    return p
Example #10
    def _task(self, pipeline_id, model_id, time_stamp):

        # Specify training hyperparameters
        # Select latent dimension for CVAE [3, ... self.num_ml]
        latent_dim = 3 + model_id
        epochs = 100
        batch_size = 512

        cvae_dir = f'{self.prefix}/data/ml/pipeline-{pipeline_id}'
        cm_data_path = f'{self.prefix}/data/preproc/pipeline-{pipeline_id}/cvae-input.h5'

        task = Task()

        self.load_environment(task)
        self.set_python_executable(task)
        self.assign_hardware(task)

        # Create output directory for generated files.
        task.pre_exec.extend([f'mkdir -p {cvae_dir}'])

        # Specify python ML task with arguments
        task.arguments = [f'{self.prefix}/examples/cvae_dbscan/scripts/cvae.py',
                          '--input', cm_data_path,
                          '--out', cvae_dir,
                          '--model_id', f'{model_id}',
                          '--epochs', f'{epochs}',
                          '--batch_size', f'{batch_size}',
                          '--latent_dim', f'{latent_dim}']
        
        return task
        def add_md_stg(rid,cycle):
            # MD stage here
            md_tsk = Task()
            md_stg = Stage()
            md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle)
            md_tsk.link_input_data += ['%s/inpcrd' %replica_sandbox, 
                                   '%s/prmtop' %replica_sandbox, 
                                   '%s/mdin-{replica}-{cycle}'.format(replica=rid, cycle=0) %replica_sandbox]
            md_tsk.arguments = ['-O', 
                            '-i',   'mdin-{replica}-{cycle}'.format(replica=rid, cycle=0), 
                            '-p',   'prmtop', 
                            '-c',   'inpcrd', 
                            '-o',   'out',
                            '-r',   '%s/restrt-{replica}-{cycle}'.format(replica=rid, cycle=cycle) %replica_sandbox,
                            '-x',   'mdcrd',
                            '-inf', '%s/mdinfo-{replica}-{cycle}'.format(replica=rid, cycle=cycle) %replica_sandbox]
            md_tsk.executable = ['/home/scm177/mantel/AMBER/amber14/bin/sander']
            md_tsk.cpu_reqs = {
                            'processes': replica_cores,
                            'process_type': '',
                            'threads_per_process': 1,
                            'thread_type': None
                               }
            md_tsk.pre_exec   = ['export dummy_variable=19', 'echo $SHARED']
         
            md_stg.add_tasks(md_tsk)
            md_stg.post_exec = {
                            'condition': md_post,
                            'on_true': suspend,
                            'on_false': exchange_stg
                          } 

            return md_stg
Example #12
    def generate_aggregating_task(self): 
        """ 
        Function to concatenate the MD trajectory (h5 contact map) 
        """ 
        p = Pipeline() 
        p.name = 'aggregating'
        s2 = Stage()
        s2.name = 'aggregating'

        # Aggregation task
        t2 = Task()
        # https://github.com/radical-collaboration/hyperspace/blob/MD/microscope/experiments/MD_to_CVAE/MD_to_CVAE.py
        t2.pre_exec = [] 
        t2.pre_exec += ['. /sw/summit/python/2.7/anaconda2/5.3.0/etc/profile.d/conda.sh']
        t2.pre_exec += ['conda activate %s' % conda_path] 
        t2.pre_exec += ['cd %s' % agg_path]
        t2.executable = ['%s/bin/python' % conda_path]  # MD_to_CVAE.py
        t2.arguments = [
                '%s/MD_to_CVAE.py' % agg_path, 
                '--sim_path', md_path, 
                '--train_frames', 100000]

        # assign hardware to the task
        t2.cpu_reqs = {
                'processes': 1,
                'process_type': None,
                'threads_per_process': 4,
                'thread_type': 'OpenMP'
                }
        # Add the aggregation task to the aggregating stage
        s2.add_tasks(t2)
        p.add_stages(s2) 
        return p
Example #13
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # Create 10 Task objects, each concatenating file1.txt and file2.txt into output.txt
    for x in range(10):
        t1 = Task()
        t1.executable = 'cat'
        t1.arguments = ['file1.txt', 'file2.txt', '>', 'output.txt']
        t1.copy_input_data = ['$SHARED/file1.txt', '$SHARED/file2.txt']
        t1.download_output_data = [
            'output.txt > %s/output_%s.txt' % (cur_dir, x + 1)
        ]

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
Example #14
    def func_on_true():

        global CUR_NEW_STAGE, CUR_TASKS, CUR_CORES, duration

        CUR_NEW_STAGE += 1

        s = Stage()

        for i in range(CUR_TASKS):
            t = Task()
            t.pre_exec = [
                'export PATH=/u/sciteam/balasubr/modules/stress-ng-0.09.34:$PATH'
            ]
            t.executable = ['stress-ng']
            t.arguments = ['-c', str(CUR_CORES), '-t', str(duration)]
            t.cpu_reqs = {
                'processes': 1,
                'process_type': '',
                'threads_per_process': CUR_CORES,
                'thread_type': ''
            }

            # Add the Task to the Stage
            s.add_tasks(t)

        # Add post-exec to the Stage
        s.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        p.add_stages(s)
    def generate_outlier_detection_stage(self) -> Stage:
        stage = Stage()
        stage.name = "outlier_detection"
        cfg = self.cfg.od_stage

        task = Task()
        task.cpu_reqs = cfg.cpu_reqs.dict()
        task.gpu_reqs = cfg.gpu_reqs.dict()
        task.pre_exec = cfg.pre_exec
        task.executable = cfg.executable
        task.arguments = cfg.arguments

        self.outlier_pdbs_path(self.cur_iteration).mkdir()

        # Update base parameters
        cfg.run_config.experiment_directory = self.cfg.experiment_directory
        cfg.run_config.input_path = self.aggregated_data_path(
            self.cur_iteration)
        cfg.run_config.output_path = self.outlier_pdbs_path(self.cur_iteration)
        cfg.run_config.weights_path = self.latest_ml_checkpoint_path(
            self.cur_iteration)
        cfg.run_config.restart_points_path = self.restart_points_path(
            self.cur_iteration)

        cfg_path = self.experiment_dirs["od_runs"].joinpath(
            f"od_{self.cur_iteration:03d}.yaml")
        cfg.run_config.dump_yaml(cfg_path)

        task.arguments += ["-c", cfg_path]
        stage.add_tasks(task)

        return stage
def generate_pipeline(name, stages):

    # Create a Pipeline object
    p = Pipeline()
    p.name = name


    for s_cnt in range(stages):

        # Create a Stage object
        s = Stage()
        s.name = 'Stage %s'%s_cnt

        for t_cnt in range(5):

            # Create a Task object
            t = Task()
            t.name = 'my-task'        # Assign a name to the task (optional)
            t.executable = '/bin/echo'   # Assign executable to the task
            # Assign arguments for the task executable
            t.arguments = ['I am task %s in %s in %s'%(t_cnt, s_cnt, name)]

            # Add the Task to the Stage
            s.add_tasks(t)

        # Add Stage to the Pipeline
        p.add_stages(s)

    return p
Example #17
 def post_stage():
     if (not os.path.exists(f'{run_dir}/aggregator/stop.aggregator')):
         nstages = len(p.stages)
         s = Stage()
         s.name = f"{nstages}"
         t = Task()
         t.cpu_reqs = {
             'processes': 1,
             'process_type': None,
             'threads_per_process': 4,
             'thread_type': 'OpenMP'
         }
         t.gpu_reqs = {
             'processes': 0,
             'process_type': None,
             'threads_per_process': 0,
             'thread_type': None
         }
         t.name = f" {i}_{nstages} "
         t.executable = PYTHON
         t.arguments = [
             f'{current_dir}/simulation.py',
             f'{run_dir}/simulations/all/{i}_{nstages}', ADIOS_XML
         ]
         subprocess.getstatusoutput(
             f'ln -s  {run_dir}/simulations/all/{i}_{nstages} {run_dir}/simulations/new/{i}_{nstages}'
         )
         s.add_tasks(t)
         s.post_exec = post_stage
         p.add_stages(s)
Example #18
 def add_ex_stg(rid, cycle):
     #ex stg here
     ex_tsk = Task()
     ex_stg = Stage()
     ex_tsk.name = 'extsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle)
     for rid in range(len(waiting_replicas)):
         ex_tsk.link_input_data += ['%s/mdinfo-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle)%replica_sandbox]
        
     ex_tsk.arguments = ['t_ex_gibbs.py', len(waiting_replicas)] #This needs to be fixed
     ex_tsk.executable = ['python']
     ex_tsk.cpu_reqs = {
                    'processes': 1,
                    'process_type': '',
                    'threads_per_process': 1,
                    'thread_type': None
                     }
     ex_tsk.pre_exec   = ['export dummy_variable=19']
      
     ex_stg.add_tasks(ex_tsk)
     ex_stg.post_exec = {
                     'condition': post_ex,
                     'on_true': terminate_replicas,
                     'on_false': continue_md
                   } 
     return ex_stg
Example #19
    def generate_aggregating_stage():
        """ 
        Function to concatenate the MD trajectory (h5 contact map) 
        """
        s2 = Stage()
        s2.name = 'aggregating'

        # Aggregation task
        t2 = Task()
        # https://github.com/radical-collaboration/hyperspace/blob/MD/microscope/experiments/MD_to_CVAE/MD_to_CVAE.py
        t2.pre_exec = []

        t2.pre_exec += [
            '. /sw/summit/python/2.7/anaconda2/5.3.0/etc/profile.d/conda.sh'
        ]
        t2.pre_exec += ['conda activate rp.copy']
        t2.pre_exec += [
            'cd /gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/MD_to_CVAE'
        ]
        t2.executable = ['/ccs/home/hrlee/.conda/envs/rp.copy/bin/python']  # MD_to_CVAE.py
        t2.arguments = [
            '/gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/MD_to_CVAE/MD_to_CVAE.py',
            '-f',
            '/gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/MD_exps/fs-pep'
        ]

        # Add the aggregation task to the aggregating stage
        s2.add_tasks(t2)
        return s2
    def generate_ml_stage(self) -> Stage:
        stage = Stage()
        stage.name = "learning"
        cfg = self.cfg.ml_stage

        task = Task()
        task.cpu_reqs = cfg.cpu_reqs.dict()
        task.gpu_reqs = cfg.gpu_reqs.dict()
        task.pre_exec = cfg.pre_exec
        task.executable = cfg.executable
        task.arguments = cfg.arguments

        # Update base parameters
        cfg.run_config.input_path = self.aggregated_data_path(
            self.cur_iteration)
        cfg.run_config.output_path = self.model_path(self.cur_iteration)
        if self.cur_iteration > 0:
            cfg.run_config.init_weights_path = self.latest_ml_checkpoint_path(
                self.cur_iteration - 1)

        cfg_path = self.experiment_dirs["ml_runs"].joinpath(
            f"ml_{self.cur_iteration:03d}.yaml")
        cfg.run_config.dump_yaml(cfg_path)

        task.arguments += ["-c", cfg_path]
        stage.add_tasks(task)

        return stage
def generate_pipeline():

    global CUR_TASKS, CUR_CORES, duration, MAX_NEW_STAGE

    def func_condition():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE < MAX_NEW_STAGE:
            return True

        return False

    def func_on_true():

        global CUR_NEW_STAGE
        CUR_NEW_STAGE += 1

        for t in p.stages[CUR_NEW_STAGE].tasks:
            cores = randint(1,16)
            t.arguments = ['-c', str(cores), '-t', str(duration)]

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE+1):

        # Create a Stage object
        s1 = Stage()

        for i in range(CUR_TASKS):

            t1 = Task()
            t1.pre_exec = ['export PATH=/u/sciteam/balasubr/modules/stress-ng-0.09.34:$PATH']
            t1.executable = ['stress-ng']
            t1.arguments = [ '-c', str(CUR_CORES), '-t', str(duration)]
            t1.cpu_reqs = {
                            'processes': 1,
                            'process_type': '',
                            'threads_per_process': CUR_CORES,
                            'thread_type': ''
                        }

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        s1.post_exec = {
                           'condition': func_condition,
                           'on_true': func_on_true,
                           'on_false': func_on_false
                       }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
Example #22
    def esmacs(self, rct_stage, stage, outdir="equilibration", name=None):

        for i in range(1, 13):
            t = Task()
            t.pre_exec = [
                "export WDIR=\"{}/{}\"".format(self.run_dir, name),
                ". {}".format(self.conda_init),
                "conda activate {}".format(self.esmacs_tenv),
                "module load {}".format(self.esmacs_tmodules),
                "mkdir -p $WDIR/replicas/rep{}/{}".format(i, outdir),
                "cd $WDIR/replicas/rep{}/{}".format(i, outdir),
                "rm -f {}.log {}.xml {}.dcd {}.chk".format(
                    stage, stage, stage, stage), "export OMP_NUM_THREADS=1"
            ]
            # t.executable = '/ccs/home/litan/miniconda3/envs/wf3/bin/python3.7'
            t.executable = 'python3'
            t.arguments = ['$WDIR/{}.py'.format(stage)]
            t.post_exec = []
            t.cpu_reqs = {
                'processes': 1,
                'process_type': None,
                'threads_per_process': 4,
                'thread_type': 'OpenMP'
            }
            t.gpu_reqs = {
                'processes': 1,
                'process_type': None,
                'threads_per_process': 1,
                'thread_type': 'CUDA'
            }
            getattr(self, rct_stage).add_tasks(t)
            print(getattr(self, rct_stage).to_dict())
    def generate_aggregating_stage(self) -> Stage:
        stage = Stage()
        stage.name = "aggregating"
        cfg = self.cfg.aggregation_stage

        # Aggregation task
        task = Task()

        task.cpu_reqs = cfg.cpu_reqs.dict()
        task.pre_exec = cfg.pre_exec
        task.executable = cfg.executable
        task.arguments = cfg.arguments

        # Update base parameters
        cfg.run_config.experiment_directory = self.cfg.experiment_directory
        cfg.run_config.output_path = self.aggregated_data_path(
            self.cur_iteration)

        cfg_path = self.experiment_dirs["aggregation_runs"].joinpath(
            f"aggregation_{self.cur_iteration:03d}.yaml")
        cfg.run_config.dump_yaml(cfg_path)

        task.arguments += ["-c", cfg_path]
        stage.add_tasks(task)

        return stage
Example #24
def generate_pipeline(name, stages):

    # Create a Pipeline object
    p = Pipeline()
    p.name = name

    for s_cnt in range(stages):

        # Create a Stage object
        s = Stage()
        s.name = 'Stage %s' % s_cnt

        for t_cnt in range(5):

            # Create a Task object
            t = Task()
            t.name = 'my-task'  # Assign a name to the task (optional)
            t.executable = '/bin/echo'  # Assign executable to the task
            # Assign arguments for the task executable
            t.arguments = ['I am task %s in %s in %s' % (t_cnt, s_cnt, name)]

            # Add the Task to the Stage
            s.add_tasks(t)

        # Add Stage to the Pipeline
        p.add_stages(s)

    return p
Example #25
    def tasks(self, pipeline_id):
        """
        Returns
        -------
        Set of tasks to be added to the preprocessing stage.

        """
        md_dir = f'{self.prefix}/data/md/pipeline-{pipeline_id}'
        preproc_dir = f'{self.prefix}/data/preproc/pipeline-{pipeline_id}'

        task = Task()

        self.load_environment(task)
        self.set_python_executable(task)
        self.assign_hardware(task)

        # Create output directory for generated files.
        task.pre_exec.extend([f'mkdir -p {preproc_dir}'])

        # Specify python preprocessing task with arguments
        task.arguments = [
            f'{self.prefix}/examples/cvae_dbscan/scripts/contact_map.py',
            '--sim_path', md_dir, '--out', preproc_dir
        ]

        return {task}
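
The load_environment, set_python_executable and assign_hardware helpers used here (and in Examples #10 and #27) are not shown. A rough sketch of what they might do, with an assumed self.conda_path attribute and resource values that are purely illustrative:

    def load_environment(self, task):
        # Activate the conda environment before the task runs (path assumed)
        task.pre_exec = ['conda activate %s' % self.conda_path]

    def set_python_executable(self, task):
        # Use the environment's python interpreter as the task executable
        task.executable = '%s/bin/python' % self.conda_path

    def assign_hardware(self, task):
        # Illustrative resource request; actual values depend on the machine
        task.cpu_reqs = {'processes': 1, 'process_type': None,
                         'threads_per_process': 4, 'thread_type': 'OpenMP'}
        task.gpu_reqs = {'processes': 1, 'process_type': None,
                         'threads_per_process': 1, 'thread_type': 'CUDA'}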
Example #26
    def generate_aggregating_stage():
        """ 
        Function to concatenate the MD trajectory (h5 contact map) 
        """
        s2 = Stage()
        s2.name = 'aggregating'

        # Aggregation task
        t2 = Task()
        # https://github.com/radical-collaboration/hyperspace/blob/MD/microscope/experiments/MD_to_CVAE/MD_to_CVAE.py
        t2.pre_exec = []
        #t2.pre_exec += ['. /sw/summit/python/2.7/anaconda2/5.3.0/etc/profile.d/conda.sh']
        #t2.pre_exec += ['conda activate %s' % conda_path]
        t2.pre_exec += ['module unload python']
        t2.pre_exec += ['module load ibm-wml-ce']
        t2.pre_exec += ['cd %s' % agg_path]
        #t2.executable = ['%s/bin/python' % conda_path]  # MD_to_CVAE.py
        t2.executable = [
            '/sw/summit/ibm-wml-ce/anaconda-base/envs/ibm-wml-ce-1.7.0-2/bin/python'
        ]
        t2.arguments = ['%s/MD_to_CVAE.py' % agg_path, '--sim_path', md_path]

        # Add the aggregation task to the aggregating stage
        s2.add_tasks(t2)
        return s2
Example #27
    def tasks(self, pipeline_id):
        """
        Returns
        -------
        set of tasks to be added to the outlier stage.

        """
        md_dir = f'{self.prefix}/data/md/pipeline-{pipeline_id}'
        cvae_dir = f'{self.prefix}/data/ml/pipeline-{pipeline_id}'
        shared_path = f'{self.prefix}/data/shared/pipeline-{pipeline_id + 1}/pdb'
        outlier_dir = f'{self.prefix}/data/outlier/pipeline-{pipeline_id}'
        cm_data_path = f'{self.prefix}/data/preproc/pipeline-{pipeline_id}/cvae-input.h5'

        task = Task()
        self.load_environment(task)
        self.set_python_executable(task)
        self.assign_hardware(task)

        # Create output directories for generated files.
        task.pre_exec.append(f'mkdir -p {outlier_dir} {shared_path}')

        # Initialize eps dictionary that is shared and updated over
        # each round of the pipeline
        # if pipeline_id == 0:
        #     task.pre_exec.append(f'touch {outlier_dir}/eps-{pipeline_id}.json')

        # Specify python outlier detection task with arguments
        task.arguments = [
            f'{self.prefix}/examples/cvae_dbscan/scripts/dbscan.py',
            '--sim_path', md_dir, '--shared_path', shared_path, '--cm_path',
            cm_data_path, '--cvae_path', cvae_dir
        ]

        return {task}
Example #28
def create_inversion_dict_stage(cmt_file_db, param_path, task_counter):
    """Creates stage for the creation of the inversion files. This stage is
    tiny, but required before the actual inversion.

    :param cmt_file_db:
    :param param_path:
    :param task_counter:
    :return:
    """

    # Get database parameter path
    databaseparam_path = os.path.join(param_path,
                                      "Database/DatabaseParameters.yml")

    # Load Parameters
    DB_params = read_yaml_file(databaseparam_path)

    # Earthquake specific database parameters: Dir and Cid
    Cdir, Cid = get_Centry_path(DB_params["databasedir"], cmt_file_db)

    # Function
    inv_dict_func = os.path.join(bin_path, "write_inversion_dicts.py")

    # Create Process Paths Stage (CPP)
    # Create a Stage object
    inv_dict_stage = Stage()
    inv_dict_stage.name = "Creating"

    # Create Task
    inv_dict_task = Task()

    # This way the task gets the name of the path file
    inv_dict_task.name = "Inversion-Dictionaries"

    inv_dict_task.pre_exec = [  # Conda activate
        DB_params["conda-activate"]
    ]

    inv_dict_task.executable = [DB_params["bin-python"]]  # Assign exec
    # to the task

    inv_dict_task.arguments = [
        inv_dict_func, "-f", cmt_file_db, "-p", param_path
    ]

    # In the future maybe to database dir as a total log?
    inv_dict_task.stdout = os.path.join(
        "%s" % Cdir, "logs", "stdout.pipeline_%s.task_%s.%s" %
        (Cid, str(task_counter).zfill(4), inv_dict_task.name))

    inv_dict_task.stderr = os.path.join(
        "%s" % Cdir, "logs", "stderr.pipeline_%s.task_%s.%s" %
        (Cid, str(task_counter).zfill(4), inv_dict_task.name))

    inv_dict_stage.add_tasks(inv_dict_task)

    task_counter += 1

    return inv_dict_stage, task_counter
Example #29
def generate_task(cfg: BaseStageConfig) -> Task:
    task = Task()
    task.cpu_reqs = cfg.cpu_reqs.dict().copy()
    task.gpu_reqs = cfg.gpu_reqs.dict().copy()
    task.pre_exec = cfg.pre_exec.copy()
    task.executable = cfg.executable
    task.arguments = cfg.arguments.copy()
    return task
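
generate_task expects a config object whose cpu_reqs / gpu_reqs expose a .dict() method and whose pre_exec / arguments are lists. A minimal sketch of such a BaseStageConfig, assuming pydantic-style models; the field names are inferred from the hardware dictionaries used throughout these examples:

from typing import List, Optional

from pydantic import BaseModel  # assumed; any object exposing .dict() would work


class HardwareReqs(BaseModel):
    processes: int = 1
    process_type: Optional[str] = None
    threads_per_process: int = 1
    thread_type: Optional[str] = None


class BaseStageConfig(BaseModel):
    pre_exec: List[str] = []
    executable: str = ''
    arguments: List[str] = []
    cpu_reqs: HardwareReqs = HardwareReqs()
    gpu_reqs: HardwareReqs = HardwareReqs()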
def generate_ML_pipeline():

    p = Pipeline()
    p.name = 'ML'

    s1 = Stage()
    s1.name = 'Generator-ML'

    # The generator/ML pipeline consists of 1 Stage and 2 Tasks:
    # Task 1: Generator; Task 2: ConvNet/Active Learning Model
    # NOTE: Generator and ML/AL are alive across the whole workflow execution.
    # For local testing, sleep time is longer than the total execution time of
    # the MD pipelines.

    t1 = Task()
    t1.name = "generator"
    t1.pre_exec = [
        # 'module load python/2.7.15-anaconda2-5.3.0',
        # 'module load cuda/9.1.85',
        # 'module load gcc/6.4.0',
        # 'source activate snakes'
    ]
    # t1.executable = ['python']
    # t1.arguments  = ['/ccs/home/jdakka/tf.py']
    t1.executable = ['sleep']
    t1.arguments = ['5']
    s1.add_tasks(t1)

    t2 = Task()
    t2.name = "ml-al"
    t2.pre_exec = [
        # 'module load python/2.7.15-anaconda2-5.3.0',
        # 'module load cuda/9.1.85',
        # 'module load gcc/6.4.0',
        # 'source activate snakes'
    ]
    # t2.executable = ['python']
    # t2.arguments  = ['/ccs/home/jdakka/tf.py']
    t2.executable = ['sleep']
    t2.arguments = ['10']
    s1.add_tasks(t2)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
Example #31
def constructTask(url):
    response = requests.get(url)
    response = response.json()

    t = Task()
    t.name = str(response['name'])
    t.executable = [str(response['executable'])]
    t.arguments = [str(response['arguments'])]
    return t
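
constructTask assumes the endpoint returns a JSON object with name, executable and arguments fields. A hypothetical payload and call (the URL and values are illustrative only):

# Expected response body, e.g.:
#   {"name": "hello", "executable": "/bin/echo", "arguments": "hello world"}
t = constructTask('http://localhost:5000/tasks/1')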
Example #32
    def describe_MD_pipline():
        p = Pipeline()
        p.name = 'MD'

        # Docking stage
        s1 = Stage()
        s1.name = 'Docking'

        # Docking task
        t1 = Task()
        t1.executable = ['sleep']
        t1.arguments = ['30']

        # Add the Docking task to the Docking Stage
        s1.add_tasks(t1)

        # Add Docking stage to the pipeline
        p.add_stages(s1)

        # MD stage
        s2 = Stage()
        s2.name = 'Simulation'

        # Each Task() is an OpenMM executable that will run on a single GPU.
        # Set sleep time for local testing
        for i in range(6):
            t2 = Task()
            t2.executable = ['sleep']
            t2.arguments = ['60']

            # Add the MD task to the Simulation stage
            s2.add_tasks(t2)

        # Add post-exec to the Stage
        s2.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        # Add MD stage to the MD Pipeline
        p.add_stages(s2)

        return p
def create_single_task():

    t1 = Task()
    t1.name = 'simulation'
    t1.executable = ['/bin/echo']
    t1.arguments = ['hello']
    t1.copy_input_data = []
    t1.copy_output_data = []

    return t1
def generate_pipeline():

    def func_condition():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            return True

        return False

    def func_on_true():

        global CUR_NEW_STAGE
        CUR_NEW_STAGE += 1

        shuffle(p.stages[CUR_NEW_STAGE:])

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE+1):

        # Create a Stage object
        s1 = Stage()

        for i in range(CUR_TASKS):

            t1 = Task()
            t1.executable = '/bin/sleep'
            t1.arguments = [ '30']

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        s1.post_exec = {
                        'condition': func_condition,
                        'on_true': func_on_true,
                        'on_false': func_on_false
                        }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
    def create_pipeline():

        p = Pipeline()

        s = Stage()

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['sleep']
        t1.arguments = ['10']

        s.add_tasks(t1)

        p.add_stages(s)

        return p
    def create_pipeline():

        p = Pipeline()

        s = Stage()

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/echo']
        t1.arguments = ['hello']
        t1.copy_input_data = []
        t1.copy_output_data = []

        s.add_tasks(t1)

        p.add_stages(s)

        return p
def generate_pipeline():

    def func_condition():

        p.suspend()
        print('Suspending pipeline %s for 10 seconds' % p.uid)
        sleep(10)
        return True

    def func_on_true():

        print('Resuming pipeline %s' % p.uid)
        p.resume()

    def func_on_false():
        pass

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    for i in range(10):

        t1 = Task()
        t1.executable = '/bin/sleep'
        t1.arguments = ['30']

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add post-exec to the Stage
    s1.post_exec = {
        'condition': func_condition,
        'on_true': func_on_true,
        'on_false': func_on_false
    }

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # Create a Task object that sleeps for 300 seconds
    t1 = Task()
    t1.executable = ['/bin/sleep']
    t1.arguments = ['300']

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # Create a Task object that moves the uploaded file 'temp' to /tmp/
    t1 = Task()
    t1.executable = ['mv']
    t1.arguments = ['temp','/tmp/']
    t1.upload_input_data = ['%s/temp'%cur_dir]

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # Create 10 Task objects, each concatenating file1.txt and file2.txt into output.txt
    for x in range(10):
        t1 = Task()
        t1.executable = 'cat'
        t1.arguments = ['file1.txt','file2.txt','>','output.txt']
        t1.copy_input_data = ['$SHARED/file1.txt', '$SHARED/file2.txt']
        t1.download_output_data = ['output.txt > %s/output_%s.txt' %(cur_dir,x+1)]

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def generate_pipeline():

    # Create a Pipeline object
    p = Pipeline()
    p.name = 'p1'

    # Create a Stage object
    s1 = Stage()
    s1.name = 's1'

    # Create 4K tasks to ensure we don't hit any RMQ connection drops
    for _ in range(4096):
        t1 = Task()
        t1.executable = ['/bin/echo']
        t1.arguments = ['"Hello World"']

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
    def func_on_true():

        global CUR_NEW_STAGE

        CUR_NEW_STAGE += 1

        s = Stage()

        for i in range(10):
            t = Task()
            t.executable = '/bin/sleep'
            t.arguments = [ '30']

            s.add_tasks(t)

        # Add post-exec to the Stage
        s.post_exec = {
                        'condition': func_condition,
                        'on_true': func_on_true,
                        'on_false': func_on_false
                    }

        p.add_stages(s)
Example #43
def init_cycle():

    # Create Pipeline Obj

    p = Pipeline()

    #Bookkeeping
    stage_uids = list()
    task_uids = list() ## = dict()
    d = dict()    
    dict_tarball = dict()
    
    #Create Tarball stage
    tar_stg = Stage()
    #Create Tar/untar task
    tar_tsk = Task()
    tar_tsk.executable = ['python']
    tar_tsk.upload_input_data = ['Input_Files.tar', 'untar_input_files.py']
    tar_tsk.arguments = ['untar_input_files.py','Input_Files.tar']
    tar_tsk.cores = 1
    tar_stg.add_tasks(tar_tsk)
    #task_uids.append(tar_tsk.uid)
    p.add_stages(tar_stg)
    #stage_uids.append(tar_stg.uid)
    dict_tarball[0] = '$Pipeline_%s_Stage_%s_Task_%s'%(p.uid,tar_stg.uid,tar_tsk.uid)
    #Create initial MD stage

    md_stg = Stage()

    #Create MD task
    for n0 in range (Replicas):    
        md_tsk = Task()
        md_tsk.executable = ['/u/sciteam/mushnoor/amber/amber14/bin/sander.MPI']  #MD Engine, BW
        #md_tsk.executable = ['/usr/local/packages/amber/16/INTEL-140-MVAPICH2-2.0/bin/pmemd.MPI'] #MD Engine, SuperMIC
        #md_tsk.executable = ['/opt/amber/bin/pmemd.MPI']
        #md_tsk.upload_input_data = ['inpcrd', 'prmtop', 'mdin_{0}'.format(n0)]
        #md_tsk.upload_input_data = ['inpcrd','prmtop','mdin']
        md_tsk.link_input_data += ['%s/inpcrd'%dict_tarball[0],
                                  '%s/prmtop'%dict_tarball[0],
                                   '%s/mdin'%dict_tarball[0]]  
        md_tsk.pre_exec = ['export AMBERHOME=$HOME/amber/amber14/']
        #md_tsk.pre_exec = ['module load amber']    
        #md_tsk.arguments = ['-O', '-i', 'mdin_{0}'.format(n0), '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out_{0}'.format(n0), '-inf', 'mdinfo_{0}'.format(n0)]
        md_tsk.arguments = ['-O', '-i', 'mdin', '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out_{0}'.format(n0), '-inf', 'mdinfo_{0}'.format(n0)]
        md_tsk.cores = Replica_Cores
        md_tsk.mpi = True
        d[n0] = '$Pipeline_%s_Stage_%s_Task_%s'%(p.uid, md_stg.uid, md_tsk.uid)

        md_stg.add_tasks(md_tsk)
        task_uids.append(md_tsk.uid)
    p.add_stages(md_stg)
    stage_uids.append(md_stg.uid)
    #print d 
    #Create Exchange Stage
    
    ex_stg = Stage()

    #Create Exchange Task

    ex_tsk = Task()
    ex_tsk.executable = ['python']
    ex_tsk.upload_input_data = ['exchangeMethods/TempEx.py']
    for n1 in range (Replicas):
        ex_tsk.link_input_data += ['%s/mdinfo_%s'%(d[n1],n1)]
    
    ex_tsk.arguments = ['TempEx.py','{0}'.format(Replicas)]
    ex_tsk.cores = 1
    ex_tsk.mpi = False
    ex_tsk.download_output_data = ['exchangePairs.dat']
    ex_stg.add_tasks(ex_tsk)
    task_uids.append(ex_tsk.uid)
    p.add_stages(ex_stg)
    stage_uids.append(ex_stg.uid)
    Book.append(d)
    #print Book
    return p
    p = Pipeline()
    # Bookkeeping
    stage_uids = list()
    task_uids = dict()
    Stages = 3
    Replicas = 4
    for N_Stg in range(Stages):
        stg =  Stage() ## initialization
        task_uids['Stage_%s'%N_Stg] = list()
        if N_Stg == 0:
            for n0 in range(Replicas):
                t = Task()
                t.executable = ['/usr/local/packages/gromacs/5.1.4/INTEL-140-MVAPICH2-2.0/bin/gmx_mpi_d']  #MD Engine  
                t.upload_input_data = ['in.gro', 'in.top', 'FNF.itp', 'martini_v2.2.itp', 'in.mdp'] 
                t.pre_exec = ['module load gromacs', '/usr/local/packages/gromacs/5.1.4/INTEL-140-MVAPICH2-2.0/bin/gmx_mpi_d grompp -f in.mdp -c in.gro -o in.tpr -p in.top'] 
                t.arguments = ['mdrun', '-s', 'in.tpr', '-deffnm', 'out']
                t.cores = 32
                stg.add_tasks(t)
                task_uids['Stage_%s'%N_Stg].append(t.uid)
            p.add_stages(stg)
            stage_uids.append(stg.uid) 



        else:
        
            for n0 in range(Replicas):
                t = Task()
                t.executable = ['/usr/local/packages/gromacs/5.1.4/INTEL-140-MVAPICH2-2.0/bin/gmx_mpi_d']  #MD Engine  
                t.copy_input_data = [
                    '$Pipeline_%s_Stage_%s_Task_%s/out.gro > in.gro' % (p.uid, stage_uids[N_Stg-1], task_uids['Stage_%s'%(N_Stg-1)][n0]),
                    '$Pipeline_%s_Stage_%s_Task_%s/in.top' % (p.uid, stage_uids[N_Stg-1], task_uids['Stage_%s'%(N_Stg-1)][n0]),
                    '$Pipeline_%s_Stage_%s_Task_%s/FNF.itp' % (p.uid, stage_uids[N_Stg-1], task_uids['Stage_%s'%(N_Stg-1)][n0]),
                    '$Pipeline_%s_Stage_%s_Task_%s/martini_v2.2.itp' % (p.uid, stage_uids[N_Stg-1], task_uids['Stage_%s'%(N_Stg-1)][n0]),
                    '$Pipeline_%s_Stage_%s_Task_%s/in.mdp' % (p.uid, stage_uids[N_Stg-1], task_uids['Stage_%s'%(N_Stg-1)][n0])
                ]
                t.pre_exec = ['module load gromacs', '/usr/local/packages/gromacs/5.1.4/INTEL-140-MVAPICH2-2.0/bin/gmx_mpi_d grompp -f in.mdp -c in.gro -o in.tpr -p in.top'] 
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port = os.environ.get('RMQ_PORT', 5672)

if __name__ == '__main__':

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s = Stage()

    # Create a Task object
    t = Task()
    t.name = 'my-first-task'        # Assign a name to the task (optional, do not use ',' or '_')
    t.executable = '/bin/echo'   # Assign executable to the task
    t.arguments = ['Hello World']  # Assign arguments for the task executable

    # Add Task to the Stage
    s.add_tasks(t)

    # Add Stage to the Pipeline
    p.add_stages(s)

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary to describe the mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {
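
The snippet is cut off at the resource dictionary. Based on the comment above and the AppManager usage in Example #4, a plausible continuation (the concrete values are assumptions) is:

    res_dict = {
        'resource': 'local.localhost',  # execute locally, per the comment above
        'walltime': 10,                 # minutes (assumed)
        'cpus': 1                       # number of cores (assumed)
    }

    # Assign the resource request description and the workflow, then run
    appman.resource_desc = res_dict
    appman.workflow = set([p])
    appman.run()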
Example #46
    def init_cycle(self, replicas, replica_cores, python_path, md_executable, exchange_method, min_temp, max_temp, timesteps, basename, pre_exec):  # "cycle" = 1 MD stage plus the subsequent exchange computation
        """ 
        Initial cycle consists of:
        1) Create tarball of MD input data 
        2) Transfer the tarball to pilot sandbox
        3) Untar the tarball
        4) Run first cycle
        """

        #Initialize Pipeline
        self._prof.prof('InitTar', uid=self._uid)
        p = Pipeline()
        p.name = 'initpipeline'

        md_dict = dict()  #bookkeeping
        tar_dict = dict()  #bookkeeping

        #Write the input files

        self._prof.prof('InitWriteInputs', uid=self._uid)

        writeInputs.writeInputs(
            max_temp=max_temp,
            min_temp=min_temp,
            replicas=replicas,
            timesteps=timesteps,
            basename=basename)

        self._prof.prof('EndWriteInputs', uid=self._uid)

        self._prof.prof('InitTar', uid=self._uid)
        #Create Tarball of input data

        tar = tarfile.open("input_files.tar", "w")
        for name in [
                basename + ".prmtop", basename + ".inpcrd", basename + ".mdin"
        ]:
            tar.add(name)
        for r in range(replicas):
            tar.add('mdin_{0}'.format(r))
        tar.close()

        #delete all input files outside the tarball

        for r in range(replicas):
            os.remove('mdin_{0}'.format(r))

        self._prof.prof('EndTar', uid=self._uid)

        #Create Untar Stage

        repo = git.Repo('.', search_parent_directories=True)
        aux_function_path = repo.working_tree_dir


        untar_stg = Stage()
        untar_stg.name = 'untarStg'

        #Untar Task
        
        untar_tsk = Task()
        untar_tsk.name = 'untartsk'
        untar_tsk.executable = ['python']

        untar_tsk.upload_input_data = [
            str(aux_function_path)+'/repex/untar_input_files.py', 'input_files.tar'
        ]
        untar_tsk.arguments = ['untar_input_files.py', 'input_files.tar']
        untar_tsk.cpu_reqs = 1
        #untar_tsk.post_exec         = ['']
        untar_stg.add_tasks(untar_tsk)
        p.add_stages(untar_stg)

        tar_dict[0] = '$Pipeline_%s_Stage_%s_Task_%s' % (
            p.name, untar_stg.name, untar_tsk.name)

        # First MD stage: needs to be defined separately since workflow is not built from a predetermined order, also equilibration needs to happen first. 

        md_stg = Stage()
        md_stg.name = 'mdstg0'
        self._prof.prof('InitMD_0', uid=self._uid)

        # MD tasks

        for r in range(replicas):

            md_tsk = AMBERTask(cores=replica_cores, md_executable=md_executable, pre_exec=pre_exec)
            md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=r, cycle=0)
            md_tsk.link_input_data += [
                '%s/inpcrd' % tar_dict[0],
                '%s/prmtop' % tar_dict[0],
                '%s/mdin_{0}'.format(r) %
                tar_dict[0]  #Use for full temperature exchange
            ]
            md_tsk.arguments = [
                '-O',
                '-p',
                'prmtop',
                '-i',
                'mdin_{0}'.format(r),
                '-c',
                'inpcrd',
                '-o',
                'out-{replica}-{cycle}'.format(replica=r, cycle=0),
                '-r',
                'restrt'.format(replica=r, cycle=0),
                #'-r',  'rstrt-{replica}-{cycle}'.format(replica=r,cycle=0),
                '-x',
                'mdcrd-{replica}-{cycle}'.format(replica=r, cycle=0),
                #'-o',  '$NODE_LFS_PATH/out-{replica}-{cycle}'.format(replica=r,cycle=0),
                #'-r',  '$NODE_LFS_PATH/rstrt-{replica}-{cycle}'.format(replica=r,cycle=0),
                #'-x',  '$NODE_LFS_PATH/mdcrd-{replica}-{cycle}'.format(replica=r,cycle=0),
                '-inf',
                'mdinfo_{0}'.format(r)
            ]
            md_dict[r] = '$Pipeline_%s_Stage_%s_Task_%s' % (
                p.name, md_stg.name, md_tsk.name)

            md_stg.add_tasks(md_tsk)
            self.md_task_list.append(md_tsk)
            #print md_tsk.uid
        p.add_stages(md_stg)
        #stage_uids.append(md_stg.uid)

        # First Exchange Stage

        ex_stg = Stage()
        ex_stg.name = 'exstg0'
        self._prof.prof('InitEx_0', uid=self._uid)

        # Create Exchange Task

        ex_tsk = Task()
        ex_tsk.name = 'extsk0'
        #ex_tsk.pre_exec             = ['module load python/2.7.10']
        ex_tsk.executable = [python_path]
        ex_tsk.upload_input_data = [exchange_method]
        for r in range(replicas):
            ex_tsk.link_input_data += ['%s/mdinfo_%s' % (md_dict[r], r)]
        ex_tsk.pre_exec = ['mv *.py exchange_method.py']
        ex_tsk.arguments = ['exchange_method.py', '{0}'.format(replicas), '0']
        ex_tsk.cores = 1
        ex_tsk.mpi = False
        ex_tsk.download_output_data = ['exchangePairs_0.dat']
        ex_stg.add_tasks(ex_tsk)
        #task_uids.append(ex_tsk.uid)
        p.add_stages(ex_stg)
        self.ex_task_list.append(ex_tsk)
        #self.ex_task_uids.append(ex_tsk.uid)
        self.book.append(md_dict)
        return p
# this script.
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port = os.environ.get('RMQ_PORT', 5672)

if __name__ == '__main__':

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # Create a Task object which creates a file named 'output.txt' of size 1 MB
    t1 = Task()
    t1.executable = '/bin/bash'
    t1.arguments = ['-l', '-c', 'base64 /dev/urandom | head -c 1000000 > output.txt']

    # Add the Task to the Stage
    s1.add_tasks(t1)

    # Add Stage to the Pipeline
    p.add_stages(s1)

    # Create another Stage object
    s2 = Stage()
    s2.name = 'Stage 2'

    # Create a Task object
    t2 = Task()
    t2.executable = ['/bin/bash']
    t2.arguments = ['-l', '-c', 'grep -o . output.txt | sort | uniq -c > ccount.txt']
def test_task_exceptions(s,l,i,b):

    """
    **Purpose**: Test if all attribute assignments raise exceptions for invalid values
    """

    t = Task()

    data_type = [s,l,i,b]

    for data in data_type:

        if not isinstance(data,str):
            with pytest.raises(TypeError):
                t.name = data

            with pytest.raises(TypeError):
                t.path = data

            with pytest.raises(TypeError):
                t.parent_stage = data

            with pytest.raises(TypeError):
                t.parent_pipeline = data

            with pytest.raises(TypeError):
                t.stdout = data

            with pytest.raises(TypeError):
                t.stderr = data

        if not isinstance(data,list):

            with pytest.raises(TypeError):
                t.pre_exec = data

            with pytest.raises(TypeError):
                t.arguments = data

            with pytest.raises(TypeError):
                t.post_exec = data

            with pytest.raises(TypeError):
                t.upload_input_data = data

            with pytest.raises(TypeError):
                t.copy_input_data = data

            with pytest.raises(TypeError):
                t.link_input_data = data

            with pytest.raises(TypeError):
                t.move_input_data = data

            with pytest.raises(TypeError):
                t.copy_output_data = data

            with pytest.raises(TypeError):
                t.download_output_data = data

            with pytest.raises(TypeError):
                t.move_output_data = data

        if not isinstance(data, str) and not isinstance(data, list):

            with pytest.raises(TypeError):
                t.executable = data

        if not isinstance(data, str):

            with pytest.raises(ValueError):
                t.cpu_reqs = {
                                'processes': 1,
                                'process_type': data,
                                'threads_per_process': 1,
                                'thread_type': None
                            }
                t.cpu_reqs = {
                                'processes': 1,
                                'process_type': None,
                                'threads_per_process': 1,
                                'thread_type': data
                            }
                t.gpu_reqs = {
                                'processes': 1,
                                'process_type': data,
                                'threads_per_process': 1,
                                'thread_type': None
                            }
                t.gpu_reqs = {
                                'processes': 1,
                                'process_type': None,
                                'threads_per_process': 1,
                                'thread_type': data
                            }

        if not isinstance(data, int):

            with pytest.raises(TypeError):
                t.cpu_reqs = {
                                'processes': data,
                                'process_type': None,
                                'threads_per_process': 1,
                                'thread_type': None
                            }
                t.cpu_reqs = {
                                'processes': 1,
                                'process_type': None,
                                'threads_per_process': data,
                                'thread_type': None
                            }
                t.gpu_reqs = {
                                'processes': data,
                                'process_type': None,
                                'threads_per_process': 1,
                                'thread_type': None
                            }
                t.gpu_reqs = {
                                'processes': 1,
                                'process_type': None,
                                'threads_per_process': data,
                                'thread_type': None
                            }
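
The test takes one sample value of each basic type (a string, a list, an int, a bool). A hypothetical way to drive it with pytest; the concrete example values are arbitrary:

import pytest


@pytest.mark.parametrize('s,l,i,b', [('test', ['test'], 1, True)])
def test_task_exceptions_wrapper(s, l, i, b):
    test_task_exceptions(s, l, i, b)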
Example #49
    stage_uids = list()
    task_uids = dict()
    Stages = 1
    Replicas = 2


    for N_Stg in range(Stages):
        stg =  Stage() ## initialization
        task_uids['Stage_%s'%N_Stg] = list()
        if N_Stg == 0:
            for n0 in range(Replicas):
                t = Task()
                t.executable = ['/u/sciteam/mushnoor/amber/amber14/bin/sander.MPI']  #MD Engine  
                t.upload_input_data = ['inpcrd', 'prmtop', 'mdin'] 
                t.pre_exec = ['export AMBERHOME=$HOME/amber/amber14/'] 
                t.arguments = ['-O', '-i', 'mdin', '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out']
                t.cores = 32
                t.mpi = True
                stg.add_tasks(t)
                task_uids['Stage_%s'%N_Stg].append(t.uid)
            p.add_stages(stg)
            stage_uids.append(stg.uid) 


        else:
        
            for n0 in range(Replicas):
                t = Task()
                t.executable = ['/u/sciteam/mushnoor/amber/amber14/bin/sander.MPI']  #MD Engine 
                t.copy_input_data = [
                    '$Pipeline_%s_Stage_%s_Task_%s/out.gro > in.gro' % (p.uid, stage_uids[N_Stg-1], task_uids['Stage_%s' % (N_Stg-1)][n0]),
                    '$Pipeline_%s_Stage_%s_Task_%s/in.top' % (p.uid, stage_uids[N_Stg-1], task_uids['Stage_%s' % (N_Stg-1)][n0]),
                    '$Pipeline_%s_Stage_%s_Task_%s/FNF.itp' % (p.uid, stage_uids[N_Stg-1], task_uids['Stage_%s' % (N_Stg-1)][n0]),
                    '$Pipeline_%s_Stage_%s_Task_%s/martini_v2.2.itp' % (p.uid, stage_uids[N_Stg-1], task_uids['Stage_%s' % (N_Stg-1)][n0]),
                    '$Pipeline_%s_Stage_%s_Task_%s/in.mdp' % (p.uid, stage_uids[N_Stg-1], task_uids['Stage_%s' % (N_Stg-1)][n0])
                ]
                t.pre_exec = Pre_Exec_Command_List +  ['/u/sciteam/mushnoor/gromacs/gromacs-5.0.4/build-cpu/bin/gmx_mpi grompp -f in.mdp -c in.gro -o in.tpr -p in.top'] 
    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    # List to hold uids of Tasks of Stage 1
    s1_task_uids = list()

    for cnt in range(10):

        # Create a Task object
        t = Task()
        t.executable = '/bin/echo'   # Assign executable to the task
        t.arguments = ['I am task %s in %s'%(cnt, s1.name)]  # Assign arguments for the task executable

        # Add the Task to the Stage
        s1.add_tasks(t)

        # Add Task uid to list
        s1_task_uids.append(t.uid)

    # Add Stage to the Pipeline
    p.add_stages(s1)


    # Create another Stage object
    s2 = Stage()

    # List to hold uids of Tasks of Stage 2
def generate_pipeline():

    def func_condition():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            return True

        return False

    def func_on_true():

        global CUR_NEW_STAGE

        CUR_NEW_STAGE += 1

        s = Stage()

        for i in range(10):
            t = Task()
            t.executable = '/bin/sleep'
            t.arguments = [ '30']

            s.add_tasks(t)

        # Add post-exec to the Stage
        s.post_exec = {
                        'condition': func_condition,
                        'on_true': func_on_true,
                        'on_false': func_on_false
                    }

        p.add_stages(s)

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    for i in range(10):

        t1 = Task()
        t1.executable = ['sleep']
        t1.arguments = [ '30']

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add post-exec to the Stage
    s1.post_exec = {
                        'condition': func_condition,
                        'on_true': func_on_true,
                        'on_false': func_on_false
                    }

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
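
# Note: the adaptive callbacks above reference CUR_NEW_STAGE and MAX_NEW_STAGE
# as module-level globals that are not part of this snippet; a minimal sketch
# of the assumed declarations (the values are illustrative only):
CUR_NEW_STAGE = 0
MAX_NEW_STAGE = 4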
Exemplo n.º 52
0
    def InitCycle(self, Replicas, Replica_Cores, md_executable, ExchangeMethod, timesteps): # "Cycle" = 1 MD stage plus the subsequent exchange computation

        """ 
        Initial cycle consists of:
        1) Create tarball of MD input data 
        2) Transfer the tarball to pilot sandbox
        3) Untar the tarball
        4) Run first Cycle
        """    
        
        #Initialize Pipeline
        #self._prof.prof('InitTar', uid=self._uid)
        p = Pipeline()
        p.name = 'initpipeline'

        md_dict    = dict() #Bookkeeping
        tar_dict   = dict() #Bookkeeping

        ##Write the input files

        self._prof.prof('InitWriteInputs', uid=self._uid)

                             

        writeInputs.writeInputs(max_temp=350,min_temp=250,replicas=Replicas,timesteps=timesteps)

        self._prof.prof('EndWriteInputs', uid=self._uid)

        
        self._prof.prof('InitTar', uid=self._uid)
        #Create Tarball of input data

        tar = tarfile.open("Input_Files.tar","w")
        for name in ["prmtop", "inpcrd", "mdin"]:
            tar.add(name)
        for r in range (Replicas):
            tar.add('mdin_{0}'.format(r))
        tar.close()

        #delete all input files outside the tarball

        for r in range (Replicas):
            os.remove('mdin_{0}'.format(r))

        self._prof.prof('EndTar', uid=self._uid)

                
        #Create Untar Stage

        untar_stg = Stage()
        untar_stg.name = 'untarStg'
    
        #Untar Task

        untar_tsk                   = Task()
        untar_tsk.name              = 'untartsk'
        untar_tsk.executable        = ['python']
        
        untar_tsk.upload_input_data = ['untar_input_files.py','Input_Files.tar']
        untar_tsk.arguments         = ['untar_input_files.py','Input_Files.tar']
        untar_tsk.cores             = 1

        untar_stg.add_tasks(untar_tsk)
        p.add_stages(untar_stg)

             
        tar_dict[0] = '$Pipeline_%s_Stage_%s_Task_%s'%(p.name,
                                                       untar_stg.name,
                                                       untar_tsk.name)
                 


        # First MD stage: needs to be defined separately since workflow is not built from a predetermined order

        md_stg = Stage()
        md_stg.name = 'mdstg0'
        self._prof.prof('InitMD_0', uid=self._uid)
        
        # MD tasks
               
        for r in range (Replicas):

            
            md_tsk                  = AMBERTask(cores=Replica_Cores, MD_Executable=md_executable)
            md_tsk.name             = 'mdtsk-{replica}-{cycle}'.format(replica=r,cycle=0)
            md_tsk.link_input_data += [
                                       '%s/inpcrd'%tar_dict[0],
                                       '%s/prmtop'%tar_dict[0],
                                       '%s/mdin_{0}'.format(r)%tar_dict[0]  #Use for full temperature exchange
                                       #'%s/mdin'%tar_dict[0]  #Testing only
                                       ] 
            md_tsk.arguments        = ['-O','-p','prmtop', '-i', 'mdin_{0}'.format(r), # Use this for full Temperature Exchange
                                       '-c','inpcrd','-o','out_{0}'.format(r),
                                       '-inf','mdinfo_{0}'.format(r)]
            md_dict[r]              = '$Pipeline_%s_Stage_%s_Task_%s'%(p.name, md_stg.name, md_tsk.name)

            md_stg.add_tasks(md_tsk)
            self.md_task_list.append(md_tsk)
            #print md_tsk.uid
        p.add_stages(md_stg)
        #stage_uids.append(md_stg.uid)
                                                    

        # First Exchange Stage
        
        ex_stg = Stage()
        ex_stg.name = 'exstg0'
        self._prof.prof('InitEx_0', uid=self._uid)
        #with open('logfile.log', 'a') as logfile:
         #   logfile.write( '%.5f' %time.time() + ',' + 'InitEx0' + '\n')
        # Create Exchange Task. Exchange task performs a Metropolis Hastings thermodynamic balance condition
        # check and spits out the exchangePairs.dat file that contains a sorted list of ordered pairs. 
        # Said pairs then exchange configurations by linking output configuration files appropriately.

        ex_tsk                      = Task()
        ex_tsk.name                 = 'extsk0'
        ex_tsk.executable           = ['python']
        ex_tsk.upload_input_data    = [ExchangeMethod]  
        for r in range (Replicas):
            ex_tsk.link_input_data     += ['%s/mdinfo_%s'%(md_dict[r],r)]
        ex_tsk.arguments            = ['TempEx.py','{0}'.format(Replicas), '0']
        ex_tsk.cores                = 1
        ex_tsk.mpi                  = False
        ex_tsk.download_output_data = ['exchangePairs_0.dat']
        ex_stg.add_tasks(ex_tsk)
        #task_uids.append(ex_tsk.uid)
        p.add_stages(ex_stg)
        self.ex_task_list.append(ex_tsk)
        #self.ex_task_uids.append(ex_tsk.uid)
        self.Book.append(md_dict)
        return p
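
# AMBERTask is a user-defined helper rather than part of the EnTK API, and its
# definition is not shown in this example. A plausible minimal sketch, assuming
# it simply returns a Task preconfigured for the AMBER MD engine (the AMBERHOME
# path is illustrative):
def AMBERTask(cores, MD_Executable):
    t            = Task()
    t.executable = [MD_Executable]
    t.pre_exec   = ['export AMBERHOME=$HOME/amber/amber14/']
    t.cores      = cores
    t.mpi        = True
    return t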
Exemplo n.º 53
0
def InitCycle(Replicas, Replica_Cores, MD_Executable, ExchangeMethod):     # "Cycle" = 1 MD stage plus the subsequent exchange computation

    #Initialize Pipeline
    p = Pipeline()

    md_dict    = dict() #Bookkeeping
    tar_dict   = dict() #Bookkeeping


    #Create Tarball of input data

        


    #Create Untar Stage
    untar_stg = Stage()
    #Untar Task
    untar_tsk                   = Task()
    untar_tsk.executable        = ['python']
    untar_tsk.upload_input_data = ['untar_input_files.py','../../Input_Files.tar']
    untar_tsk.arguments         = ['untar_input_files.py','Input_Files.tar']
    untar_tsk.cores             = 1

    untar_stg.add_tasks(untar_tsk)
    p.add_stages(untar_stg)


    tar_dict[0] = '$Pipeline_%s_Stage_%s_Task_%s'%(p.uid,
                                                   untar_stg.uid,
                                                   untar_tsk.uid)
    print(tar_dict[0])
    # First MD stage: needs to be defined separately since workflow is not built from a predetermined order
    md_stg = Stage()


    # MD tasks

    for r in range (Replicas):
        md_tsk                  = Task()
        md_tsk.executable       = [MD_Executable]
        md_tsk.link_input_data += ['%s/inpcrd'%tar_dict[0],
                                   '%s/prmtop'%tar_dict[0],
                                   #'%s/mdin_{0}'.format(r)%tar_dict[0]
                                   '%s/mdin'%tar_dict[0] 
                                   ] 
        md_tsk.pre_exec         = ['export AMBERHOME=$HOME/amber/amber14/'] #Should be abstracted from the user?
        md_tsk.arguments        = ['-O','-p','prmtop', '-i', 'mdin',               #'mdin_{0}'.format(r), # Use this for full Temperature Exchange
                                   '-c','inpcrd','-o','out_{0}'.format(r),
                                   '-inf','mdinfo_{0}'.format(r)]
        md_tsk.cores = Replica_Cores
        md_tsk.mpi = True
        md_dict[r] = '$Pipeline_%s_Stage_%s_Task_%s'%(p.uid, md_stg.uid, md_tsk.uid)

        md_stg.add_tasks(md_tsk)
        #task_uids.append(md_tsk.uid)
    p.add_stages(md_stg)
    #stage_uids.append(md_stg.uid)
                                                

    # First Exchange Stage
    ex_stg = Stage()

    # Create Exchange Task. Exchange task performs a Metropolis Hastings thermodynamic balance condition
    # and spits out the exchangePairs.dat file that contains a sorted list of ordered pairs. 
    # Said pairs then exchange configurations by linking output configuration files appropriately.

    ex_tsk                      = Task()
    ex_tsk.executable           = ['python']
    #ex_tsk.upload_input_data    = ['exchangeMethods/TempEx.py']
    ex_tsk.upload_input_data    = [ExchangeMethod]  
    for r in range (Replicas):
        ex_tsk.link_input_data     += ['%s/mdinfo_%s'%(md_dict[r],r)]
    ex_tsk.arguments            = ['TempEx.py','{0}'.format(Replicas)]
    ex_tsk.cores                = 1
    ex_tsk.mpi                  = False
    ex_tsk.download_output_data = ['exchangePairs.dat']
    ex_stg.add_tasks(ex_tsk)
    #task_uids.append(ex_tsk.uid)
    p.add_stages(ex_stg)
    #stage_uids.append(ex_stg.uid)
    Book.append(md_dict)
    #print Book
    return p
Exemplo n.º 54
0
    def general_cycle(self, replicas, replica_cores, cycle, python_path, md_executable, exchange_method, pre_exec):
        """
        All cycles after the initial cycle
        Pulls up exchange pairs file and generates the new workflow
        """

        self._prof.prof('InitcreateMDwokflow_{0}'.format(cycle), uid=self._uid)
        with open('exchangePairs_{0}.dat'.format(cycle),
                  'r') as f:  # Read exchangePairs.dat
            exchange_array = []
            for line in f:
                exchange_array.append(int(line.split()[1]))
                #exchange_array.append(line)
                #print exchange_array
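        # Illustrative (assumed) layout of exchangePairs_<cycle>.dat: two
        # whitespace-separated integers per line; only the second column (the
        # exchange partner index) is collected into exchange_array above, e.g.
        #   0 2
        #   1 3
        #   2 0
        #   3 1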

        q = Pipeline()
        q.name = 'genpipeline{0}'.format(cycle)
        #bookkeeping
        stage_uids = list()
        task_uids = list()  ## = dict()
        md_dict = dict()

        #Create MD stage

        md_stg = Stage()
        md_stg.name = 'mdstage{0}'.format(cycle)

        self._prof.prof('InitMD_{0}'.format(cycle), uid=self._uid)

        for r in range(replicas):
            md_tsk = AMBERTask(cores=replica_cores, md_executable=md_executable, pre_exec=pre_exec)
            md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(
                replica=r, cycle=cycle)
            md_tsk.link_input_data = [
                '%s/restrt > inpcrd' %
                (self.book[cycle - 1][exchange_array[r]]),
                '%s/prmtop' % (self.book[0][r]),
                '%s/mdin_{0}'.format(r) % (self.book[0][r])
            ]

            ### The Following softlinking scheme is to be used ONLY if node local file system is to be used: not fully supported yet.
            #md_tsk.link_input_data = ['$NODE_LFS_PATH/rstrt-{replica}-{cycle}'.format(replica=exchange_array[r],cycle=cycle-1) > '$NODE_LFS_PATH/inpcrd',
            #                          #'%s/restrt > inpcrd'%(self.book[cycle-1][exchange_array[r]]),
            #                          '%s/prmtop'%(self.book[0][r]),
            #                          '%s/mdin_{0}'.format(r)%(self.Book[0][r])]

            md_tsk.arguments = [
                '-O',
                '-i',
                'mdin_{0}'.format(r),
                '-p',
                'prmtop',
                '-c',
                'inpcrd',
                #'-c', 'rstrt-{replica}-{cycle}'.format(replica=r,cycle=cycle-1),
                '-o',
                'out-{replica}-{cycle}'.format(replica=r, cycle=cycle),
                '-r',
                'restrt',
                #'-r', 'rstrt-{replica}-{cycle}'.format(replica=r,cycle=cycle),
                '-x',
                'mdcrd-{replica}-{cycle}'.format(replica=r, cycle=cycle),
                '-inf',
                'mdinfo_{0}'.format(r)
            ]
            #md_tsk.tag              = 'mdtsk-{replica}-{cycle}'.format(replica=r,cycle=0)
            md_dict[r] = '$Pipeline_%s_Stage_%s_Task_%s' % (
                q.name, md_stg.name, md_tsk.name)
            self.md_task_list.append(md_tsk)
            md_stg.add_tasks(md_tsk)

        q.add_stages(md_stg)

        ex_stg = Stage()
        ex_stg.name = 'exstg{0}'.format(cycle + 1)

        #Create Exchange Task
        ex_tsk = Task()
        ex_tsk.name = 'extsk{0}'.format(cycle + 1)
        ex_tsk.executable = [python_path]#['/usr/bin/python']  #['/opt/python/bin/python']
        ex_tsk.upload_input_data = [exchange_method]
        for r in range(replicas):

            ex_tsk.link_input_data += ['%s/mdinfo_%s' % (md_dict[r], r)]
        ex_tsk.pre_exec = ['mv *.py exchange_method.py']
        ex_tsk.arguments = [
            'exchange_method.py', '{0}'.format(replicas), '{0}'.format(cycle + 1)
        ]
        ex_tsk.cores = 1
        ex_tsk.mpi = False
        ex_tsk.download_output_data = [
            'exchangePairs_{0}.dat'.format(cycle + 1)
        ]  # Finds exchange partners, also  Generates exchange history trace

        ex_stg.add_tasks(ex_tsk)

        #task_uids.append(ex_tsk.uid)
        self.ex_task_list.append(ex_tsk)

        q.add_stages(ex_stg)

        #stage_uids.append(ex_stg.uid)

        self.book.append(md_dict)
        #self._prof.prof('EndEx_{0}'.format(cycle), uid=self._uid)
        #print d
        #print self.book
        return q
Exemplo n.º 55
0
    Pilot_Cores = Replicas * Replica_Cores

    
    for N_Stg in range(Stages):
        stg =  Stage() ## initialization
        task_uids['Stage_%s'%N_Stg] = list()

        #####Initial MD stage  

        if N_Stg == 0:
            for n0 in range(Replicas):
                t = Task()
                t.executable = ['/u/sciteam/mushnoor/amber/amber14/bin/sander.MPI']  #MD Engine  
                t.upload_input_data = ['inpcrd', 'prmtop', 'mdin_{0}'.format(n0)] 
                t.pre_exec = ['export AMBERHOME=$HOME/amber/amber14/'] 
                t.arguments = ['-O', '-i', 'mdin_{0}'.format(n0), '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out']
                t.cores = Replica_Cores
                stg.add_tasks(t)
                task_uids['Stage_%s'%N_Stg].append(t.uid)
            p.add_stages(stg)
            stage_uids.append(stg.uid) 



        #####Exchange Stages    
        elif N_Stg != 0 and N_Stg % 2 == 1:
            t = Task()
            t.executable = ['python']
            t.upload_input_data = ['exchangeMethods/RandEx.py']
            #t.link_input_data = ['']
            t.arguments = ['RandEx.py', Replicas]
Exemplo n.º 56
0
def cycle(k):


    #read exchangePairs.dat
    #
    with open("exchangePairs.dat","r") as f:
        ExchangeArray = []
        for line in f:
            ExchangeArray.append(int(line.split()[1]))
            #ExchangeArray.append(line)
        #print ExchangeArray    

    
    p = Pipeline()

    #Bookkeeping
    stage_uids = list()
    task_uids = list() ## = dict()
    d = dict() 

    #Create initial MD stage

    md_stg = Stage()

    #Create MD task
    for n0 in range (Replicas):
        md_tsk = Task()
        md_tsk.executable = ['/u/sciteam/mushnoor/amber/amber14/bin/sander.MPI']  #MD Engine, Blue Waters
        #md_tsk.executable = ['/usr/local/packages/amber/16/INTEL-140-MVAPICH2-2.0/bin/pmemd.MPI'] #MD Engine, SuperMIC 
        #md_tsk.executable = ['/opt/amber/bin/pmemd.MPI']
        md_tsk.link_input_data = ['%s/restrt > inpcrd'%(Book[k-1][ExchangeArray[n0]]),
                                  '%s/prmtop'%(Book[k-1][n0]),
                                  #'%s/mdin_{0}'.format(n0)%(Book[k-1][n0])]
                                  '%s/mdin'%(Book[k-1][n0])]   
                                  ##Above: Copy from previous PIPELINE, make sure bookkeeping is correct
                                   
                              
        md_tsk.pre_exec = ['export AMBERHOME=$HOME/amber/amber14/'] #Preexec, BLue Waters
        #md_tsk.pre_exec = ['module load amber']
        #md_tsk.arguments = ['-O', '-i', 'mdin_{0}'.format(n0), '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out_{0}'.format(n0),'-inf', 'mdinfo_{0}'.format(n0)]
        md_tsk.arguments = ['-O', '-i', 'mdin', '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out_{0}'.format(n0),'-inf', 'mdinfo_{0}'.format(n0)]
        md_tsk.cores = Replica_Cores
        md_tsk.mpi = True
        d[n0] = '$Pipeline_%s_Stage_%s_Task_%s'%(p.uid, md_stg.uid, md_tsk.uid)
        #print d
        md_stg.add_tasks(md_tsk)
        task_uids.append(md_tsk.uid)
    p.add_stages(md_stg)
    stage_uids.append(md_stg.uid)

    #Create exchange stage 

    ex_stg= Stage()
    
    #Create Exchange Task

    ex_tsk = Task()
    ex_tsk.executable = ['python']
    ex_tsk.upload_input_data = ['exchangeMethods/TempEx.py']
    for n1 in range (Replicas):
        #print d[n1]
        
        ex_tsk.link_input_data += ['%s/mdinfo_%s'%(d[n1],n1)]
    
    ex_tsk.arguments = ['TempEx.py','{0}'.format(Replicas)]
    ex_tsk.cores = 1
    ex_tsk.mpi = False
    ex_tsk.download_output_data = ['exchangePairs.dat']
    ex_stg.add_tasks(ex_tsk)
    task_uids.append(ex_tsk.uid)
    p.add_stages(ex_stg)
    stage_uids.append(ex_stg.uid)
    Book.append(d)
    #print d
    #print Book
    return p
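
# The cycle() function above relies on a few module-level globals that are not
# shown in this snippet; a minimal sketch of the assumed declarations (values
# are illustrative only):
Book          = []    # one {replica_index: task sandbox path} dict appended per cycle
Replicas      = 4
Replica_Cores = 32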
if __name__ == '__main__':

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s = Stage()

    for cnt in range(10):

        # Create a Task object
        t = Task()
        t.name = 'my-task'        # Assign a name to the task (optional, do not use ',' or '_')
        t.executable = '/bin/echo'   # Assign executable to the task
        t.arguments = ['I am task %s'%cnt]  # Assign arguments for the task executable

        # Add the Task to the Stage
        s.add_tasks(t)

    # Add Stage to the Pipeline
    p.add_stages(s)

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {
Exemplo n.º 58
0
    def GeneralCycle(self, Replicas, Replica_Cores, Cycle, MD_Executable, ExchangeMethod):

        """
        All cycles after the initial cycle
        Pulls up exchange pairs file and generates the new workflow
        """


        self._prof.prof('InitcreateMDwokflow_{0}'.format(Cycle), uid=self._uid)
        with open('exchangePairs_{0}.dat'.format(Cycle),'r') as f:  # Read exchangePairs.dat
            ExchangeArray = []
            for line in f:
                ExchangeArray.append(int(line.split()[1]))
                #ExchangeArray.append(line)
                #print ExchangeArray
                    

        q = Pipeline()
        q.name = 'genpipeline{0}'.format(Cycle)
        #Bookkeeping
        stage_uids = list()
        task_uids = list() ## = dict()
        md_dict = dict()


        #Create initial MD stage


        md_stg = Stage()
        md_stg.name = 'mdstage{0}'.format(Cycle)

        self._prof.prof('InitMD_{0}'.format(Cycle), uid=self._uid)
    
        for r in range (Replicas):
            md_tsk                 = AMBERTask(cores=Replica_Cores, MD_Executable=MD_Executable)
            md_tsk.name            = 'mdtsk-{replica}-{cycle}'.format(replica=r,cycle=Cycle)
            md_tsk.link_input_data = ['%s/restrt > inpcrd'%(self.Book[Cycle-1][ExchangeArray[r]]),
                                      '%s/prmtop'%(self.Book[0][r]),
                                      #'%s/prmtop'%(self.Tarball_path[0]),
                                      '%s/mdin_{0}'.format(r)%(self.Book[0][r])]

                                      #'%s/mdin'%(self.Book[0][r])]
                                      #'%s/mdin'%(self.Tarball_path[0])]

            md_tsk.arguments      = ['-O', '-i', 'mdin_{0}'.format(r), '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out_{0}'.format(r),'-inf', 'mdinfo_{0}'.format(r)]
            #md_tsk.arguments       = ['-O', '-i', 'mdin', '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out_{0}'.format(r),'-inf', 'mdinfo_{0}'.format(r)]
            md_dict[r]             = '$Pipeline_%s_Stage_%s_Task_%s'%(q.name, md_stg.name, md_tsk.name)
            self.md_task_list.append(md_tsk)
            md_stg.add_tasks(md_tsk)
        

        
        q.add_stages(md_stg)
                 
                                                                                            
                                                                                              
        ex_stg = Stage()
        ex_stg.name = 'exstg{0}'.format(Cycle+1)

        #Create Exchange Task
        ex_tsk                      = Task()
        ex_tsk.name                 = 'extsk{0}'.format(Cycle+1)
        ex_tsk.executable           = ['python']
        ex_tsk.upload_input_data    = [ExchangeMethod]
        for r in range (Replicas):

            ex_tsk.link_input_data += ['%s/mdinfo_%s'%(md_dict[r],r)]

        ex_tsk.arguments            = ['TempEx.py','{0}'.format(Replicas), '{0}'.format(Cycle+1)]
        ex_tsk.cores                = 1
        ex_tsk.mpi                  = False
        ex_tsk.download_output_data = ['exchangePairs_{0}.dat'.format(Cycle+1)] # Finds exchange partners, also  Generates exchange history trace

        ex_stg.add_tasks(ex_tsk)

        #task_uids.append(ex_tsk.uid)
        self.ex_task_list.append(ex_tsk)

        q.add_stages(ex_stg)

        #stage_uids.append(ex_stg.uid)

        self.Book.append(md_dict)
        #self._prof.prof('EndEx_{0}'.format(Cycle), uid=self._uid)
        #print d
        #print self.Book
        return q
Exemplo n.º 59
0
def Cycle(Replicas, Replica_Cores, Cycle, MD_Executable, ExchangeMethod):

    """
    All cycles after the initial cycle
    """

    with open("exchangePairs.dat","r") as f:  # Read exchangePairs.dat
        ExchangeArray = []
        for line in f:
            ExchangeArray.append(int(line.split()[1]))
            #ExchangeArray.append(line)
            #print ExchangeArray
                

    q = Pipeline()
    #Bookkeeping
    stage_uids = list()
    task_uids = list() ## = dict()
    md_dict = dict()


    #Create initial MD stage


    md_stg = Stage()
    for r in range (Replicas):
        md_tsk                 = Task()
        md_tsk.executable      = [MD_Executable]  #MD Engine, Blue Waters
        md_tsk.link_input_data = ['%s/restrt > inpcrd'%(Book[Cycle-1][ExchangeArray[r]]),
                                  '%s/prmtop'%(Book[Cycle-1][r]),
                                  #'%s/mdin_{0}'.format(r)%(Book[k-1][r])]
                                  '%s/mdin'%(Book[Cycle-1][r])]

        md_tsk.pre_exec        = ['export AMBERHOME=$HOME/amber/amber14/'] # Should be abstracted from user?
        #md_tsk.pre_exec       = ['module load amber']
        #md_tsk.arguments      = ['-O', '-i', 'mdin_{0}'.format(n0), '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out_{0}'.format(n0),'-inf', 'mdinfo_{0}'.format(n0)]
        md_tsk.arguments       = ['-O', '-i', 'mdin', '-p', 'prmtop', '-c', 'inpcrd', '-o', 'out_{0}'.format(r),'-inf', 'mdinfo_{0}'.format(r)]
        md_tsk.cores           = Replica_Cores
        md_tsk.mpi             = True
        md_dict[r]             = '$Pipeline_%s_Stage_%s_Task_%s'%(q.uid, md_stg.uid, md_tsk.uid)
        md_stg.add_tasks(md_tsk)

        #task_uids.append(md_tsk.uid)
    q.add_stages(md_stg)
             
                                                                                         
                                                                                          
    ex_stg= Stage()
    #Create Exchange Task
    ex_tsk = Task()
    ex_tsk.executable = ['python']
    ex_tsk.upload_input_data = ['exchangeMethods/TempEx.py']
    for n1 in range (Replicas):
        #print md_dict[n1]

        ex_tsk.link_input_data += ['%s/mdinfo_%s'%(md_dict[n1],n1)]

    ex_tsk.arguments = ['TempEx.py','{0}'.format(Replicas)]
    ex_tsk.cores = 1
    ex_tsk.mpi = False
    ex_tsk.download_output_data = ['exchangePairs.dat']
    ex_stg.add_tasks(ex_tsk)
    #task_uids.append(ex_tsk.uid)
    q.add_stages(ex_stg)
    #stage_uids.append(ex_stg.uid)
    Book.append(md_dict)
    #print md_dict
    #print Book
    return q