Example #1
def add_ex_stg(rid, cycle):
    # exchange stage
    ex_tsk = Task()
    ex_stg = Stage()
    ex_tsk.name = 'extsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle)
    for r in range(len(waiting_replicas)):
        ex_tsk.link_input_data += ['%s/mdinfo-{replica}-{cycle}'.format(replica=r, cycle=cycle) % replica_sandbox]

    ex_tsk.arguments = ['t_ex_gibbs.py', len(waiting_replicas)]  # TODO: this still needs to be fixed
    ex_tsk.executable = ['python']
    ex_tsk.cpu_reqs = {
        'processes': 1,
        'process_type': '',
        'threads_per_process': 1,
        'thread_type': None
    }
    ex_tsk.pre_exec = ['export dummy_variable=19']

    ex_stg.add_tasks(ex_tsk)
    ex_stg.post_exec = {
        'condition': post_ex,
        'on_true': terminate_replicas,
        'on_false': continue_md
    }
    return ex_stg
def generate_pipeline():

    global CUR_TASKS, CUR_CORES, duration, MAX_NEW_STAGE

    def func_condition():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE < MAX_NEW_STAGE:
            return True

        return False

    def func_on_true():

        global CUR_NEW_STAGE
        CUR_NEW_STAGE += 1

        for t in p.stages[CUR_NEW_STAGE].tasks:
            cores = randint(1,16)
            t.arguments = ['-c', str(cores), '-t', str(duration)]

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE+1):

        # Create a Stage object
        s1 = Stage()

        for i in range(CUR_TASKS):

            t1 = Task()
            t1.pre_exec = ['export PATH=/u/sciteam/balasubr/modules/stress-ng-0.09.34:$PATH']
            t1.executable = ['stress-ng']
            t1.arguments = [ '-c', str(CUR_CORES), '-t', str(duration)]
            t1.cpu_reqs = {
                            'processes': 1,
                            'process_type': '',
                            'threads_per_process': CUR_CORES,
                            'thread_type': ''
                        }

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        s1.post_exec = {
                           'condition': func_condition,
                           'on_true': func_on_true,
                           'on_false': func_on_false
                       }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
    def describe_MD_stages():

        # Docking stage
        s1 = Stage()
        s1.name = 'Docking.%d' % CUR_NEW_STAGE

        # Docking task
        t1 = Task()
        t1.executable = ['sleep']
        t1.arguments = ['3']

        # Add the Docking task to the Docking Stage
        s1.add_tasks(t1)

        # MD stage
        s2 = Stage()
        s2.name = 'Simulation.%d' % CUR_NEW_STAGE

        # Each Task() is an OpenMM executable that will run on a single GPU.
        # Set sleep time for local testing
        for i in range(6):
            t2 = Task()
            t2.executable = ['sleep']
            t2.arguments = ['5']

            # Add the MD task to the Simulation Stage
            s2.add_tasks(t2)

        # Add post-exec to the Stage
        s2.post_exec = func_condition

        return [s1, s2]
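
The snippets above all drive adaptivity through Stage.post_exec. As a point of reference, here is a minimal, self-contained sketch of the dict form ('condition' / 'on_true' / 'on_false'), assuming the radical.entk release these examples target; the stage limit and sleep durations are placeholders. Some of the snippets further down assign a single callable to post_exec instead, and both patterns appear in this collection.

from radical.entk import Pipeline, Stage, Task

MAX_NEW_STAGE = 3   # placeholder limit
CUR_NEW_STAGE = 0

p = Pipeline()

def func_condition():
    # Keep extending the pipeline until the stage counter reaches the limit.
    return CUR_NEW_STAGE < MAX_NEW_STAGE

def func_on_true():
    global CUR_NEW_STAGE
    CUR_NEW_STAGE += 1

    s = Stage()
    t = Task()
    t.executable = '/bin/sleep'
    t.arguments = ['10']
    s.add_tasks(t)

    # The new stage re-registers the same callbacks, so the loop continues.
    s.post_exec = {'condition': func_condition,
                   'on_true':   func_on_true,
                   'on_false':  func_on_false}
    p.add_stages(s)

def func_on_false():
    print('Done')

s0 = Stage()
t0 = Task()
t0.executable = '/bin/sleep'
t0.arguments = ['10']
s0.add_tasks(t0)
s0.post_exec = {'condition': func_condition,
                'on_true':   func_on_true,
                'on_false':  func_on_false}
p.add_stages(s0)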
Example #4
def post_stage():
    if (not os.path.exists(f'{run_dir}/aggregator/stop.aggregator')):
        nstages = len(p.stages)
        s = Stage()
        s.name = f"{nstages}"
        t = Task()
        t.cpu_reqs = {
            'processes': 1,
            'process_type': None,
            'threads_per_process': 4,
            'thread_type': 'OpenMP'
        }
        t.gpu_reqs = {
            'processes': 0,
            'process_type': None,
            'threads_per_process': 0,
            'thread_type': None
        }
        t.name = f" {i}_{nstages} "
        t.executable = PYTHON
        t.arguments = [
            f'{current_dir}/simulation.py',
            f'{run_dir}/simulations/all/{i}_{nstages}', ADIOS_XML
        ]
        subprocess.getstatusoutput(
            f'ln -s  {run_dir}/simulations/all/{i}_{nstages} {run_dir}/simulations/new/{i}_{nstages}'
        )
        s.add_tasks(t)
        s.post_exec = post_stage
        p.add_stages(s)
Example #5
    def func_on_true():

        global CUR_NEW_STAGE, CUR_TASKS, CUR_CORES, duration

        CUR_NEW_STAGE += 1

        s = Stage()

        for i in range(CUR_TASKS):
            t = Task()
            t.pre_exec = [
                'export PATH=/u/sciteam/balasubr/modules/stress-ng-0.09.34:$PATH'
            ]
            t.executable = ['stress-ng']
            t.arguments = ['-c', str(CUR_CORES), '-t', str(duration)]
            t.cpu_reqs = {
                'processes': 1,
                'process_type': '',
                'threads_per_process': CUR_CORES,
                'thread_type': ''
            }

            # Add the Task to the Stage
            s.add_tasks(t)

        # Add post-exec to the Stage
        s.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        p.add_stages(s)
Example #6
def test_stage_post_exec():

    global p1

    p1.name = 'p1'

    s = Stage()
    s.name = 's1'

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = {
        'condition': condition,
        'on_true': on_true,
        'on_false': on_false
    }

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 30,
        'cpus': 1,
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    appman = AppManager(rts='radical.pilot', hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
        def add_md_stg(rid, cycle):
            # MD stage
            md_tsk = Task()
            md_stg = Stage()
            md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle)
            md_tsk.link_input_data += ['%s/inpcrd' %replica_sandbox, 
                                   '%s/prmtop' %replica_sandbox, 
                                   '%s/mdin-{replica}-{cycle}'.format(replica=rid, cycle=0) %replica_sandbox]
            md_tsk.arguments = ['-O', 
                            '-i',   'mdin-{replica}-{cycle}'.format(replica=rid, cycle=0), 
                            '-p',   'prmtop', 
                            '-c',   'inpcrd', 
                            '-o',   'out',
                            '-r',   '%s/restrt-{replica}-{cycle}'.format(replica=rid, cycle=cycle) %replica_sandbox,
                            '-x',   'mdcrd',
                            '-inf', '%s/mdinfo-{replica}-{cycle}'.format(replica=rid, cycle=cycle) %replica_sandbox]
            md_tsk.executable = ['/home/scm177/mantel/AMBER/amber14/bin/sander']
            md_tsk.cpu_reqs = {
                            'processes': replica_cores,
                            'process_type': '',
                            'threads_per_process': 1,
                            'thread_type': None
                               }
            md_tsk.pre_exec   = ['export dummy_variable=19', 'echo $SHARED']
         
            md_stg.add_tasks(md_tsk)
            md_stg.post_exec = {
                            'condition': md_post,
                            'on_true': suspend,
                            'on_false': exchange_stg
                          } 

            return md_stg
def test_stage_post_exec():

    global p1
    
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = condition

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 30,
        'cpus': 1,
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    appman = AppManager(rts='radical.pilot', hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
Example #9
def generate_pipeline():
    def func_condition():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            func_on_true()
        else:
            func_on_false()

    def func_on_true():

        global CUR_NEW_STAGE

        CUR_NEW_STAGE += 1

        s = Stage()

        for i in range(10):
            t = Task()
            t.executable = '/bin/sleep'
            t.arguments = ['30']

            s.add_tasks(t)

        # Add post-exec to the Stage
        s.post_exec = func_condition

        p.add_stages(s)

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    for i in range(10):

        t1 = Task()
        t1.executable = 'sleep'
        t1.arguments = ['30']

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add post-exec to the Stage
    s1.post_exec = func_condition

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
Example #10
def test_stage_post_exec_assignment(l, d):

    s = Stage()

    def func():
        return True

    with pytest.raises(TypeError):
        s.post_exec = l

    with pytest.raises(TypeError):
        s.post_exec = d

    s.post_exec = func

    class Tmp(object):
        def func(self):
            return True

    tmp = Tmp()
    s.post_exec = tmp.func
Example #11
def test_stage_post_exec_assignment(l, d):

    s = Stage()

    def func():
        return True

    with pytest.raises(TypeError):
        s.post_exec = l

    with pytest.raises(ValueError):
        s.post_exec = d

    pe_d = {'condition': 1, 'on_true': 2, 'on_false': 3}

    with pytest.raises(TypeError):
        s.post_exec = pe_d

    pe_d['condition'] = func
    with pytest.raises(TypeError):
        s.post_exec = pe_d

    pe_d['on_true'] = func
    with pytest.raises(TypeError):
        s.post_exec = pe_d

    pe_d['on_false'] = func
    s.post_exec = pe_d
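
This test exercises the validation around post_exec assignment: each of 'condition', 'on_true' and 'on_false' must be callable before the dict is accepted. A minimal sketch of that behavior, assuming the same EnTK version the test targets; noop is a hypothetical placeholder callback.

from radical.entk import Stage

def noop():
    return True

s = Stage()

pe = {'condition': noop, 'on_true': 42, 'on_false': noop}
try:
    s.post_exec = pe          # rejected: 'on_true' is not callable
except TypeError as e:
    print('rejected:', e)

pe['on_true'] = noop
s.post_exec = pe              # accepted once all three entries are callables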
Example #12
    def generate_interfacing_stage():
        s4 = Stage()
        s4.name = 'scanning'

        # Scanning for outliers and preparing the next stage of MDs
        t4 = Task()
        t4.pre_exec = []
        #t4.pre_exec += ['. /sw/summit/python/2.7/anaconda2/5.3.0/etc/profile.d/conda.sh']
        #t4.pre_exec += ['module load cuda/9.1.85']
        #t4.pre_exec += ['conda activate %s' % conda_path]
        t4.pre_exec += [
            'module unload prrte', 'module unload python', 'module load xl',
            'module load xalt', 'module load spectrum-mpi', 'module load cuda',
            'module list'
        ]
        t4.pre_exec += [
            '. /sw/summit/ibm-wml-ce/anaconda-base/etc/profile.d/conda.sh',
            'source /sw/summit/ibm-wml-ce/anaconda-base/etc/profile.d/conda.sh',
            'conda deactivate', 'conda deactivate',
            'conda activate /gpfs/alpine/proj-shared/med110/wf-2/conda/envs/ibm-wml-ce-cloned'
        ]
        #'conda activate /sw/summit/ibm-wml-ce/anaconda-base/envs/ibm-wml-ce-1.7.0-2']

        t4.pre_exec += [
            'export PYTHONPATH=%s/CVAE_exps:%s/CVAE_exps/cvae:$PYTHONPATH' %
            (base_path, base_path)
        ]
        t4.pre_exec += ['cd %s/Outlier_search' % base_path]
        #t4.executable = ['%s/bin/python' % conda_path]
        t4.executable = ['python']
        t4.arguments = [
            'outlier_locator.py', '--md', md_path, '--cvae', cvae_path,
            '--pdb', pdb_file
        ]
        #'--ref', ref_pdb_file]

        t4.cpu_reqs = {
            'processes': 1,
            'process_type': None,
            'threads_per_process': 16,
            'thread_type': 'OpenMP'
        }
        t4.gpu_reqs = {
            'processes': 1,
            'process_type': None,
            'threads_per_process': 1,
            'thread_type': 'CUDA'
        }
        s4.add_tasks(t4)
        s4.post_exec = func_condition

        return s4
Example #13
    def generate_interfacing_stage():
        s4 = Stage()
        s4.name = 'scanning'

        # Scanning for outliers and preparing the next stage of MDs
        t4 = Task()
        t4.pre_exec = []
        t4.pre_exec = ['module reset']
        t4.pre_exec += [
            '. /sw/summit/python/2.7/anaconda2/5.3.0/etc/profile.d/conda.sh'
        ]
        t4.pre_exec += ['module load cuda/9.1.85']
        t4.pre_exec += ['conda activate rp.copy']
        t4.pre_exec += ['export CUDA_VISIBLE_DEVICES=0']

        t4.pre_exec += [
            'export PYTHONPATH=/gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/CVAE_exps:$PYTHONPATH'
        ]
        t4.pre_exec += [
            'cd /gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/Outlier_search'
        ]
        # python outlier_locator.py -m ../MD_exps/fs-pep -c ../CVAE_exps -p ../MD_exps/fs-pep/pdb/100-fs-peptide-400K.pdb
        t4.executable = ['/ccs/home/hrlee/.conda/envs/rp.copy/bin/python']
        t4.arguments = [
            'outlier_locator.py', '--md', '../MD_exps/fs-pep',
            '--cvae', '../CVAE_exps',
            '--pdb', '../MD_exps/fs-pep/pdb/100-fs-peptide-400K.pdb'
        ]
        #     t4.arguments = ['/gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/Outlier_search/outlier_locator.py',
        #             '-m', '/gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/MD_exps/fs-pep',
        #             '-c', '/gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/CVAE_exps',
        #             '-p', '/gpfs/alpine/bip179/scratch/hrlee/hyperspace/microscope/experiments/MD_exps/fs-pep/pdb/100-fs-peptide-400K.pdb'
        #             ]

        t4.cpu_reqs = {
            'processes': 1,
            'process_type': None,
            'threads_per_process': 12,
            'thread_type': 'OpenMP'
        }
        t4.gpu_reqs = {
            'processes': 1,
            'process_type': None,
            'threads_per_process': 1,
            'thread_type': 'CUDA'
        }
        s4.add_tasks(t4)
        s4.post_exec = func_condition

        return s4
Example #14
def generate_pipeline():

    def func_condition():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            return True

        return False

    def func_on_true():

        global CUR_NEW_STAGE
        CUR_NEW_STAGE += 1

        shuffle(p.stages[CUR_NEW_STAGE:])

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE+1):

        # Create a Stage object
        s1 = Stage()

        for i in range(CUR_TASKS):

            t1 = Task()
            t1.executable = '/bin/sleep'
            t1.arguments = [ '30']

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        s1.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
Example #15
    def generate_interfacing_stage():
        s4 = Stage()
        s4.name = 'scanning'

        # Scanning for outliers and preparing the next stage of MDs
        t4 = Task()

        t4.pre_exec  = ['. /sw/summit/python/3.6/anaconda3/5.3.0/etc/profile.d/conda.sh']
        t4.pre_exec += ['conda activate %s' % cfg['conda_pytorch']]
        t4.pre_exec += ['mkdir -p %s/Outlier_search/outlier_pdbs' % cfg['base_path']]
        t4.pre_exec += ['export models=""; for i in `ls -d %s/CVAE_exps/model-cvae_runs*/`; do if [ "$models" != "" ]; then    models=$models","$i; else models=$i; fi; done;cat /dev/null' % cfg['base_path']]
        t4.pre_exec += ['export LANG=en_US.utf-8', 'export LC_ALL=en_US.utf-8']
        t4.pre_exec += ['unset CUDA_VISIBLE_DEVICES', 'export OMP_NUM_THREADS=4']

        cmd_cat = 'cat /dev/null'
        cmd_jsrun = 'jsrun -n %s -a 6 -g 6 -r 1 -c 7' % cfg['node_counts']

        #molecules_path = '/gpfs/alpine/world-shared/ven201/tkurth/molecules/'
        t4.executable = [' %s; %s %s/examples/outlier_detection/run_optics_dist_summit_entk.sh' % (cmd_cat, cmd_jsrun, cfg['molecules_path'])]
        t4.arguments = ['%s/bin/python' % cfg['conda_pytorch']]
        t4.arguments += ['%s/examples/outlier_detection/optics.py' % cfg['molecules_path'],
                        '--sim_path', '%s/MD_exps/%s' % (cfg['base_path'], cfg['system_name']),
                        '--pdb_out_path', '%s/Outlier_search/outlier_pdbs' % cfg['base_path'],
                        '--restart_points_path',
                        '%s/Outlier_search/restart_points.json' % cfg['base_path'],
                        '--data_path', '%s/MD_to_CVAE/cvae_input.h5' % cfg['base_path'],
                        '--model_paths', '$models',
                        '--model_type', cfg['model_type'],
                        '--min_samples', 10,
                        '--n_outliers', 500,
                        '--dim1', cfg['residues'],
                        '--dim2', cfg['residues'],
                        '--cm_format', 'sparse-concat',
                        '--batch_size', cfg['batch_size'],
                        '--distributed',
                        '-iw', cfg['init_weights']]

        t4.cpu_reqs = {'processes'          : 1,
                       'process_type'       : None,
                       'threads_per_process': 12,
                       'thread_type'        : 'OpenMP'}
        t4.gpu_reqs = {'processes'          : 1,
                       'process_type'       : None,
                       'threads_per_process': 1,
                       'thread_type'        : 'CUDA'}

        s4.add_tasks(t4)
        s4.post_exec = func_condition
        return s4
Example #16
def generate_pipeline():
    def func_condition():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            return True

        return False

    def func_on_true():

        global CUR_NEW_STAGE
        CUR_NEW_STAGE += 1
        for t in p.stages[CUR_NEW_STAGE].tasks:
            dur = randint(10, 30)
            t.arguments = [str(dur)]

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE + 1):

        # Create a Stage object
        s1 = Stage()

        for _ in range(CUR_TASKS):

            t1 = Task()
            t1.executable = ['sleep']
            t1.arguments = ['30']

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        s1.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
Example #17
    def test_stage_post_exec_assignment(self, mocked_init, l, d):

        s = Stage()
        s._uid = 'test_stage'

        def func():
            return True

        with self.assertRaises(TypeError):
            s.post_exec = l

        with self.assertRaises(TypeError):
            s.post_exec = d

        s.post_exec = func
        self.assertEqual(s._post_exec, func)

        class Tmp(object):
            def func(self):
                return True

        tmp = Tmp()
        s.post_exec = tmp.func
        self.assertEqual(s._post_exec, tmp.func)
    def on_true():

        nonlocal NUM_TASKS, CUR_STAGE, p1

        NUM_TASKS *= 2

        s = Stage()
        s.name = 's%s' % CUR_STAGE

        for _ in range(NUM_TASKS):
            s.add_tasks(create_single_task())

        s.post_exec = condition

        p1.add_stages(s)
def on_true():

    global NUM_TASKS, CUR_STAGE

    NUM_TASKS *= 2

    s = Stage()
    s.name = 's%s'%CUR_STAGE

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = condition

    p1.add_stages(s)
def test_stage_post_exec_assignment(l, d):

    s = Stage()

    def func():
        return True

    with pytest.raises(TypeError):
        s.post_exec = l

    with pytest.raises(TypeError):
        s.post_exec = d


    s.post_exec = func

    class Tmp(object):

        def func(self):
            return True


    tmp = Tmp()
    s.post_exec = tmp.func
Example #21
    def func_post():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:

            CUR_NEW_STAGE += 1
            s = Stage()
            for i in range(10):
                t = Task()
                t.executable = '/bin/sleep'
                t.arguments = ['30']
                s.add_tasks(t)

            # Add post-exec to the Stage
            s.post_exec = func_post

            p.add_stages(s)
Example #22
    def describe_MD_pipline():
        p = Pipeline()
        p.name = 'MD'

        # Docking stage
        s1 = Stage()
        s1.name = 'Docking'

        # Docking task
        t1 = Task()
        t1.executable = ['sleep']
        t1.arguments = ['30']

        # Add the Docking task to the Docking Stage
        s1.add_tasks(t1)

        # Add Docking stage to the pipeline
        p.add_stages(s1)

        # MD stage
        s2 = Stage()
        s2.name = 'Simulation'

        # Each Task() is an OpenMM executable that will run on a single GPU.
        # Set sleep time for local testing
        for i in range(6):
            t2 = Task()
            t2.executable = ['sleep']
            t2.arguments = ['60']

            # Add the MD task to the Simulation Stage
            s2.add_tasks(t2)

        # Add post-exec to the Stage
        s2.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        # Add MD stage to the MD Pipeline
        p.add_stages(s2)

        return p
def generate_pipeline():

    def func_condition():

        p.suspend()
        print('Suspending pipeline %s for 10 seconds' % p.uid)
        sleep(10)
        return True

    def func_on_true():

        print('Resuming pipeline %s' % p.uid)
        p.resume()

    def func_on_false():
        pass

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    for i in range(10):

        t1 = Task()
        t1.executable = ['sleep']
        t1.arguments = ['30']

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add post-exec to the Stage
    s1.post_exec = {
        'condition': func_condition,
        'on_true': func_on_true,
        'on_false': func_on_false
    }

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
def generate_pipeline():

    def func_condition():

        p.suspend()
        print('Suspending pipeline %s for 10 seconds' % p.uid)
        sleep(10)
        return True

    def func_on_true():

        print('Resuming pipeline %s' % p.uid)
        p.resume()

    def func_on_false():
        pass

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    for i in range(10):

        t1 = Task()
        t1.executable = '/bin/sleep'
        t1.arguments = ['30']

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add post-exec to the Stage
    s1.post_exec = {
        'condition': func_condition,
        'on_true': func_on_true,
        'on_false': func_on_false
    }

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
Example #25
def on_true():

    global NUM_TASKS, CUR_STAGE

    NUM_TASKS *= 2

    s = Stage()
    s.name = 's%s' % CUR_STAGE

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = {
        'condition': condition,
        'on_true': on_true,
        'on_false': on_false
    }

    p1.add_stages(s)
    def create_exch_stage():

        print('Replica %s | Cycle %s | Event: Creating exchange task' % (
            index, replica_cycles[index]))

        s_exch = Stage()
        t_exch = Task()
        t_exch.executable = ['/bin/sleep']
        t_exch.arguments = ['5']

        s_exch.add_tasks(t_exch)

        s_exch.post_exec = {
            'condition': exch_condition,
            'on_true': exch_on_true,
            'on_false': exch_on_false
        }

        return s_exch
Example #27
    def func_on_true():

        global CUR_NEW_STAGE

        CUR_NEW_STAGE += 1

        s = Stage()

        for i in range(10):
            t = Task()
            t.executable = '/bin/sleep'
            t.arguments = ['30']

            s.add_tasks(t)

        # Add post-exec to the Stage
        s.post_exec = func_condition

        p.add_stages(s)
Example #28
    def generate_interfacing_stage():
        s4 = Stage()
        s4.name = 'scanning'

        # Scanning for outliers and preparing the next stage of MDs
        t4 = Task()
        t4.pre_exec = []
        t4.pre_exec += [
            '. /sw/summit/python/2.7/anaconda2/5.3.0/etc/profile.d/conda.sh'
        ]
        t4.pre_exec += ['module load cuda/9.1.85']
        t4.pre_exec += ['conda activate %s' % conda_path]

        t4.pre_exec += [
            'export PYTHONPATH=%s/CVAE_exps:$PYTHONPATH' % base_path
        ]
        t4.pre_exec += ['cd %s/Outlier_search' % base_path]
        t4.executable = ['%s/bin/python' % conda_path]
        t4.arguments = [
            'outlier_locator.py', '--md', md_path, '--cvae', cvae_path,
            '--pdb', pdb_file, '--ref', ref_pdb_file
        ]

        t4.cpu_reqs = {
            'processes': 1,
            'process_type': None,
            'threads_per_process': 12,
            'thread_type': 'OpenMP'
        }
        t4.gpu_reqs = {
            'processes': 1,
            'process_type': None,
            'threads_per_process': 1,
            'thread_type': 'CUDA'
        }
        s4.add_tasks(t4)
        s4.post_exec = func_condition

        return s4
    def create_md_stage():

        print('Replica %s | Cycle %s | Event: Creating MD task' % (
            index, replica_cycles[index]))

        # Create a Stage object
        s_md = Stage()

        # Create a Task object which creates a file named 'output.txt' of size 1 MB
        t_md = Task()
        t_md.executable = ['/bin/sleep']
        t_md.arguments = ['%s' % randint(2, 7)]

        # Add the Task to the Stage
        s_md.add_tasks(t_md)

        s_md.post_exec = {
            'condition': md_condition,
            'on_true': md_on_true,
            'on_false': md_on_false
        }

        return s_md
Example #30
    def func_on_true():

        global CUR_NEW_STAGE

        CUR_NEW_STAGE += 1

        s = Stage()

        for i in range(10):
            t = Task()
            t.executable = ['sleep']
            t.arguments = ['30']

            s.add_tasks(t)

        # Add post-exec to the Stage
        s.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        p.add_stages(s)
    def func_on_true():

        global CUR_NEW_STAGE

        CUR_NEW_STAGE += 1

        s = Stage()

        for i in range(10):
            t = Task()
            t.executable = '/bin/sleep'
            t.arguments = [ '30']

            s.add_tasks(t)

        # Add post-exec to the Stage
        s.post_exec = {
                        'condition': func_condition,
                        'on_true': func_on_true,
                        'on_false': func_on_false
                    }

        p.add_stages(s)
Example #32
def generate_pipeline():
    def func_post():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            CUR_NEW_STAGE += 1
            for t in p.stages[CUR_NEW_STAGE].tasks:
                dur = randint(10, 30)
                t.arguments = [str(dur)]
        else:
            print('Done')

    # Create a Pipeline object
    p = Pipeline()

    for s in range(MAX_NEW_STAGE + 1):

        # Create a Stage object
        s1 = Stage()

        for _ in range(CUR_TASKS):

            t1 = Task()
            t1.executable = '/bin/sleep'
            t1.arguments = ['30']

            # Add the Task to the Stage
            s1.add_tasks(t1)

        # Add post-exec to the Stage
        s1.post_exec = func_post

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p
def generate_pipeline():

    def func_condition():

        global CUR_NEW_STAGE, MAX_NEW_STAGE

        if CUR_NEW_STAGE <= MAX_NEW_STAGE:
            return True

        return False

    def func_on_true():

        global CUR_NEW_STAGE

        CUR_NEW_STAGE += 1

        s = Stage()

        for i in range(10):
            t = Task()
            t.executable = '/bin/sleep'
            t.arguments = [ '30']

            s.add_tasks(t)

        # Add post-exec to the Stage
        s.post_exec = {
                        'condition': func_condition,
                        'on_true': func_on_true,
                        'on_false': func_on_false
                    }

        p.add_stages(s)

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object
    s1 = Stage()

    for i in range(10):

        t1 = Task()
        t1.executable = ['sleep']
        t1.arguments = [ '30']

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add post-exec to the Stage
    s1.post_exec = {
                        'condition': func_condition,
                        'on_true': func_on_true,
                        'on_false': func_on_false
                    }

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p
Example #34
    def func_on_true():

        global cur_iter, book

        # Create Stage 2
        s2 = Stage()
        s2.name = 'iter%s-s2' % cur_iter[instance]

        # Create a Task
        t2 = Task()
        t2.name = 'iter%s-s2-t2' % cur_iter[instance]

        t2.pre_exec = ['source %s/bin/GMXRC.bash' % GMX_PATH]
        t2.executable = ['gmx grompp']
        t2.arguments = [
            '-f', 'CB7G3_run.mdp', '-c', 'CB7G3.gro', '-p', 'CB7G3.top', '-n',
            'CB7G3.ndx', '-o', 'CB7G3.tpr', '-maxwarn', '10'
        ]
        t2.cores = 1
        t2.copy_input_data = [
            '$SHARED/CB7G3.ndx', '$SHARED/CB7G3.top', '$SHARED/3atomtypes.itp',
            '$SHARED/3_GMX.itp', '$SHARED/cucurbit_7_uril_GMX.itp'
        ]

        if cur_iter[instance] == 1:
            t2.copy_input_data += [
                '$Pipeline_%s_Stage_%s_Task_%s/CB7G3_run.mdp' %
                (p.name, book[p.name]['stages'][-1]['name'],
                 book[p.name]['stages'][-1]['task']), '$SHARED/CB7G3.gro'
            ]
        else:
            t2.copy_input_data += [
                '$Pipeline_%s_Stage_%s_Task_%s/CB7G3_run.mdp' %
                (p.name, book[p.name]['stages'][-1]['name'],
                 book[p.name]['stages'][-1]['task']),
                '$Pipeline_%s_Stage_%s_Task_%s/CB7G3.gro' %
                (p.name, book[p.name]['stages'][-2]['name'],
                 book[p.name]['stages'][-2]['task'])
            ]

        # Add the Task to the Stage
        s2.add_tasks(t2)

        # Add current Task and Stage to our book
        book[p.name]['stages'].append({'name': s2.name, 'task': t2.name})

        # Add Stage to the Pipeline
        p.add_stages(s2)

        # Create Stage 3
        s3 = Stage()
        s3.name = 'iter%s-s3' % cur_iter[instance]

        # Create a Task
        t3 = Task()
        t3.name = 'iter%s-s3-t3' % cur_iter[instance]
        t3.pre_exec = ['source %s/bin/GMXRC.bash' % GMX_PATH]
        t3.executable = ['gmx mdrun']
        t3.arguments = [
            '-nt',
            20,
            '-deffnm',
            'CB7G3',
            '-dhdl',
            'CB7G3_dhdl.xvg',
        ]
        t3.cores = 20
        # t3.mpi = True
        t3.copy_input_data = [
            '$Pipeline_%s_Stage_%s_Task_%s/CB7G3.tpr' %
            (p.name, book[p.name]['stages'][-1]['name'],
             book[p.name]['stages'][-1]['task'])
        ]
        t3.copy_output_data = [
            'CB7G3_dhdl.xvg > $SHARED/CB7G3_run{1}_gen{0}_dhdl.xvg'.format(
                cur_iter[instance], instance),
            'CB7G3_pullf.xvg > $SHARED/CB7G3_run{1}_gen{0}_pullf.xvg'.format(
                cur_iter[instance], instance),
            'CB7G3_pullx.xvg > $SHARED/CB7G3_run{1}_gen{0}_pullx.xvg'.format(
                cur_iter[instance], instance),
            'CB7G3.log > $SHARED/CB7G3_run{1}_gen{0}.log'.format(
                cur_iter[instance], instance)
        ]
        t3.download_output_data = [
            'CB7G3.xtc > ./output/CB7G3_run{1}_gen{0}.xtc'.format(
                cur_iter[instance], instance),
            'CB7G3.log > ./output/CB7G3_run{1}_gen{0}.log'.format(
                cur_iter[instance], instance),
            'CB7G3_dhdl.xvg > ./output/CB7G3_run{1}_gen{0}_dhdl.xvg'.format(
                cur_iter[instance], instance),
            'CB7G3_pullf.xvg > ./output/CB7G3_run{1}_gen{0}_pullf.xvg'.format(
                cur_iter[instance], instance),
            'CB7G3_pullx.xvg > ./output/CB7G3_run{1}_gen{0}_pullx.xvg'.format(
                cur_iter[instance], instance),
            'CB7G3.gro > ./output/CB7G3_run{1}_gen{0}.gro'.format(
                cur_iter[instance], instance)
        ]

        # Add the Task to the Stage
        s3.add_tasks(t3)

        # Add current Task and Stage to our book
        book[p.name]['stages'].append({'name': s3.name, 'task': t3.name})

        # Add Stage to the Pipeline
        p.add_stages(s3)

        # Create Stage 4
        s4 = Stage()
        s4.name = 'iter%s-s4' % cur_iter[instance]

        # Create a Task
        t4 = Task()
        t4.name = 'iter%s-s4-t4' % cur_iter[instance]
        t4.pre_exec = [
            'module load python/2.7.7-anaconda',
            'export PYTHONPATH=%s/alchemical_analysis:$PYTHONPATH' %
            ALCH_ANA_PATH,
            'export PYTHONPATH=%s:$PYTHONPATH' % ALCH_ANA_PATH,
            'export PYTHONPATH=/home/vivek91/.local/lib/python2.7/site-packages:$PYTHONPATH',
            'ln -s ../staging_area data'
        ]
        t4.executable = ['python']
        t4.arguments = [
            'analysis_2.py',
            '--newname=CB7G3_run.mdp',
            '--template=CB7G3_template.mdp',
            '--dir=./data',
            # '--prev_data=%s'%DATA_LOC
            '--gen={0}'.format(cur_iter[instance], instance),
            '--run={1}'.format(cur_iter[instance], instance)
        ]
        t4.cores = 1
        t4.copy_input_data = [
            '$SHARED/analysis_2.py',
            '$SHARED/alchemical_analysis.py',
            '$SHARED/CB7G3_template.mdp',
        ]

        t4.download_output_data = [
            'analyze_1/results.txt > ./output/results_run{1}_gen{0}.txt'.
            format(cur_iter[instance],
                   instance), 'STDOUT > ./output/stdout_run{1}_gen{0}'.format(
                       cur_iter[instance], instance),
            'STDERR > ./output/stderr_run{1}_gen{0}'.format(
                cur_iter[instance], instance),
            'CB7G3_run.mdp > ./output/CB7G3_run{1}_gen{0}.mdp'.format(
                cur_iter[instance], instance),
            'results_average.txt > ./output/results_average_run{1}_gen{0}.txt'.
            format(cur_iter[instance], instance)
        ]

        s4.post_exec = {
            'condition': func_condition,
            'on_true': func_on_true,
            'on_false': func_on_false
        }

        # Add the Task to the Stage
        s4.add_tasks(t4)

        # Add current Task and Stage to our book
        book[p.name]['stages'].append({'name': s4.name, 'task': t4.name})

        # Add Stage to the Pipeline
        p.add_stages(s4)

        print(book)
Example #35
def generate_simulation_pipeline(i):
    def post_stage():
        if (not os.path.exists(f'{run_dir}/aggregator/stop.aggregator')):
            nstages = len(p.stages)
            s = Stage()
            s.name = f"{nstages}"
            t = Task()
            t.cpu_reqs = {
                'processes': 1,
                'process_type': None,
                'threads_per_process': 4,
                'thread_type': 'OpenMP'
            }
            t.gpu_reqs = {
                'processes': 0,
                'process_type': None,
                'threads_per_process': 0,
                'thread_type': None
            }
            t.name = f" {i}_{nstages} "
            t.executable = PYTHON
            t.arguments = [
                f'{current_dir}/simulation.py',
                f'{run_dir}/simulations/all/{i}_{nstages}', ADIOS_XML
            ]
            subprocess.getstatusoutput(
                f'ln -s  {run_dir}/simulations/all/{i}_{nstages} {run_dir}/simulations/new/{i}_{nstages}'
            )
            s.add_tasks(t)
            s.post_exec = post_stage
            p.add_stages(s)

    p = Pipeline()
    nstages = len(p.stages)
    p.name = f"{i}"
    s = Stage()
    s.name = f"{nstages}"
    t = Task()
    t.cpu_reqs = {
        'processes': 1,
        'process_type': None,
        'threads_per_process': 4,
        'thread_type': 'OpenMP'
    }
    t.gpu_reqs = {
        'processes': 0,
        'process_type': None,
        'threads_per_process': 0,
        'thread_type': None
    }
    t.name = f" {i}_{nstages} "
    t.executable = PYTHON
    t.arguments = [
        f'{current_dir}/simulation.py',
        f'{run_dir}/simulations/all/{i}_{nstages}', ADIOS_XML
    ]
    subprocess.getstatusoutput(
        f'ln -s  {run_dir}/simulations/all/{i}_{nstages} {run_dir}/simulations/new/{i}_{nstages}'
    )
    s.add_tasks(t)
    s.post_exec = post_stage
    p.add_stages(s)
    print(f"In generate_simulation_pipelin({i}): {nstages}")
    print("=" * 20)
    print(p.to_dict())
    print("=" * 20)
    print('-' * 15)
    print(s.to_dict())
    print('-' * 15)
    print('_' * 10)
    print(t.to_dict())
    print('_' * 10)

    return p
Example #36
def generate_pipeline():

    global duration
    
    def func_condition():

        global CUR_TASKS, MAX_TASKS
    
        if CUR_TASKS < MAX_TASKS:
            return True

        return False

    def func_on_true():

        global CUR_TASKS, CUR_CORES

        CUR_TASKS = CUR_TASKS * 2
        CUR_CORES = CUR_CORES // 2  # integer division keeps threads_per_process an int

        s = Stage()

        for i in range(CUR_TASKS):
            t = Task()    
            t.pre_exec = ['export PATH=/u/sciteam/balasubr/modules/stress-ng-0.09.34:$PATH']
            t.executable = ['stress-ng']   
            t.arguments = [ '-c', str(CUR_CORES), '-t', str(duration)] 
            #t.executable = ['sleep']
            #t.arguments = ['20']
            t.cpu_reqs = {
                        'processes': 1,
                        'process_type': '',
                        'threads_per_process': CUR_CORES,
                        'thread_type': ''
                    }

            # Add the Task to the Stage
            s.add_tasks(t)

        # Add post-exec to the Stage
        s.post_exec = {
                       'condition': func_condition,
                       'on_true': func_on_true,
                       'on_false': func_on_false
                    }

        p.add_stages(s)

    def func_on_false():
        print('Done')

    # Create a Pipeline object
    p = Pipeline()

    # Create a Stage object 
    s1 = Stage()

    for i in range(CUR_TASKS):

        t1 = Task()    
        t1.pre_exec = ['export PATH=/u/sciteam/balasubr/modules/stress-ng-0.09.34:$PATH']
        t1.executable = ['stress-ng']   
        t1.arguments = [ '-c', str(CUR_CORES), '-t', str(duration)]  
        #t1.executable = ['sleep']
        #t1.arguments = ['20']
        t1.cpu_reqs = { 
                        'processes': 1,
                        'process_type': '',
                        'threads_per_process': CUR_CORES,
                        'thread_type': ''
                    }

        # Add the Task to the Stage
        s1.add_tasks(t1)

    # Add post-exec to the Stage
    s1.post_exec = {
                       'condition': func_condition,
                       'on_true': func_on_true,
                       'on_false': func_on_false
                   }

    # Add Stage to the Pipeline
    p.add_stages(s1)    

    return p
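
For completeness, a pipeline built by a generator like the one above is submitted through an AppManager, following the AppManager usage shown in the test snippets earlier in this collection. A minimal sketch, assuming a local RabbitMQ service; the hostname, port, and resource description are placeholders to adapt.

import os
from radical.entk import AppManager

hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')  # placeholder
port     = int(os.environ.get('RMQ_PORT', '5672'))      # placeholder

res_dict = {
    'resource': 'local.localhost',
    'walltime': 30,
    'cpus': 16,
}

appman = AppManager(hostname=hostname, port=port)
appman.resource_desc = res_dict
appman.workflow = [generate_pipeline()]
appman.run()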