Example #1
def test_issue_26():

    appman = AppManager(hostname=hostname, port=port, username=username,
            password=password, autoterminate=False)
    appman.resource_desc = {'resource': 'local.localhost',
                            'walltime': 10,
                            'cpus'    : 1,
                            'project' : ''}

    p1 = create_pipeline()
    p2 = create_pipeline()

    appman.workflow = [p1]
    appman.run()

    appman.workflow = [p2]
    appman.run()

    appman.resource_terminate()

    lhs = int(p1.stages[0].uid.split('.')[-1]) + 1
    rhs = int(p2.stages[0].uid.split('.')[-1])
    assert lhs == rhs

    for t in p1.stages[0].tasks:
        for tt in p2.stages[0].tasks:
            lhs = int(t.uid.split('.')[-1]) + 1
            rhs = int(tt.uid.split('.')[-1])
            assert lhs == rhs
Example #2
def test_issue_26():

    def create_pipeline():

        p = Pipeline()

        s = Stage()

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/echo']
        t1.arguments = ['hello']
        t1.copy_input_data = []
        t1.copy_output_data = []

        s.add_tasks(t1)

        p.add_stages(s)

        return p


    res_dict = {

            'resource': 'local.localhost',
            'walltime': 10,
            'cpus': 1,
            'project': ''

    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port, autoterminate=False)
    appman.resource_desc = res_dict


    p1 = create_pipeline()
    appman.workflow = [p1]
    appman.run()
    print(p1.uid, p1.stages[0].uid)

    p2 = create_pipeline()
    appman.workflow = [p2]
    appman.run()
    print(p2.uid, p2.stages[0].uid)

    appman.resource_terminate()

    lhs = int(p1.stages[0].uid.split('.')[-1]) + 1
    rhs = int(p2.stages[0].uid.split('.')[-1])
    assert lhs == rhs

    for t in p1.stages[0].tasks:
        for tt in p2.stages[0].tasks:
            lhs = int(t.uid.split('.')[-1]) + 1
            rhs = int(tt.uid.split('.')[-1])
            assert lhs == rhs
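
Most of these snippets reference module-level names (hostname, port, username, password, MLAB) without defining them. A minimal sketch of that setup, assuming the RMQ_HOSTNAME/RMQ_PORT environment pattern that appears later in this listing and a placeholder MongoDB URL for RADICAL-Pilot:

import os

# RabbitMQ connection details, read from the environment with local defaults
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port     = int(os.environ.get('RMQ_PORT', 5672))
username = os.environ.get('RMQ_USERNAME', 'guest')
password = os.environ.get('RMQ_PASSWORD', 'guest')

# MongoDB endpoint for RADICAL-Pilot; the default URL here is a placeholder
MLAB = os.environ.get('RADICAL_PILOT_DBURL', 'mongodb://localhost:27017/entk')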
Example #3
def test_issue_26():
    def create_pipeline():

        p = Pipeline()

        s = Stage()

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/echo']
        t1.arguments = ['hello']
        t1.copy_input_data = []
        t1.copy_output_data = []

        s.add_tasks(t1)

        p.add_stages(s)

        return p

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port, autoterminate=False)
    appman.resource_desc = res_dict

    p1 = create_pipeline()
    appman.workflow = [p1]
    appman.run()
    print(p1.uid, p1.stages[0].uid)

    p2 = create_pipeline()
    appman.workflow = [p2]
    appman.run()
    print(p2.uid, p2.stages[0].uid)

    appman.resource_terminate()

    lhs = int(p1.stages[0].uid.split('.')[-1]) + 1
    rhs = int(p2.stages[0].uid.split('.')[-1])
    assert lhs == rhs

    for t in p1.stages[0].tasks:
        for tt in p2.stages[0].tasks:
            lhs = int(t.uid.split('.')[-1]) + 1
            rhs = int(tt.uid.split('.')[-1])
            assert lhs == rhs
Example #4
def test_shared_data():

    for f in glob('%s/file*.txt' % cur_dir):
        os.remove(f)

    os.system('echo "Hello" > %s/file1.txt' % cur_dir)
    os.system('echo "World" > %s/file2.txt' % cur_dir)

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {'resource': 'local.localhost', 'walltime': 1, 'cpus': 1}

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data = ['%s/file1.txt' % cur_dir, '%s/file2.txt' % cur_dir]

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    with open('%s/output.txt' % cur_dir, 'r') as fp:
        assert [d.strip() for d in fp.readlines()] == ['Hello', 'World']

    os.remove('%s/file1.txt' % cur_dir)
    os.remove('%s/file2.txt' % cur_dir)
    os.remove('%s/output.txt' % cur_dir)
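
test_shared_data() calls a generate_pipeline() helper that is not shown. A plausible sketch, assuming a single task that concatenates the two staged files into output.txt and ships it back; '$SHARED' and the 'src > dst' notation are EnTK staging conventions, but the helper body itself is an assumption:

def generate_pipeline():

    # Hypothetical task: cat the two shared files into output.txt
    t = Task()
    t.executable = '/bin/cat'
    t.arguments = ['file1.txt', 'file2.txt', '>', 'output.txt']
    t.copy_input_data = ['$SHARED/file1.txt', '$SHARED/file2.txt']
    t.download_output_data = ['output.txt']  # fetched back for the assert

    s = Stage()
    s.add_tasks(t)

    p = Pipeline()
    p.add_stages(s)

    return p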
Example #5
def test_stage_post_exec():

    global p1
    
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = condition

    p1.add_stages(s)

    res_dict = {

            'resource': 'local.localhost',
            'walltime': 30,
            'cpus': 1,
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    appman = AppManager(rts='radical.pilot', hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
Example #6
def test_issue_271():

    # Create Application Manager
    appman = AppManager(hostname=hostname,
                        port=port,
                        username=username,
                        password=password)

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {'resource': 'local.localhost', 'walltime': 10, 'cpus': 1}

    # Assign resource request description to the Application Manager
    appman.resource_desc = res_dict

    # Assign the workflow as a set or list of Pipelines to the Application Manager
    # Note: The list order is not guaranteed to be preserved
    p = generate_pipeline()
    appman.workflow = set([p])

    # Run the Application Manager
    appman.run()

    # assert
    for t in p.stages[0].tasks:
        assert t.state == states.FAILED
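
test_issue_271() asserts that every task ends in states.FAILED, so the generate_pipeline() it relies on (not shown) must produce a failing task. A minimal sketch under that assumption:

def generate_pipeline():

    t = Task()
    t.name = 'fail'
    # Assumption: a command that cannot be found drives the task to FAILED
    t.executable = 'non-existent-executable'

    s = Stage()
    s.add_tasks(t)

    p = Pipeline()
    p.add_stages(s)

    return p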
Example #7
def test_issue_214():

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {
        'resource': 'local.localhost',
        'walltime': int(sleep) + 5,
        'cpus': 1
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()
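
test_issue_214() sizes its walltime from a module-level sleep value and calls an unshown generate_pipeline(). A sketch consistent with that, assuming one task that sleeps for the given number of seconds:

sleep = '60'  # assumed module-level value; the walltime above is derived from it

def generate_pipeline():

    t = Task()
    t.executable = '/bin/sleep'
    t.arguments = [sleep]

    s = Stage()
    s.add_tasks(t)

    p = Pipeline()
    p.add_stages(s)

    return p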
Example #8
def test_stage_post_exec():

    global p1

    p1.name = 'p1'

    s = Stage()
    s.name = 's1'

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = {
        'condition': condition,
        'on_true': on_true,
        'on_false': on_false
    }

    p1.add_stages(s)

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 30,
        'cpus': 1,
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    appman = AppManager(rts='radical.pilot', hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
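
Both test_stage_post_exec() variants assume module-level helpers: NUM_TASKS, create_single_task(), and the condition/on_true/on_false callbacks wired into s.post_exec. A sketch of what those could look like, assuming a simple counter that bounds the number of adaptive iterations (the names come from the snippets; the bodies are illustrative):

NUM_TASKS     = 10   # tasks per stage
CUR_NEW_STAGE = 0    # iteration counter advanced by the callbacks
MAX_NEW_STAGE = 4    # stop condition

def create_single_task():

    t = Task()
    t.name = 'simulation'
    t.executable = ['/bin/date']

    return t

def condition():
    # Decide whether another adaptive iteration is needed
    return CUR_NEW_STAGE <= MAX_NEW_STAGE

def on_true():
    # Advance the counter; a real workflow might also append a new Stage here
    global CUR_NEW_STAGE
    CUR_NEW_STAGE += 1

def on_false():
    pass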
Example #9
def test_write_session_description():

    amgr = AppManager(hostname=hostname, port=port)
    amgr.resource_desc = {'resource' : 'xsede.stampede',
                          'walltime' : 59,
                          'cpus'     : 128,
                          'gpus'     : 64,
                          'project'  : 'xyz',
                          'queue'    : 'high'}

    workflow      = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr.sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            resubmit_failed=amgr._resubmit_failed,
                            rmq_conn_params=amgr._rmq_conn_params)
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     rmgr=amgr._rmgr,
                                     rmq_conn_params=amgr._rmq_conn_params
                                     )

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))
    src  = '%s/sample_data' % pwd

    assert desc == ru.read_json('%s/expected_desc_write_session.json' % src)
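
test_write_session_description() calls generate_pipeline(1) and generate_pipeline(2), which are not shown; the test only needs deterministically named pipelines. A minimal sketch under that assumption:

def generate_pipeline(nid):

    # Hypothetical helper: one pipeline, one stage, one trivial task,
    # with names derived from the numeric id
    p = Pipeline()
    p.name = 'p%s' % nid

    s = Stage()
    s.name = 's1'

    t = Task()
    t.name = 't1'
    t.executable = '/bin/echo'
    t.arguments = ['hello']

    s.add_tasks(t)
    p.add_stages(s)

    return p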
Example #10
def test_amgr_assign_workflow():

    amgr = Amgr()

    with pytest.raises(TypeError):
        amgr.workflow = [1, 2, 3]

    with pytest.raises(TypeError):
        amgr.workflow = set([1, 2, 3])

    p1 = Pipeline()
    p2 = Pipeline()
    p3 = Pipeline()

    amgr._workflow = [p1, p2, p3]
    amgr._workflow = set([p1, p2, p3])
Example #12
def test_issue_199():


    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

            'resource': 'local.localhost',
            'walltime': 10,
            'cpus': 1
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()
Example #13
def test_issue_214():

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {
        'resource': 'local.localhost',
        'walltime': (int(sleep) // 60) + 5,
        'cpus': 1
    }

    # Create Application Manager
    appman = AppManager(hostname=hostname,
                        port=port,
                        username=username,
                        password=password)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()
Example #14
def test_amgr_synchronizer():

    amgr = Amgr(hostname=host, port=port, username=username, password=password)
    amgr._setup_mqs()

    p = Pipeline()
    s = Stage()

    # Create and add 10 tasks to the stage
    for cnt in range(10):

        t = Task()
        t.executable = 'some-executable-%s' % cnt

        s.add_tasks(t)

    p.add_stages(s)
    p._validate()

    amgr.workflow = [p]

    sid  = 'test.0016'
    rmgr = BaseRmgr({}, sid, None, {})
    tmgr = BaseTmgr(sid=sid,
                    pending_queue=['pending-1'],
                    completed_queue=['completed-1'],
                    rmgr=rmgr,
                    rmq_conn_params=amgr._rmq_conn_params,
                    rts=None)

    amgr._rmgr         = rmgr
    rmgr._task_manager = tmgr

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state           == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = mt.Event()
    sync_thread = mt.Thread(target=amgr._synchronizer,
                            name='synchronizer-thread')
    sync_thread.start()

    # Run the test helper in a separate process
    proc = mp.Process(target=func_for_synchronizer_test, name='temp-proc',
                      args=(amgr._sid, p, tmgr))

    proc.start()
    proc.join()


    # Wait for the AppManager to finish the message exchange;
    # no need to set *terminate_sync* here, a join timeout suffices
    # amgr._terminate_sync.set()
    sync_thread.join(15)

    for t in p.stages[0].tasks:
        assert t.state == states.COMPLETED
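
The mt and mp aliases used above are the conventional short imports for the standard-library threading and multiprocessing modules:

import threading       as mt
import multiprocessing as mp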
Example #15
def test_issue_259():

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port, username=username,
            password=password)

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 1
    }

    # Assign resource request description to the Application Manager
    appman.resource_desc = res_dict

    # Assign the workflow as a set or list of Pipelines to the Application Manager
    # Note: The list order is not guaranteed to be preserved
    appman.workflow = set([generate_pipeline()])

    # Run the Application Manager
    appman.run()

    # assert
    with open('output.txt','r') as f:
        assert '"Hello World"' == f.readlines()[0].strip()
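
test_issue_259() expects output.txt to contain the literal text "Hello World", quotes included, but its generate_pipeline() is not shown. A sketch under the assumption that the task echoes the quoted string and the file is fetched back:

def generate_pipeline():

    t = Task()
    t.executable = '/bin/echo'
    # The outer single quotes keep the double quotes in the output
    t.arguments = ['\'"Hello World"\'', '>', 'output.txt']
    t.download_output_data = ['output.txt']

    s = Stage()
    s.add_tasks(t)

    p = Pipeline()
    p.add_stages(s)

    return p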
Example #16
def test_issue_271():

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 1
    }

    # Assign resource request description to the Application Manager
    appman.resource_desc = res_dict

    # Assign the workflow as a set or list of Pipelines to the Application Manager
    # Note: The list order is not guaranteed to be preserved
    p = generate_pipeline()
    appman.workflow = set([p])

    # Run the Application Manager
    appman.run()

    # assert
    for t in p.stages[0].tasks:
        assert t.state == states.FAILED
Example #17
def test_diff_rmq():
    def create_pipeline():

        p = Pipeline()

        s = Stage()

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = '/bin/echo'
        t1.arguments = ['hello']
        t1.copy_input_data = []
        t1.copy_output_data = []

        s.add_tasks(t1)

        p.add_stages(s)

        return p

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
    }

    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    p1 = create_pipeline()
    print(p1.uid, p1.stages[0].uid)
    appman.workflow = [p1]
    appman.run()
Example #18
def test_amgr_assign_workflow():

    amgr = Amgr(hostname=host, port=port, username=username,
            password=password)

    with pytest.raises(TypeError):
        amgr.workflow = [1, 2, 3]

    with pytest.raises(TypeError):
        amgr.workflow = set([1, 2, 3])

    p1 = Pipeline()
    p2 = Pipeline()
    p3 = Pipeline()

    amgr._workflow = [p1, p2, p3]
    amgr._workflow = set([p1, p2, p3])
Example #19
def main():

    cmd = "{0} 'ls {1}'".format(ssh, dir_)
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, _ = p.communicate()

    out = out.decode('utf-8').strip().split(linesep)

    fullpaths = [op.join(dir_, p) for p in out]
    print(fullpaths)

    # Start radical entk pipeline

    p = Pipeline()

    for i in range(iterations):

        s = Stage()

        for fp in fullpaths:

            t = Task()
            t.name = 'Incrementation {}'.format(i)
            t.pre_exec = [
                'source /home/vhayot/miniconda3/etc/profile.d/conda.sh',
                'conda activate radenv'
            ]
            t.executable = 'python /home/vhayot/inc.py'

            if i == 0:
                t.arguments = [fp, out_dir, i]
            else:
                # Note: assumes all data is accessible through a shared dir;
                # RADICAL-EnTK also works without a shared FS, however
                t.arguments = [
                    op.join(out_dir,
                            "it-{0}-{1}".format(i - 1, op.basename(fp))),
                    out_dir, i
                ]

            s.add_tasks(t)

        # Create a new stage every time there's a dependency
        p.add_stages(s)

    appman = AppManager(hostname=hostname, port=port)

    appman.resource_desc = {
        'resource': 'xsede.bridges',
        'walltime': 20,
        'cpus': 5,
        'project': 'mc3bggp',
        'schema': 'gsissh'
    }

    appman.workflow = set([p])

    appman.run()
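
main() relies on several module-level names that are not shown. A sketch of the assumed setup; all values are placeholders:

import os.path as op

from os         import linesep
from subprocess import Popen, PIPE

ssh        = 'ssh user@remote-host'     # command prefix used to list files
dir_       = '/remote/path/to/inputs'   # directory whose files seed the tasks
out_dir    = '/remote/path/to/outputs'  # shared output directory
iterations = 3                          # number of dependent stages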
Example #20
def test_wfp_workflow_incomplete():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    p.stages[0].state = states.SCHEDULING
    p.state = states.SCHEDULED
    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    import json
    import pika

    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()
    mq_channel.basic_publish(exchange='',
                             routing_key='%s-completedq-1' % amgr._sid,
                             body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test,
                   name='temp-proc',
                   args=(wfp, ))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
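
func_for_dequeue_test() is not shown. A minimal sketch, assuming it only needs to drive the WFprocessor long enough to drain the completed queue, with start_processor() and terminate_processor() as the processor's start/stop calls:

import time

def func_for_dequeue_test(wfp):

    # Run the processor briefly so it can dequeue the completed task,
    # then shut it down (the duration is an arbitrary choice)
    wfp.start_processor()
    time.sleep(10)
    wfp.terminate_processor()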
Example #22
def test_state_order():

    """
    **Purpose**: Test if the Pipeline, Stage and Task are assigned their states in the correct order
    """

    def create_single_task():

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/date']
        t1.copy_input_data = []
        t1.copy_output_data = []

        return t1

    p1 = Pipeline()
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())

    p1.add_stages(s)

    res_dict = {

            'resource': 'local.localhost',
            'walltime': 5,
            'cpus': 1,
            'project': ''

    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'
    
    appman = Amgr(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    appman.workflow = [p1]
    appman.run()

    p_state_hist = p1.state_history
    assert p_state_hist == ['DESCRIBED', 'SCHEDULING', 'DONE']

    s_state_hist = p1.stages[0].state_history
    assert s_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'DONE']

    tasks = p1.stages[0].tasks

    for t in tasks:

        t_state_hist = t.state_history
        assert t_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'SUBMITTING', 'SUBMITTED',
                            'EXECUTED', 'DEQUEUEING', 'DEQUEUED', 'DONE']
Example #23
def test_amgr_synchronizer():

    logger = ru.get_logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')
    amgr = Amgr(hostname=hostname, port=port)

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=hostname, port=port))
    mq_channel = mq_connection.channel()

    amgr._setup_mqs()

    p = Pipeline()
    s = Stage()

    # Create and add 100 tasks to the stage
    for cnt in range(100):

        t = Task()
        t.executable = ['some-executable-%s' % cnt]

        s.add_tasks(t)

    p.add_stages(s)
    p._assign_uid(amgr._sid)
    p._validate()

    amgr.workflow = [p]

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    # Run the test helper in a separate process
    proc = Process(target=func_for_synchronizer_test, name='temp-proc',
                   args=(amgr._sid, p, logger, profiler))

    proc.start()
    proc.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULING

    assert p.stages[0].state == states.SCHEDULING
    assert p.state == states.SCHEDULING

    amgr._terminate_sync.set()
    sync_thread.join()
Example #24
def test_wfp_workflow_incomplete():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    p.stages[0].state = states.SCHEDULING
    p.state = states.SCHEDULED
    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    import json
    import pika

    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()
    mq_channel.basic_publish(exchange='',
                             routing_key='%s-completedq-1' % amgr._sid,
                             body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
Example #25
def test_issue_236():

    '''
    Create folder temp to transfer as input to task:
    .
    ./temp
    ./temp/dir1
    ./temp/dir1/file2.txt
    ./temp/file1.txt
    '''

    os.makedirs('%s/temp' % cur_dir)
    os.makedirs('%s/temp/dir1' % cur_dir)
    os.system('echo "Hello world" > %s/temp/file1.txt' % cur_dir)
    os.system('echo "Hello world" > %s/temp/dir1/file2.txt' % cur_dir)


    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

            'resource': 'local.localhost',
            'walltime': 10,
            'cpus': 1
    }


    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port, username=username,
            password=password)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    # Assert folder movement
    assert len(glob('/tmp/temp*')) >= 1
    assert len(glob('/tmp/temp/dir*')) == 1
    assert len(glob('/tmp/temp/*.txt')) == 1
    assert len(glob('/tmp/temp/dir1/*.txt')) == 1

    # Cleanup
    shutil.rmtree('%s/temp' % cur_dir)
    shutil.rmtree('/tmp/temp')
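
test_issue_236() exercises directory transfer, and its generate_pipeline() is not shown. A sketch consistent with the /tmp/temp assertions, assuming the task stages the prepared 'temp' tree via EnTK's 'src > dst' copy syntax:

def generate_pipeline():

    t = Task()
    t.executable = '/bin/true'
    # Assumption: copy the whole folder so it lands under /tmp/temp
    t.copy_input_data = ['%s/temp > /tmp/temp' % cur_dir]

    s = Stage()
    s.add_tasks(t)

    p = Pipeline()
    p.add_stages(s)

    return p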
Example #26
def test_issue_236():

    '''
    Create folder temp to transfer as input to task:
    .
    ./temp
    ./temp/dir1
    ./temp/dir1/file2.txt
    ./temp/file1.txt
    '''

    os.makedirs('%s/temp' % cur_dir)
    os.makedirs('%s/temp/dir1' % cur_dir)
    os.system('echo "Hello world" > %s/temp/file1.txt' % cur_dir)
    os.system('echo "Hello world" > %s/temp/dir1/file2.txt' % cur_dir)


    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

            'resource': 'local.localhost',
            'walltime': 10,
            'cpus': 1
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    # Assert folder movement
    assert len(glob('/tmp/temp*')) >= 1
    assert len(glob('/tmp/temp/dir*')) == 1
    assert len(glob('/tmp/temp/*.txt')) == 1
    assert len(glob('/tmp/temp/dir1/*.txt')) == 1

    # Cleanup
    shutil.rmtree('%s/temp' % cur_dir)
    shutil.rmtree('/tmp/temp')
Example #27
def test_amgr_run():

    amgr = Amgr(hostname=host, port=port)

    with pytest.raises(MissingError):
        amgr.run()

    p1 = Pipeline()
    p2 = Pipeline()
    p3 = Pipeline()

    with pytest.raises(MissingError):
        amgr.workflow = [p1, p2, p3]
Example #28
def test_amgr_synchronizer():

    logger = ru.Logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')
    amgr = Amgr(hostname=hostname, port=port)

    amgr._setup_mqs()

    p = Pipeline()
    s = Stage()

    # Create and add 100 tasks to the stage
    for cnt in range(100):

        t = Task()
        t.executable = ['some-executable-%s' % cnt]

        s.add_tasks(t)

    p.add_stages(s)
    p._assign_uid(amgr._sid)
    p._validate()

    amgr.workflow = [p]

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    # Run the test helper in a separate process
    proc = Process(target=func_for_synchronizer_test, name='temp-proc',
                   args=(amgr._sid, p, logger, profiler))

    proc.start()
    proc.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULING

    assert p.stages[0].state == states.SCHEDULING
    assert p.state == states.SCHEDULING

    amgr._terminate_sync.set()
    sync_thread.join()
Example #29
def test_wfp_enqueue():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_enqueue_test,
                   name='temp-proc',
                   args=(wfp, ))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED

    assert p.stages[0].state == states.SCHEDULED
    assert p.state == states.SCHEDULING
Example #30
def test_issue_255():

    def create_pipeline():

        p = Pipeline()

        s = Stage()

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['sleep']
        t1.arguments = ['10']

        s.add_tasks(t1)

        p.add_stages(s)

        return p

    res_dict = {

        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''

    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port, autoterminate=False)
    appman.resource_desc = res_dict

    p1 = create_pipeline()
    appman.workflow = [p1]
    appman.run()

    # p1 = create_pipeline()
    # appman.workflow = [p1]
    # appman.run()

    # appman.resource_terminate()

    tmgr = appman._task_manager

    tmgr.terminate_manager()
    # tmgr.terminate_heartbeat()

    tmgr.start_manager()
Example #31
def test_write_workflow():

    try:
        wf = list()
        wf.append(generate_pipeline(1))
        wf.append(generate_pipeline(2))

        amgr = AppManager(hostname=hostname, port=port)
        amgr.workflow = wf
        amgr._wfp = WFprocessor(sid=amgr._sid,
                                workflow=amgr._workflow,
                                pending_queue=amgr._pending_queue,
                                completed_queue=amgr._completed_queue,
                                mq_hostname=amgr._mq_hostname,
                                port=amgr._port,
                                resubmit_failed=amgr._resubmit_failed)
        amgr._wfp._initialize_workflow()
        wf = amgr._wfp.workflow

        write_workflow(wf, 'test')

        data = ru.read_json('test/entk_workflow.json')
        assert len(data) == len(wf) + 1

        stack = data.pop(0)
        assert list(stack.keys()) == ['stack']
        assert sorted(stack['stack'].keys()) == ['radical', 'sys']
        assert sorted(stack['stack']['sys'].keys()) == ['python', 'pythonpath', 'virtualenv']
        assert sorted(stack['stack']['radical'].keys()) == ['radical.entk', 'radical.pilot', 'radical.utils', 'saga']

        p_cnt = 0
        for p in data:
            assert p['uid'] == wf[p_cnt].uid
            assert p['name'] == wf[p_cnt].name
            assert p['state_history'] == wf[p_cnt].state_history
            s_cnt = 0
            for s in p['stages']:
                assert s['uid'] == wf[p_cnt].stages[s_cnt].uid
                assert s['name'] == wf[p_cnt].stages[s_cnt].name
                assert s['state_history'] == wf[p_cnt].stages[s_cnt].state_history
                for t in wf[p_cnt].stages[s_cnt].tasks:
                    assert t.to_dict() in s['tasks']
                s_cnt += 1
            p_cnt += 1

    except Exception:
        shutil.rmtree('test')
        raise
Example #32
def test_issue_255():
    def create_pipeline():

        p = Pipeline()

        s = Stage()

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = 'sleep'
        t1.arguments = ['10']

        s.add_tasks(t1)

        p.add_stages(s)

        return p

    res_dict = {
        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''
    }

    appman = AppManager(hostname=hostname,
                        port=port,
                        username=username,
                        password=password,
                        autoterminate=False)
    appman.resource_desc = res_dict

    p1 = create_pipeline()
    appman.workflow = [p1]
    appman.run()

    # p1 = create_pipeline()
    # appman.workflow = [p1]
    # appman.run()

    # appman.resource_terminate()

    tmgr = appman._task_manager

    tmgr.terminate_manager()
    # tmgr.terminate_heartbeat()

    tmgr.start_manager()
Example #33
def test_wfp_enqueue():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_enqueue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED

    assert p.stages[0].state == states.SCHEDULED
    assert p.state == states.SCHEDULING
Example #34
def test_write_session_description():

    amgr = AppManager(hostname=hostname,
                      port=port,
                      username=username,
                      password=password)
    amgr.resource_desc = {
        'resource': 'xsede.stampede',
        'walltime': 59,
        'cpus': 128,
        'gpus': 64,
        'project': 'xyz',
        'queue': 'high'
    }

    workflow = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr.sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            resubmit_failed=amgr._resubmit_failed,
                            rmq_conn_params=amgr._rmq_conn_params)
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     rmgr=amgr._rmgr,
                                     rmq_conn_params=amgr._rmq_conn_params)

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))
    # tasks are originally a set but are saved as a list in JSON;
    # sorting makes the comparison convenient and does not change validity
    for k, v in desc['tree'].items():
        if k.startswith('stage'):
            desc['tree'][k]['children'] = sorted(v['children'])

    src = '%s/sample_data' % pwd

    assert desc == ru.read_json('%s/expected_desc_write_session.json' % src)
Example #35
def test_write_session_description():

    hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
    port = int(os.environ.get('RMQ_PORT', 5672))
    amgr = AppManager(hostname=hostname, port=port)
    amgr.resource_desc = {
        'resource': 'xsede.stampede',
        'walltime': 60,
        'cpus': 128,
        'gpus': 64,
        'project': 'xyz',
        'queue': 'high'
    }

    workflow = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr._sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            mq_hostname=amgr._mq_hostname,
                            port=amgr._port,
                            resubmit_failed=amgr._resubmit_failed)
    amgr._wfp._initialize_workflow()
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._resource_manager,
                                     port=amgr._port
                                     )

    # os.mkdir(amgr._sid)

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))
    curdir = os.path.dirname(os.path.abspath(__file__))
    src = '%s/sample_data' % curdir
    assert desc == ru.read_json('%s/expected_desc_write_session.json' % src)
Example #36
def test_shared_data():

    for f in glob('%s/file*.txt' % cur_dir):
        os.remove(f)

    os.system('echo "Hello" > %s/file1.txt' % cur_dir)
    os.system('echo "World" > %s/file2.txt' % cur_dir)


    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

            'resource': 'local.localhost',
            'walltime': 1,
            'cpus': 1
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data = ['%s/file1.txt' % cur_dir, '%s/file2.txt' % cur_dir]

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    with open('%s/output.txt' % cur_dir, 'r') as fp:
        assert [d.strip() for d in fp.readlines()] == ['Hello', 'World']

    os.remove('%s/file1.txt' % cur_dir)
    os.remove('%s/file2.txt' % cur_dir)
    os.remove('%s/output.txt' % cur_dir)
Example #37
def test_integration_local():

    """
    **Purpose**: Run an EnTK application on localhost
    """


    def create_single_task():

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = '/bin/echo'
        t1.arguments = ['hello']
        t1.copy_input_data = []
        t1.copy_output_data = []

        return t1


    p1 = Pipeline()
    p1.name = 'p1'
    s = Stage()
    s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())

    p1.add_stages(s)

    res_dict = {

            'resource': 'local.localhost',
            'walltime': 5,
            'cpus': 1,
            'project': ''

    }


    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
Example #38
def test_integration_local():

    """
    **Purpose**: Run an EnTK application on localhost
    """

    def create_single_task():

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/echo']
        t1.arguments = ['hello']
        t1.copy_input_data = []
        t1.copy_output_data = []

        return t1

    p1 = Pipeline()
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())

    p1.add_stages(s)

    res_dict = {

            'resource': 'local.localhost',
            'walltime': 5,
            'cpus': 1,
            'project': ''

    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
Example #39
def test_amgr_run_mock():

    p = Pipeline()
    s = Stage()
    t = Task()

    t.name       = 'simulation'
    t.executable = '/bin/date'
    s.tasks      = t
    p.add_stages(s)

    res_dict = {'resource': 'local.localhost',
                'walltime': 5,
                'cpus'    : 1,
                'project' : ''}

    appman = Amgr(hostname=host, port=port, rts="mock")
    appman.resource_desc = res_dict

    appman.workflow = [p]
    appman.run()
Example #40
def test_write_workflow():

    wf = list()
    wf.append(generate_pipeline(1))
    wf.append(generate_pipeline(2))

    amgr = AppManager(hostname=hostname, port=port)
    amgr.workflow = wf
    amgr._wfp = WFprocessor(sid=amgr._sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            mq_hostname=amgr._mq_hostname,
                            port=amgr._port,
                            resubmit_failed=amgr._resubmit_failed)
    amgr._wfp._initialize_workflow()
    wf = amgr._wfp.workflow

    write_workflow(wf, 'test')

    data = ru.read_json('test/entk_workflow.json')
    assert len(data) == len(wf)

    p_cnt = 0
    for p in data:
        assert p['uid'] == wf[p_cnt].uid
        assert p['name'] == wf[p_cnt].name
        assert p['state_history'] == wf[p_cnt].state_history
        s_cnt = 0
        for s in p['stages']:
            assert s['uid'] == wf[p_cnt].stages[s_cnt].uid
            assert s['name'] == wf[p_cnt].stages[s_cnt].name
            assert s['state_history'] == wf[p_cnt].stages[s_cnt].state_history
            for t in wf[p_cnt].stages[s_cnt].tasks:
                assert t.to_dict() in s['tasks']
            s_cnt += 1
        p_cnt += 1

    shutil.rmtree('test')
Example #41
def test_diff_rmq():

    def create_pipeline():

        p = Pipeline()

        s = Stage()

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/echo']
        t1.arguments = ['hello']
        t1.copy_input_data = []
        t1.copy_output_data = []

        s.add_tasks(t1)

        p.add_stages(s)

        return p
    

    res_dict = {

            'resource': 'local.localhost',
            'walltime': 5,
            'cpus': 1,

    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    p1 = create_pipeline()
    print(p1.uid, p1.stages[0].uid)
    appman.workflow = [p1]
    appman.run()
Example #42
def test_suspend_pipeline():

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost_anaconda' to execute locally
    res_dict = {

        'resource': 'local.localhost_anaconda',
        'walltime': 15,
        'cpus': 2,
    }

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()
Example #43
def test_amgr_run_mock():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.name = 'simulation'
    t.executable = ['/bin/date']
    s.tasks = t
    p.add_stages(s)

    res_dict = {

            'resource': 'local.localhost',
            'walltime': 5,
            'cpus': 1,
            'project': ''

    }

    appman = Amgr(hostname=hostname, port=port, rts="mock")
    appman.resource_desc = res_dict

    appman.workflow = [p]
    appman.run()
Example #44
if __name__ == '__main__':

    pipelines = []

    for cnt in range(10):
        pipelines.append(generate_pipeline())

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 1
    }

    # Assign resource request description to the Application Manager
    appman.resource_desc = res_dict

    # Assign the workflow as a set or list of Pipelines to the Application Manager
    # Note: The list order is not guaranteed to be preserved
    appman.workflow = set(pipelines)

    # Run the Application Manager
    appman.run()
Example #45
    # Create a dictionary to describe the resource request:
    # resource, queue, schema, walltime, cpus and gpus
    # resource is 'local.summit' to execute on OLCF Summit
    res_dict = {
        'resource': 'local.summit',
        'queue': 'batch',
        'schema': 'local',
        'walltime': 15,
        'cpus': 48,
        'gpus': 4
    }

    # Create Application Manager
    appman = AppManager()
    appman.resource_desc = res_dict

    p1 = generate_MD_pipeline()
    p2 = generate_ML_pipeline()

    pipelines = []
    pipelines.append(p1)
    pipelines.append(p2)

    # Assign the workflow as a list of Pipelines to the Application Manager. In
    # this way, all the pipelines in the list will execute concurrently.
    appman.workflow = pipelines

    # Run the Application Manager
    appman.run()
def test_rp_da_scheduler_bw():

    """
    **Purpose**: Run an EnTK application with the data-aware scheduler on NCSA Blue Waters
    """

    p1 = Pipeline()
    p1.name = 'p1'

    n = 10

    s1 = Stage()
    s1.name = 's1'
    for x in range(n):
        t = Task()
        t.name = 't%s' % x
        t.executable = ['/bin/hostname']
        t.arguments = ['>', 'hostname.txt']
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 16
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.lfs_per_process = 10
        t.download_output_data = ['hostname.txt > s1_t%s_hostname.txt' % x]

        s1.add_tasks(t)

    p1.add_stages(s1)

    s2 = Stage()
    s2.name = 's2'
    for x in range(n):
        t = Task()
        t.executable = ['/bin/hostname']
        t.arguments = ['>', 'hostname.txt']
        t.cpu_reqs['processes'] = 1
        t.cpu_reqs['threads_per_process'] = 16
        t.cpu_reqs['thread_type'] = ''
        t.cpu_reqs['process_type'] = ''
        t.download_output_data = ['hostname.txt > s2_t%s_hostname.txt' % x]
        t.tag = 't%s' % x

        s2.add_tasks(t)


    p1.add_stages(s2)

    res_dict = {
                'resource'      : 'ncsa.bw_aprun',
                'walltime'      : 10,
                'cpus'          : 128,
                'project'       : 'gk4',
                'queue'         : 'high'
            }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()

    for i in range(n):
        assert (open('s1_t%s_hostname.txt' % i, 'r').readline().strip() ==
                open('s2_t%s_hostname.txt' % i, 'r').readline().strip())


    txts = glob('%s/*.txt' % os.getcwd())
    for f in txts:
        os.remove(f)
    t.arguments = ['Hello World']  # Assign arguments for the task executable

    # Add Task to the Stage
    s.add_tasks(t)

    # Add Stage to the Pipeline
    p.add_stages(s)

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 1
    }

    # Assign resource request description to the Application Manager
    appman.resource_desc = res_dict

    # Assign the workflow as a set or list of Pipelines to the Application Manager
    # Note: The list order is not guaranteed to be preserved
    appman.workflow = set([p])

    # Run the Application Manager
    appman.run()
    }

    # Add Stage to the Pipeline
    p.add_stages(s1)

    return p


if __name__ == '__main__':

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

        'resource': 'local.localhost',
        'walltime': 15,
        'cpus': 2,
    }

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()
                        }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p

if __name__ == '__main__':

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

        'resource': 'local.localhost',
        'walltime': 15,
        'cpus': 2,
    }

    # Create Application Manager
    appman = AppManager()
    appman.resource_desc = res_dict

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()
    return p



if __name__ == '__main__':

    p1 = generate_pipeline(name='Pipeline 1', stages=1)
    p2 = generate_pipeline(name='Pipeline 2', stages=2)

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign the workflow as a set or list of Pipelines to the Application Manager
    # Note: The list order is not guaranteed to be preserved
    appman.workflow = set([p1, p2])

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 1
    }

    # Assign resource request description to the Application Manager
    appman.resource_desc = res_dict

    # Run the Application Manager
    appman.run()
if __name__ == '__main__':

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary to describe the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

    #   'resource': 'ncsa.bw_aprun',
    #   'walltime': 10,
    #   'cpus': 32,
    #   'project': 'bamm',
    #   'queue': 'high'

        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 2
    }

    # Assign resource request description to the Application Manager
    appman.resource_desc = res_dict

    # Assign the workflow as a set or list of Pipelines to the Application Manager
    # Note: The list order is not guaranteed to be preserved
    appman.workflow = set([generate_pipeline()])

    # Run the Application Manager
    appman.run()