Example #1
def test_amgr_setup_mqs():

    amgr = Amgr(hostname=hostname, port=port)
    assert amgr._setup_mqs()

    assert len(amgr._pending_queue) == 1
    assert len(amgr._completed_queue) == 1

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()

    qs = [
        '%s-tmgr-to-sync' % amgr._sid,
        '%s-cb-to-sync' % amgr._sid,
        '%s-enq-to-sync' % amgr._sid,
        '%s-deq-to-sync' % amgr._sid,
        '%s-sync-to-tmgr' % amgr._sid,
        '%s-sync-to-cb' % amgr._sid,
        '%s-sync-to-enq' % amgr._sid,
        '%s-sync-to-deq' % amgr._sid
    ]

    for q in qs:
        mq_channel.queue_delete(queue=q)

    with open('.%s.txt' % amgr._sid, 'r') as fp:
        lines = fp.readlines()

    lines = [line.strip() for line in lines]

    assert set(qs) < set(lines)
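
All eight queue names in qs are derived from the session id with fixed per-component suffixes. A small helper that makes the pattern explicit (illustrative only, not part of the EnTK API):

def sync_queue_names(sid):
    # One queue per component (tmgr, cb, enq, deq) in each direction,
    # to and from the synchronizer, all prefixed with the session id.
    components = ['tmgr', 'cb', 'enq', 'deq']
    names  = ['%s-%s-to-sync' % (sid, c) for c in components]
    names += ['%s-sync-to-%s' % (sid, c) for c in components]
    return names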
Example #2
def test_stage_post_exec():

    global p1
    
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'

    for t in range(NUM_TASKS):
        s.add_tasks(create_single_task())

    s.post_exec = condition

    p1.add_stages(s)

    res_dict = {

            'resource': 'local.localhost',
            'walltime': 30,
            'cpus': 1,
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    appman = AppManager(rts='radical.pilot', hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
Example #3
def test_sid_in_mqs():

    appman = Amgr(hostname=hostname, port=port)
    appman._setup_mqs()
    sid = appman._sid

    qs = [
        '%s-tmgr-to-sync' % sid,
        '%s-cb-to-sync' % sid,
        '%s-enq-to-sync' % sid,
        '%s-deq-to-sync' % sid,
        '%s-sync-to-tmgr' % sid,
        '%s-sync-to-cb' % sid,
        '%s-sync-to-enq' % sid,
        '%s-sync-to-deq' % sid
    ]

    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=hostname, port=port))
    mq_channel = mq_connection.channel()

    def callback():
        print(True)

    for q in qs:

        try:
            mq_channel.basic_consume(callback, queue=q, no_ack=True)
        except Exception as ex:
            raise EnTKError(ex)
Example #4
def test_amgr_read_config():

    amgr = Amgr(hostname=host, port=port)

    assert amgr._reattempts == 3

    assert amgr._rmq_cleanup
    assert amgr._autoterminate

    assert not amgr._write_workflow
    assert not amgr._resubmit_failed

    assert amgr._rts              == 'radical.pilot'
    assert amgr._num_pending_qs   == 1
    assert amgr._num_completed_qs == 1
    assert amgr._rts_config       == {"sandbox_cleanup": False,
                                      "db_cleanup"     : False}

    d = {"hostname"       : "radical.two",
         "port"           : 25672,
         "username"       : user,
         "password"       : passwd,
         "reattempts"     : 5,
         "resubmit_failed": True,
         "autoterminate"  : False,
         "write_workflow" : True,
         "rts"            : "mock",
         "rts_config"     : {"sandbox_cleanup": True,
                             "db_cleanup"     : True},
         "pending_qs"     : 2,
         "completed_qs"   : 3,
         "rmq_cleanup"    : False}

    ru.write_json(d, './config.json')
    amgr._read_config(config_path='./',
                      hostname=None,
                      port=None,
                      username=None,
                      password=None,
                      reattempts=None,
                      resubmit_failed=None,
                      autoterminate=None,
                      write_workflow=None,
                      rts=None,
                      rmq_cleanup=None,
                      rts_config=None)

    assert amgr._hostname         == d['hostname']
    assert amgr._port             == d['port']
    assert amgr._reattempts       == d['reattempts']
    assert amgr._resubmit_failed  == d['resubmit_failed']
    assert amgr._autoterminate    == d['autoterminate']
    assert amgr._write_workflow   == d['write_workflow']
    assert amgr._rts              == d['rts']
    assert amgr._rts_config       == d['rts_config']
    assert amgr._num_pending_qs   == d['pending_qs']
    assert amgr._num_completed_qs == d['completed_qs']
    assert amgr._rmq_cleanup      == d['rmq_cleanup']

    os.remove('./config.json')
Example #5
def test_wfp_check_processor():

    p = Pipeline()
    s = Stage()
    t = Task()

    t.executable = '/bin/date'
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      rmq_conn_params=amgr._rmq_conn_params,
                      resubmit_failed=False)

    wfp.start_processor()
    assert wfp.check_processor()

    wfp.terminate_processor()
    assert not wfp.check_processor()
Example #6
def test_amgr_cleanup_mqs():

    amgr = Amgr(hostname=hostname, port=port)
    sid = amgr._sid

    amgr._setup_mqs()
    amgr._cleanup_mqs()

    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=hostname, port=port))

    qs = [
        '%s-tmgr-to-sync' % sid,
        '%s-cb-to-sync' % sid,
        '%s-enq-to-sync' % sid,
        '%s-deq-to-sync' % sid,
        '%s-sync-to-tmgr' % sid,
        '%s-sync-to-cb' % sid,
        '%s-sync-to-enq' % sid,
        '%s-sync-to-deq' % sid,
        '%s-pendingq-1' % sid,
        '%s-completedq-1' % sid
    ]

    for q in qs:
        with pytest.raises(pika.exceptions.ChannelClosed):
            mq_channel = mq_connection.channel()
            mq_channel.queue_purge(q)
Example #7
    def __init__(self, resource='local', comm_server=None):
        """The workhorse of high throughput binding affinity calculations.

        Manages arbitrary number of protocols on any resource (including supercomputers).

        Parameters
        ----------
        resource: str
            The name of the resource where the protocols will be run. This is usually then name of the supercomputer
            or 'local' if the job will be executed locally. (the default is to try to run locally).
        comm_server: tuple(str, int)
            The communication server used by the execution system. Specify a hostname and port number as a tuple. If
            None, then the dedicated server might be used from the resource description if present.
        """

        self.resource = yaml.load(resource_stream(__name__, 'resources.yaml'), Loader=yaml.SafeLoader)[resource]

        if comm_server is None:
            comm_server = self.resource.get('dedicated_rabbitmq_server')

        self._protocols = list()
        self._app_manager = AppManager(*comm_server)

        # Profiler for Runner
        self._uid = ru.generate_id('radical.htbac.workflow_runner')
        self._logger = ru.get_logger('radical.htbac.workflow_runner')
        self._prof = ru.Profiler(name=self._uid)
        self._prof.prof('create workflow_runner obj', uid=self._uid)
        self._root_directories = list()
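
A minimal usage sketch for this constructor, assuming the enclosing class is named Runner and that 'local' is a key in resources.yaml (both are assumptions, not confirmed by the snippet):

# Hypothetical usage: run protocols locally, pointing the execution
# system at a RabbitMQ server on localhost:5672.
runner = Runner(resource='local', comm_server=('localhost', 5672))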
Example #8
    def run(self, strong_scaled=1, autoterminate=True, queue='high', walltime=1440):
        pipelines = set()
        input_data = list()

        for protocol in self._protocols:
            gen_pipeline = protocol.generate_pipeline()
            pipelines.add(gen_pipeline)
            input_data.extend(protocol.input_data)
            self.ids[protocol.id()] = gen_pipeline
            # protocol.id() is the uuid; the mapped value is the generated pipeline

            self.total_replicas += protocol.replicas

        self._cores = self._cores * self.total_replicas
        print('Running on', self._cores, 'cores.')

        res_dict = {'resource': 'ncsa.bw_aprun',
                    'walltime': walltime,
                    'cores': int(self._cores*strong_scaled),
                    'project': 'bamm',
                    'queue': queue,
                    'access_schema': 'gsissh'}

        # Create Resource Manager object with the above resource description
        resource_manager = ResourceManager(res_dict)
        resource_manager.shared_data = input_data

        # Create Application Manager
        self.app_manager = AppManager(hostname=self._hostname, port=self._port, autoterminate=autoterminate)
        self.app_manager.resource_manager = resource_manager
        self.app_manager.assign_workflow(pipelines)

        self._prof.prof('execution_run')
        print('Running...')
        self.app_manager.run()    # this method is blocking until all pipelines show state = completed
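
As the comment above notes, run() blocks until every pipeline reaches the completed state. A caller that needs to stay responsive can hand the call to a worker thread; a minimal sketch using only the standard library (whether AppManager.run() is safe to call off the main thread depends on the EnTK version, so treat this as an assumption):

import threading

# Hypothetical: let the blocking run() execute on a worker thread;
# join() then waits until all pipelines have completed.
worker = threading.Thread(target=app_manager.run)
worker.start()
# ... other work ...
worker.join()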
Example #9
def test_issue_271():

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary describing the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 1
    }

    # Assign resource request description to the Application Manager
    appman.resource_desc = res_dict

    # Assign the workflow as a set or list of Pipelines to the Application Manager
    # Note: The list order is not guaranteed to be preserved
    p = generate_pipeline()
    appman.workflow = set([p])

    # Run the Application Manager
    appman.run()

    # Assert that all tasks in the first stage failed
    for t in p.stages[0].tasks:
        assert t.state == states.FAILED
Example #10
def test_sid_in_mqs():

    # FIXME: what is tested / asserted here?

    appman = Amgr(hostname=host, port=port)
    sid    = appman._sid
    appman._setup_mqs()

    qs = ['%s-tmgr-to-sync' % sid,
          '%s-cb-to-sync'   % sid,
          '%s-sync-to-tmgr' % sid,
          '%s-sync-to-cb'   % sid]

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(
                                                      host=host, port=port))
    mq_channel    = mq_connection.channel()

    def callback():
        pass

    for q in qs:
        try:
            mq_channel.basic_consume(callback, queue=q, no_ack=True)

        except Exception as ex:
            raise EnTKError(ex)
Example #11
def test_issue_199():


    # Create a dictionary describing the three mandatory keys:
    # resource, walltime and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

            'resource': 'local.localhost',
            'walltime': 10,
            'cpus': 1
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict

    p = generate_pipeline()

    # Assign the workflow as a list of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()
Example #12
def test_amgr_setup_mqs():

    amgr = Amgr(hostname=hostname, port=port)
    assert amgr._setup_mqs()

    assert len(amgr._pending_queue) == 1
    assert len(amgr._completed_queue) == 1

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()

    qs = [
        '%s-tmgr-to-sync' % amgr._sid,
        '%s-cb-to-sync' % amgr._sid,
        '%s-enq-to-sync' % amgr._sid,
        '%s-deq-to-sync' % amgr._sid,
        '%s-sync-to-tmgr' % amgr._sid,
        '%s-sync-to-cb' % amgr._sid,
        '%s-sync-to-enq' % amgr._sid,
        '%s-sync-to-deq' % amgr._sid
    ]

    for q in qs:
        mq_channel.queue_delete(queue=q)

    with open('.%s.txt' % amgr._sid, 'r') as fp:
        lines = fp.readlines()

    for ind, val in enumerate(lines):
        lines[ind] = val.strip()

    assert set(qs) < set(lines)
Example #13
def test_amgr_cleanup_mqs():

    amgr = Amgr(hostname=hostname, port=port)
    sid = amgr._sid

    amgr._setup_mqs()
    amgr._cleanup_mqs()

    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=hostname, port=port))

    qs = ['%s-tmgr-to-sync' % sid,
          '%s-cb-to-sync' % sid,
          '%s-enq-to-sync' % sid,
          '%s-deq-to-sync' % sid,
          '%s-sync-to-tmgr' % sid,
          '%s-sync-to-cb' % sid,
          '%s-sync-to-enq' % sid,
          '%s-sync-to-deq' % sid,
          '%s-pendingq-1' % sid,
          '%s-completedq-1' % sid]

    for q in qs:
        with pytest.raises(pika.exceptions.ChannelClosed):
            mq_channel = mq_connection.channel()
            mq_channel.queue_purge(q)
Example #14
def test_amgr_setup_mqs():

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    assert len(amgr._pending_queue)   == 1
    assert len(amgr._completed_queue) == 1

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(
                                       host=amgr._hostname, port=amgr._port))
    mq_channel    = mq_connection.channel()

    qs = ['%s-tmgr-to-sync' % amgr._sid,
          '%s-cb-to-sync'   % amgr._sid,
          '%s-sync-to-tmgr' % amgr._sid,
          '%s-sync-to-cb'   % amgr._sid,
          '%s-pendingq-1'   % amgr._sid,
          '%s-completedq-1' % amgr._sid]

    for q in qs:
        mq_channel.queue_delete(queue=q)

    with open('.%s.txt' % amgr._sid, 'r') as fp:
        lines = fp.readlines()

    for ind, val in enumerate(lines):
        lines[ind] = val.strip()

    assert set(qs) == set(lines)
Example #15
def test_sid_in_mqs():

    appman = Amgr(hostname=hostname, port=port)
    appman._setup_mqs()
    sid = appman._sid

    qs = [
        '%s-tmgr-to-sync' % sid,
        '%s-cb-to-sync' % sid,
        '%s-enq-to-sync' % sid,
        '%s-deq-to-sync' % sid,
        '%s-sync-to-tmgr' % sid,
        '%s-sync-to-cb' % sid,
        '%s-sync-to-enq' % sid,
        '%s-sync-to-deq' % sid
    ]

    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(
            host=hostname,
            port=port)
    )
    mq_channel = mq_connection.channel()

    def callback():
        print(True)

    for q in qs:

        try:
            mq_channel.basic_consume(callback, queue=q, no_ack=True)
        except Exception as ex:
            raise EnTKError(ex)
Example #16
def test_wfp_start_processor():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    assert wfp.start_processor()
    assert not wfp._enqueue_thread
    assert not wfp._dequeue_thread
    assert not wfp._enqueue_thread_terminate.is_set()
    assert not wfp._dequeue_thread_terminate.is_set()
    assert not wfp._wfp_terminate.is_set()
    assert wfp._wfp_process.is_alive()

    wfp._wfp_terminate.set()
    wfp._wfp_process.join()
Example #17
def test_wfp_check_processor():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp.start_processor()
    assert wfp.check_processor()

    wfp.terminate_processor()
    assert not wfp.check_processor()
Example #18
def test_issue_26():

    def create_pipeline():

        p = Pipeline()

        s = Stage()

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/echo']
        t1.arguments = ['hello']
        t1.copy_input_data = []
        t1.copy_output_data = []

        s.add_tasks(t1)

        p.add_stages(s)

        return p


    res_dict = {

            'resource': 'local.localhost',
            'walltime': 10,
            'cpus': 1,
            'project': ''

    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port, autoterminate=False)
    appman.resource_desc = res_dict


    p1 = create_pipeline()
    appman.workflow = [p1]
    appman.run()
    print(p1.uid, p1.stages[0].uid)

    p2 = create_pipeline()
    appman.workflow = [p2]
    appman.run()
    print(p2.uid, p2.stages[0].uid)

    appman.resource_terminate()

    lhs = int(p1.stages[0].uid.split('.')[-1]) + 1
    rhs = int(p2.stages[0].uid.split('.')[-1])
    assert lhs == rhs

    for t in p1.stages[0].tasks:
        for tt in p2.stages[0].tasks:
            lhs = int(t.uid.split('.')[-1]) + 1
            rhs = int(tt.uid.split('.')[-1])
            assert lhs == rhs
Example #19
def test_state_order():

    """
    **Purpose**: Test if the Pipeline, Stage and Task are assigned their states in the correct order
    """

    def create_single_task():

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/date']
        t1.copy_input_data = []
        t1.copy_output_data = []

        return t1

    p1 = Pipeline()
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())

    p1.add_stages(s)

    res_dict = {

            'resource': 'local.localhost',
            'walltime': 5,
            'cpus': 1,
            'project': ''

    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'
    
    appman = Amgr(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    appman.workflow = [p1]
    appman.run()

    p_state_hist = p1.state_history
    assert p_state_hist == ['DESCRIBED', 'SCHEDULING', 'DONE']

    s_state_hist = p1.stages[0].state_history
    assert s_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'DONE']

    tasks = p1.stages[0].tasks

    for t in tasks:

        t_state_hist = t.state_history
        assert t_state_hist == ['DESCRIBED', 'SCHEDULING', 'SCHEDULED', 'SUBMITTING', 'SUBMITTED',
                            'EXECUTED', 'DEQUEUEING', 'DEQUEUED', 'DONE']
Example #20
def test_wfp_dequeue():

    p = Pipeline()
    s = Stage()
    t = Task()

    t.executable = '/bin/date'
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp.initialize_workflow()

    assert p.state == states.INITIAL
    assert p.stages[0].state == states.INITIAL

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    p.state = states.SCHEDULED
    p.stages[0].state = states.SCHEDULING

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=amgr._hostname, port=amgr._port))
    mq_channel = mq_connection.channel()

    mq_channel.basic_publish(exchange='',
                             routing_key='%s' % amgr._completed_queue[0],
                             body=task_as_dict)

    wfp.start_processor()

    th = mt.Thread(target=func_for_dequeue_test, name='temp-proc', args=(p, ))
    th.start()
    th.join()

    wfp.terminate_processor()

    assert p.state == states.DONE
    assert p.stages[0].state == states.DONE

    for t in p.stages[0].tasks:
        assert t.state == states.DONE
Example #21
def test_assign_workload_exception():

    appman = AppManager()
    data_type = [1, 'a', True, [1], set([1])]

    for data in data_type:

        with pytest.raises(TypeError):
            appman.assign_workflow(data)
Example #23
def test_wfp_workflow_incomplete():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    p.stages[0].state = states.SCHEDULING
    p.state = states.SCHEDULED
    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    import json
    import pika

    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()
    mq_channel.basic_publish(exchange='',
                             routing_key='%s-completedq-1' % amgr._sid,
                             body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test,
                   name='temp-proc',
                   args=(wfp, ))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
Example #24
def test_amgr_synchronizer():

    logger = ru.get_logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')
    amgr = Amgr(hostname=hostname, port=port)

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=hostname, port=port))
    mq_channel = mq_connection.channel()

    amgr._setup_mqs()

    p = Pipeline()
    s = Stage()

    # Create and add 100 tasks to the stage
    for cnt in range(100):

        t = Task()
        t.executable = ['some-executable-%s' % cnt]

        s.add_tasks(t)

    p.add_stages(s)
    p._assign_uid(amgr._sid)
    p._validate()

    amgr.workflow = [p]

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    # Run the test function in a separate process
    proc = Process(target=func_for_synchronizer_test, name='temp-proc',
                   args=(amgr._sid, p, logger, profiler))

    proc.start()
    proc.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULING

    assert p.stages[0].state == states.SCHEDULING
    assert p.state == states.SCHEDULING

    amgr._terminate_sync.set()
    sync_thread.join()
Example #25
def test_wfp_workflow_incomplete():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    assert wfp.workflow_incomplete()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    p.stages[0].state = states.SCHEDULING
    p.state = states.SCHEDULED
    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    import json
    import pika

    task_as_dict = json.dumps(t.to_dict())
    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=amgr._mq_hostname, port=amgr._port))
    mq_channel = mq_connection.channel()
    mq_channel.basic_publish(exchange='',
                             routing_key='%s-completedq-1' % amgr._sid,
                             body=task_as_dict)

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_dequeue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    assert not wfp.workflow_incomplete()
Example #26
def test_amgr_read_config():

    amgr = Amgr()

    assert amgr._mq_hostname == 'localhost'
    assert amgr._port == 5672
    assert amgr._reattempts == 3
    assert not amgr._resubmit_failed
    assert amgr._autoterminate
    assert not amgr._write_workflow
    assert amgr._rts == 'radical.pilot'
    assert amgr._num_pending_qs == 1
    assert amgr._num_completed_qs == 1
    assert amgr._rmq_cleanup
    assert amgr._rts_config == {"sandbox_cleanup": False, "db_cleanup": False}

    d = {"hostname": "radical.two",
         "port": 25672,
         "reattempts": 5,
         "resubmit_failed": True,
         "autoterminate": False,
         "write_workflow": True,
         "rts": "mock",
         "rts_config": { "sandbox_cleanup": True, "db_cleanup": True},
         "pending_qs": 2,
         "completed_qs": 3,
         "rmq_cleanup": False}

    ru.write_json(d, './config.json')
    amgr._read_config(config_path='./',
                      hostname=None,
                      port=None,
                      reattempts=None,
                      resubmit_failed=None,
                      autoterminate=None,
                      write_workflow=None,
                      rts=None,
                      rmq_cleanup=None,
                      rts_config=None)

    assert amgr._mq_hostname == d['hostname']
    assert amgr._port == d['port']
    assert amgr._reattempts == d['reattempts']
    assert amgr._resubmit_failed == d['resubmit_failed']
    assert amgr._autoterminate == d['autoterminate']
    assert amgr._write_workflow == d['write_workflow']
    assert amgr._rts == d['rts']
    assert amgr._rts_config == d['rts_config']
    assert amgr._num_pending_qs == d['pending_qs']
    assert amgr._num_completed_qs == d['completed_qs']
    assert amgr._rmq_cleanup == d['rmq_cleanup']

    os.remove('./config.json')
Example #27
    def test_amgr_read_config(self, mocked_init, mocked_PlainCredentials,
                              mocked_ConnectionParameters, d):

        amgr = Amgr(hostname='host',
                    port='port',
                    username='******',
                    password='******')

        d["rts"] = "mock"
        d["rts_config"] = {"sandbox_cleanup": True, "db_cleanup": True}

        ru.write_json(d, './config.json')
        amgr._read_config(config_path='./',
                          hostname=None,
                          port=None,
                          username=None,
                          password=None,
                          reattempts=None,
                          resubmit_failed=None,
                          autoterminate=None,
                          write_workflow=None,
                          rts=None,
                          rmq_cleanup=None,
                          rts_config=None)

        self.assertEqual(amgr._hostname, d['hostname'])
        self.assertEqual(amgr._port, d['port'])
        self.assertEqual(amgr._reattempts, d['reattempts'])
        self.assertEqual(amgr._resubmit_failed, d['resubmit_failed'])
        self.assertEqual(amgr._autoterminate, d['autoterminate'])
        self.assertEqual(amgr._write_workflow, d['write_workflow'])
        self.assertEqual(amgr._rts, d['rts'])
        self.assertEqual(amgr._rts_config, d['rts_config'])
        self.assertEqual(amgr._num_pending_qs, d['pending_qs'])
        self.assertEqual(amgr._num_completed_qs, d['completed_qs'])
        self.assertEqual(amgr._rmq_cleanup, d['rmq_cleanup'])

        d['rts'] = 'another'
        ru.write_json(d, './config.json')
        print(d)
        with self.assertRaises(ValueError):
            amgr._read_config(config_path='./',
                              hostname=None,
                              port=None,
                              username=None,
                              password=None,
                              reattempts=None,
                              resubmit_failed=None,
                              autoterminate=None,
                              write_workflow=None,
                              rts=None,
                              rmq_cleanup=None,
                              rts_config=None)
Example #28
def test_issue_236():

    '''
    Create folder temp to transfer as input to task:
    .
    ./temp
    ./temp/dir1
    ./temp/dir1/file2.txt
    ./temp/file1.txt
    '''

    os.makedirs('%s/temp' %cur_dir)
    os.makedirs('%s/temp/dir1' %cur_dir)
    os.system('echo "Hello world" > %s/temp/file1.txt' %cur_dir)
    os.system('echo "Hello world" > %s/temp/dir1/file2.txt' %cur_dir)


    # Create a dictionary describing the three mandatory keys:
    # resource, walltime and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

            'resource': 'local.localhost',
            'walltime': 10,
            'cpus': 1
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict

    p = generate_pipeline()

    # Assign the workflow as a list of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    # Assert folder movement
    assert len(glob('/tmp/temp*')) >= 1
    assert len(glob('/tmp/temp/dir*')) == 1
    assert len(glob('/tmp/temp/*.txt')) == 1
    assert len(glob('/tmp/temp/dir1/*.txt')) == 1

    # Cleanup
    shutil.rmtree('%s/temp' %cur_dir)
    shutil.rmtree('/tmp/temp')
Example #29
def test_amgr_assign_shared_data():

    amgr = Amgr(rts='radical.pilot', hostname=host, port=port)

    res_dict = {'resource': 'xsede.supermic',
                'walltime': 30,
                'cpus'    : 20,
                'project' : 'TG-MCB090174'}

    amgr.resource_desc = res_dict
    amgr.shared_data   = ['file1.txt','file2.txt']

    assert amgr._rmgr.shared_data == ['file1.txt','file2.txt']
Example #30
def test_amgr_run():

    amgr = Amgr(hostname=host, port=port)

    with pytest.raises(MissingError):
        amgr.run()

    p1 = Pipeline()
    p2 = Pipeline()
    p3 = Pipeline()

    with pytest.raises(MissingError):
        amgr.workflow = [p1, p2, p3]
Example #31
def test_amgr_synchronizer():

    logger = ru.Logger('radical.entk.temp_logger')
    profiler = ru.Profiler(name='radical.entk.temp')
    amgr = Amgr(hostname=hostname, port=port)

    amgr._setup_mqs()

    p = Pipeline()
    s = Stage()

    # Create and add 100 tasks to the stage
    for cnt in range(100):

        t = Task()
        t.executable = ['some-executable-%s' % cnt]

        s.add_tasks(t)

    p.add_stages(s)
    p._assign_uid(amgr._sid)
    p._validate()

    amgr.workflow = [p]

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    # Start the synchronizer method in a thread
    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    # Run the test function in a separate process
    proc = Process(target=func_for_synchronizer_test, name='temp-proc',
                   args=(amgr._sid, p, logger, profiler))

    proc.start()
    proc.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULING

    assert p.stages[0].state == states.SCHEDULING
    assert p.state == states.SCHEDULING

    amgr._terminate_sync.set()
    sync_thread.join()
Example #32
def test_amgr_resource_terminate():

    res_dict = {

        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'

    }

    from radical.entk.execman.rp import TaskManager

    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)
    amgr.resource_desc = res_dict
    amgr._setup_mqs()
    amgr._rmq_cleanup = True
    amgr._task_manager = TaskManager(sid='test',
                                     pending_queue=list(),
                                     completed_queue=list(),
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._resource_manager,
                                     port=amgr._port
                                     )

    amgr.resource_terminate()
Example #33
def test_amgr_terminate():

    res_dict = {

        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'

    }

    from radical.entk.execman.rp import TaskManager

    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)
    amgr.resource_desc = res_dict
    amgr._setup_mqs()
    amgr._rmq_cleanup = True
    amgr._task_manager = TaskManager(sid='test',
                                     pending_queue=list(),
                                     completed_queue=list(),
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._resource_manager,
                                     port=amgr._port
                                     )

    amgr.terminate()
Example #34
def test_issue_255():

    def create_pipeline():

        p = Pipeline()

        s = Stage()

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['sleep']
        t1.arguments = ['10']

        s.add_tasks(t1)

        p.add_stages(s)

        return p

    res_dict = {

        'resource': 'local.localhost',
        'walltime': 5,
        'cpus': 1,
        'project': ''

    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port, autoterminate=False)
    appman.resource_desc = res_dict

    p1 = create_pipeline()
    appman.workflow = [p1]
    appman.run()

    # p1 = create_pipeline()
    # appman.workflow = [p1]
    # appman.run()

    # appman.resource_terminate()

    tmgr = appman._task_manager

    tmgr.terminate_manager()
    # tmgr.terminate_heartbeat()

    tmgr.start_manager()
Example #35
def test_wfp_enqueue():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_enqueue_test,
                   name='temp-proc',
                   args=(wfp, ))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED

    assert p.stages[0].state == states.SCHEDULED
    assert p.state == states.SCHEDULING
Example #36
def test_write_workflow():

    try:
        wf = list()
        wf.append(generate_pipeline(1))
        wf.append(generate_pipeline(2))

        amgr = AppManager(hostname=hostname, port=port)
        amgr.workflow = wf
        amgr._wfp = WFprocessor(sid=amgr._sid,
                                workflow=amgr._workflow,
                                pending_queue=amgr._pending_queue,
                                completed_queue=amgr._completed_queue,
                                mq_hostname=amgr._mq_hostname,
                                port=amgr._port,
                                resubmit_failed=amgr._resubmit_failed)
        amgr._wfp._initialize_workflow()
        wf = amgr._wfp.workflow

        write_workflow(wf, 'test')

        data = ru.read_json('test/entk_workflow.json')
        assert len(data) == len(wf) + 1

        stack = data.pop(0)
        assert list(stack.keys()) == ['stack']
        assert list(stack['stack'].keys()) == ['sys', 'radical']
        assert list(stack['stack']['sys'].keys()) == ['python', 'pythonpath', 'virtualenv']
        assert list(stack['stack']['radical'].keys()) == ['saga', 'radical.pilot', 'radical.utils', 'radical.entk']

        p_cnt = 0
        for p in data:
            assert p['uid'] == wf[p_cnt].uid
            assert p['name'] == wf[p_cnt].name
            assert p['state_history'] == wf[p_cnt].state_history
            s_cnt = 0
            for s in p['stages']:
                assert s['uid'] == wf[p_cnt].stages[s_cnt].uid
                assert s['name'] == wf[p_cnt].stages[s_cnt].name
                assert s['state_history'] == wf[p_cnt].stages[s_cnt].state_history
                for t in wf[p_cnt].stages[s_cnt].tasks:
                    assert t.to_dict() in s['tasks']
                s_cnt += 1
            p_cnt += 1

    except Exception as ex:
        shutil.rmtree('test')
        raise
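
The assertions above imply the following top-level layout for test/entk_workflow.json: a 'stack' record first, followed by one dict per pipeline. A rough sketch (all values illustrative):

expected_layout = [
    {'stack': {'sys'    : {'python': '...', 'pythonpath': '...', 'virtualenv': '...'},
               'radical': {'saga': '...', 'radical.pilot': '...',
                           'radical.utils': '...', 'radical.entk': '...'}}},
    {'uid'          : 'pipeline.0000',           # matches wf[0].uid
     'name'         : 'p1',
     'state_history': ['DESCRIBED', 'SCHEDULING'],
     'stages'       : [{'uid'          : 'stage.0000',
                        'name'         : 's1',
                        'state_history': ['DESCRIBED'],
                        'tasks'        : []}]},
]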
Example #38
def test_amgr_assign_workflow():

    amgr = Amgr()

    with pytest.raises(TypeError):
        amgr.workflow = [1, 2, 3]

    with pytest.raises(TypeError):
        amgr.workflow = set([1, 2, 3])

    p1 = Pipeline()
    p2 = Pipeline()
    p3 = Pipeline()

    amgr._workflow = [p1, p2, p3]
    amgr._workflow = set([p1, p2, p3])
Example #39
def test_wfp_enqueue():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.executable = ['/bin/date']
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      mq_hostname=amgr._mq_hostname,
                      port=amgr._port,
                      resubmit_failed=False)

    wfp._initialize_workflow()

    amgr.workflow = [p]
    profiler = ru.Profiler(name='radical.entk.temp')

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    assert p.stages[0].state == states.INITIAL
    assert p.state == states.INITIAL

    amgr._terminate_sync = Event()
    sync_thread = Thread(target=amgr._synchronizer, name='synchronizer-thread')
    sync_thread.start()

    proc = Process(target=func_for_enqueue_test, name='temp-proc', args=(wfp,))
    proc.start()
    proc.join()

    amgr._terminate_sync.set()
    sync_thread.join()

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED

    assert p.stages[0].state == states.SCHEDULED
    assert p.state == states.SCHEDULING
Example #40
def test_amgr_assign_workflow():

    amgr = Amgr(hostname=host, port=port, username=username,
            password=password)

    with pytest.raises(TypeError):
        amgr.workflow = [1, 2, 3]

    with pytest.raises(TypeError):
        amgr.workflow = set([1, 2, 3])

    p1 = Pipeline()
    p2 = Pipeline()
    p3 = Pipeline()

    amgr._workflow = [p1, p2, p3]
    amgr._workflow = set([p1, p2, p3])
Example #41
def test_wfp_enqueue():

    p = Pipeline()
    s = Stage()
    t = Task()

    t.executable = '/bin/date'
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port, username=username,
            password=password)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      rmq_conn_params=amgr._rmq_conn_params,
                      resubmit_failed=False)

    assert p.uid           is not None
    assert p.stages[0].uid is not None

    for t in p.stages[0].tasks:
        assert t.uid is not None

    assert p.state           == states.INITIAL
    assert p.stages[0].state == states.INITIAL

    for t in p.stages[0].tasks:
        assert t.state == states.INITIAL

    wfp.start_processor()

    th = mt.Thread(target=func_for_enqueue_test, name='temp-proc', args=(p,))
    th.start()
    th.join()

    wfp.terminate_processor()

    assert p.state           == states.SCHEDULING
    assert p.stages[0].state == states.SCHEDULED

    for t in p.stages[0].tasks:
        assert t.state == states.SCHEDULED
Example #42
def test_write_session_description():

    amgr = AppManager(hostname=hostname, port=port)
    amgr.resource_desc = {'resource' : 'xsede.stampede',
                          'walltime' : 59,
                          'cpus'     : 128,
                          'gpus'     : 64,
                          'project'  : 'xyz',
                          'queue'    : 'high'}

    workflow      = [generate_pipeline(1), generate_pipeline(2)]
    amgr.workflow = workflow

    amgr._wfp = WFprocessor(sid=amgr.sid,
                            workflow=amgr._workflow,
                            pending_queue=amgr._pending_queue,
                            completed_queue=amgr._completed_queue,
                            resubmit_failed=amgr._resubmit_failed,
                            rmq_conn_params=amgr._rmq_conn_params)
    amgr._workflow = amgr._wfp.workflow

    amgr._task_manager = TaskManager(sid=amgr._sid,
                                     pending_queue=amgr._pending_queue,
                                     completed_queue=amgr._completed_queue,
                                     rmgr=amgr._rmgr,
                                     rmq_conn_params=amgr._rmq_conn_params
                                     )

    write_session_description(amgr)

    desc = ru.read_json('%s/radical.entk.%s.json' % (amgr._sid, amgr._sid))
    src  = '%s/sample_data' % pwd

    assert desc == ru.read_json('%s/expected_desc_write_session.json' % src)
Example #43
def test_shared_data():

    for f in glob('%s/file*.txt' % cur_dir):
        os.remove(f)

    os.system('echo "Hello" > %s/file1.txt' % cur_dir)
    os.system('echo "World" > %s/file2.txt' % cur_dir)

    # Create a dictionary describing the three mandatory keys:
    # resource, walltime and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {'resource': 'local.localhost', 'walltime': 1, 'cpus': 1}

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data = ['%s/file1.txt' % cur_dir, '%s/file2.txt' % cur_dir]

    p = generate_pipeline()

    # Assign the workflow as a list of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    with open('%s/output.txt' % cur_dir, 'r') as fp:
        assert [d.strip() for d in fp.readlines()] == ['Hello', 'World']

    os.remove('%s/file1.txt' % cur_dir)
    os.remove('%s/file2.txt' % cur_dir)
    os.remove('%s/output.txt' % cur_dir)
Example #44
def test_amgr_resource_description_assignment():

    res_dict = {'resource': 'xsede.supermic',
                'walltime': 30,
                'cpus'    : 1000,
                'project' : 'TG-MCB090174'}

    amgr = Amgr(rts='radical.pilot')
    amgr.resource_desc = res_dict

    from radical.entk.execman.rp import ResourceManager as RM_RP
    assert isinstance(amgr._rmgr, RM_RP)

    amgr = Amgr(rts='mock')
    amgr.resource_desc = res_dict

    from radical.entk.execman.mock import ResourceManager as RM_MOCK
    assert isinstance(amgr._rmgr, RM_MOCK)
Example #45
def test_wfp_workflow_incomplete():

    p = Pipeline()
    s = Stage()
    t = Task()

    t.executable = '/bin/date'
    s.add_tasks(t)
    p.add_stages(s)

    amgr = Amgr(hostname=hostname, port=port, username=username,
            password=password)
    amgr._setup_mqs()

    wfp = WFprocessor(sid=amgr._sid,
                      workflow=[p],
                      pending_queue=amgr._pending_queue,
                      completed_queue=amgr._completed_queue,
                      rmq_conn_params=amgr._rmq_conn_params,
                      resubmit_failed=False)

    for t in p.stages[0].tasks:
        t.state = states.COMPLETED

    task_as_dict  = json.dumps(t.to_dict())
    credentials = pika.PlainCredentials(amgr._username, amgr._password)
    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(
                                          host=amgr._hostname, port=amgr._port,
                                          credentials=credentials))
    mq_channel    = mq_connection.channel()

    mq_channel.basic_publish(exchange    = '',
                             routing_key = '%s' % amgr._completed_queue[0],
                             body        = task_as_dict)

    wfp.start_processor()

    th = mt.Thread(target=func_for_dequeue_test, name='temp-proc', args=(p,))
    th.start()
    th.join()

    wfp.terminate_processor()

    assert not wfp.workflow_incomplete()
Example #46
def test_integration_local():

    """
    **Purpose**: Run an EnTK application on localhost
    """

    def create_single_task():

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/echo']
        t1.arguments = ['hello']
        t1.copy_input_data = []
        t1.copy_output_data = []

        return t1

    p1 = Pipeline()
    p1.name = 'p1'

    s = Stage()
    s.name = 's1'
    s.tasks = create_single_task()
    s.add_tasks(create_single_task())

    p1.add_stages(s)

    res_dict = {

            'resource': 'local.localhost',
            'walltime': 5,
            'cpus': 1,
            'project': ''

    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict
    appman.workflow = [p1]
    appman.run()
Example #47
def test_amgr_assign_shared_data(s, i, b, se):
    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)

    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    amgr.resource_desc = res_dict

    data = [s, i, b, se]

    for d in data:
        with pytest.raises(TypeError):
            amgr.shared_data = d

    amgr.shared_data = ['file1.txt', 'file2.txt']
    assert amgr._resource_manager.shared_data == ['file1.txt', 'file2.txt']
Example #48
def test_amgr_resource_description_assignment():

    res_dict = {

        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 1000,
        'project': 'TG-MCB090174'

    }

    amgr = Amgr(rts='radical.pilot')
    amgr.resource_desc = res_dict
    from radical.entk.execman.rp import ResourceManager
    assert isinstance(amgr._resource_manager, ResourceManager)

    amgr = Amgr(rts='mock')
    amgr.resource_desc = res_dict
    from radical.entk.execman.mock import ResourceManager
    assert isinstance(amgr._resource_manager, ResourceManager)
Example #49
def test_amgr_setup_mqs():

    amgr = Amgr(hostname=host, port=port)
    amgr._setup_mqs()

    assert len(amgr._pending_queue)   == 1
    assert len(amgr._completed_queue) == 1

    mq_connection = pika.BlockingConnection(pika.ConnectionParameters(
                                       host=amgr._hostname, port=amgr._port))
    mq_channel    = mq_connection.channel()

    qs = ['%s-tmgr-to-sync' % amgr._sid,
          '%s-cb-to-sync'   % amgr._sid,
          '%s-sync-to-tmgr' % amgr._sid,
          '%s-sync-to-cb'   % amgr._sid,
          '%s-pendingq-1'   % amgr._sid,
          '%s-completedq-1' % amgr._sid]

    for q in qs:
        mq_channel.queue_delete(queue=q)
Example #50
def test_amgr_assign_shared_data(s, i, b, se):
    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)

    res_dict = {

        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'

    }

    amgr.resource_desc = res_dict

    data = [s, i, b, se]

    for d in data:
        with pytest.raises(TypeError):
            amgr.shared_data = d

    amgr.shared_data = ['file1.txt','file2.txt']
    assert amgr._resource_manager.shared_data == ['file1.txt','file2.txt'] 
Example #51
def test_diff_rmq():

    def create_pipeline():

        p = Pipeline()

        s = Stage()

        t1 = Task()
        t1.name = 'simulation'
        t1.executable = ['/bin/echo']
        t1.arguments = ['hello']
        t1.copy_input_data = []
        t1.copy_output_data = []

        s.add_tasks(t1)

        p.add_stages(s)

        return p
    

    res_dict = {

            'resource': 'local.localhost',
            'walltime': 5,
            'cpus': 1,

    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB
    appman = AppManager(hostname=hostname, port=port)
    appman.resource_desc = res_dict

    p1 = create_pipeline()
    print(p1.uid, p1.stages[0].uid)
    appman.workflow = [p1]
    appman.run()
Example #52
def test_amgr_run_mock():

    p = Pipeline()
    s = Stage()
    t = Task()
    t.name = 'simulation'
    t.executable = ['/bin/date']
    s.tasks = t
    p.add_stages(s)

    res_dict = {

            'resource': 'local.localhost',
            'walltime': 5,
            'cpus': 1,
            'project': ''

    }

    appman = Amgr(hostname=hostname, port=port, rts="mock")
    appman.resource_desc = res_dict

    appman.workflow = [p]
    appman.run()
Example #53
def test_amgr_run():

    amgr = Amgr()

    with pytest.raises(MissingError):
        amgr.run()

    p1 = Pipeline()
    p2 = Pipeline()
    p3 = Pipeline()

    amgr._workflow = [p1, p2, p3]

    with pytest.raises(MissingError):
        amgr.run()
Example #54
def test_shared_data():

    for f in glob('%s/file*.txt' %cur_dir):
        os.remove(f)

    os.system('echo "Hello" > %s/file1.txt' %cur_dir)
    os.system('echo "World" > %s/file2.txt' %cur_dir)


    # Create a dictionary describing the three mandatory keys:
    # resource, walltime and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

            'resource': 'local.localhost',
            'walltime': 1,
            'cpus': 1
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data = ['%s/file1.txt' %cur_dir, '%s/file2.txt' %cur_dir]

    p = generate_pipeline()

    # Assign the workflow as a list of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    with open('%s/output.txt' %cur_dir, 'r') as fp:
        assert [d.strip() for d in fp.readlines()] == ['Hello', 'World']

    os.remove('%s/file1.txt' %cur_dir)
    os.remove('%s/file2.txt' %cur_dir)
    os.remove('%s/output.txt' %cur_dir)
                        }

        # Add Stage to the Pipeline
        p.add_stages(s1)

    return p

if __name__ == '__main__':

    # Create a dictionary describing the three mandatory keys:
    # resource, walltime and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

        'resource': 'local.localhost',
        'walltime': 15,
        'cpus': 2,
    }

    # Create Application Manager
    appman = AppManager()
    appman.resource_desc = res_dict

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()
Example #56
        t3.copy_input_data = ['$Pipeline_%s_Stage_%s_Task_%s/ccount.txt' % (p.name, s2.name, s2_task_uids[cnt])]
        # Download the output of the current task to the current location
        t3.download_output_data = ['chksum.txt > chksum_%s.txt' % cnt]

        # Add the Task to the Stage
        s3.add_tasks(t3)

    # Add Stage to the Pipeline
    p.add_stages(s3)

    return p

if __name__ == '__main__':

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary describing the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

    #   'resource': 'ncsa.bw_aprun',
    #   'walltime': 10,
    #   'cpus': 32,
    #   'project': 'bamm',
    #   'queue': 'high'
        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 2
    }
    print(p.stages)
    print(p.stages[1].tasks)

    # Create a dictionary describing the resource request:
    # resource, walltime, cores, access_schema, queue and project
    # resource is 'xsede.supermic' here; use 'local.localhost' to execute locally
    res_dict = {

            'resource': 'xsede.supermic',
            'walltime': 240,
            'cores': 96,
            'access_schema': 'gsissh',
            'queue': 'workq',
            'project': 'TG-MCB090174',
    }

    # Create Resource Manager object with the above resource description
    rman = ResourceManager(res_dict)

    # Create Application Manager
    appman = AppManager()

    # Assign resource manager to the Application Manager
    appman.resource_manager = rman

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.assign_workflow(set([p]))

    # Run the Application Manager
##    appman.run()
    s = Stage()

    # Create a Task object
    t = Task()
    t.name = 'my-first-task'        # Assign a name to the task (optional, do not use ',' or '_')
    t.executable = '/bin/echo'   # Assign executable to the task
    t.arguments = ['Hello World']  # Assign arguments for the task executable

    # Add Task to the Stage
    s.add_tasks(t)

    # Add Stage to the Pipeline
    p.add_stages(s)

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary describing the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 1
    }

    # Assign resource request description to the Application Manager
    appman.resource_desc = res_dict

    # Assign the workflow as a set or list of Pipelines to the Application Manager
Example #59
    # Add Stage to the Pipeline
    p.add_stages(s3)

    return p


if __name__ == '__main__':

    pipelines = []

    for cnt in range(10):
        pipelines.append(generate_pipeline())

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Create a dictionary describing the three mandatory keys:
    # resource, walltime, and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 1
    }

    # Assign resource request description to the Application Manager
    appman.resource_desc = res_dict

    # Assign the workflow as a set or list of Pipelines to the Application Manager

    # Create a dictionary describing the three mandatory keys:
    # resource, walltime and cpus
    # resource is 'local.localhost' to execute locally
    res_dict = {

            'resource': 'local.localhost',
            'walltime': 1,
            'cpus': 1
    }

    os.environ['RADICAL_PILOT_DBURL'] = 'mongodb://*****:*****@ds227821.mlab.com:27821/entk_0_7_0_release'

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign resource manager to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data = ['%s/file1.txt' %cur_dir, '%s/file2.txt' %cur_dir]

    p = generate_pipeline()

    # Assign the workflow as a list of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    for x in range(10):
        with open('%s/output_%s.txt' %(cur_dir,x+1), 'r') as fp: