def test_amgr_terminate():
    """Smoke-test AppManager.terminate() with a manually wired TaskManager."""

    from radical.entk.execman.rp import TaskManager

    # Resource request description for the SuperMIC machine.
    res_dict = {'resource': 'xsede.supermic',
                'walltime': 30,
                'cpus': 20,
                'project': 'TG-MCB090174'}

    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)
    amgr.resource_desc = res_dict

    # Bring up the message queues, then attach a task manager by hand so
    # that terminate() has something real to tear down.
    amgr._setup_mqs()
    amgr._rmq_cleanup = True
    amgr._task_manager = TaskManager(sid='test',
                                     pending_queue=[],
                                     completed_queue=[],
                                     mq_hostname=amgr._mq_hostname,
                                     rmgr=amgr._resource_manager,
                                     port=amgr._port)

    amgr.terminate()
# --- Example 2 ---
def test_amgr_terminate():
    """Smoke-test AppManager.terminate() with a manually wired TaskManager."""

    from radical.entk.execman.rp import TaskManager

    # Resource request description for the SuperMIC machine.
    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    amgr = Amgr(rts='radical.pilot', hostname=host, port=port)
    amgr.resource_desc = res_dict

    # Bring up the message queues, then attach a task manager by hand so
    # that terminate() has something real to tear down.
    amgr._setup_mqs()
    amgr._rmq_cleanup = True
    amgr._task_manager = TaskManager(sid='test',
                                     pending_queue=[],
                                     completed_queue=[],
                                     rmgr=amgr._rmgr,
                                     rmq_conn_params=amgr._rmq_conn_params)

    amgr.terminate()
# --- Example 3 ---
        # NOTE(review): this span is the body (and tail) of an enclosing loop
        # over index `i` (see number_of_cores[i]) whose header is not visible
        # here; variables appman, Pipelines, number_of_cores, start_time and
        # the *_time lists are defined outside this fragment.

        # Build the resource request description. resource, walltime and cpus
        # are the mandatory keys; project/queue/access_schema are
        # site-specific extras for xsede.comet_ssh.
        res_dict = {
            'resource': 'xsede.comet_ssh',
            'project': 'unc100',
            'queue': 'compute',
            'walltime': 500,
            'cpus': number_of_cores[i],
            'access_schema': 'gsissh'
        }

        # Assign resource request description to the Application Manager
        appman.resource_desc = res_dict
        # Assign the workflow as a set or list of Pipelines to the Application Manager
        # Note: The list order is not guaranteed to be preserved
        appman.workflow = set(Pipelines)
        # Run the workflow and time both the run and the teardown separately.
        run_time_start = time.time()
        appman.run()
        terminateTimeStart = time.time()
        appman.terminate()
        terminateTimeArray.append(time.time() - terminateTimeStart)

        run_time.append(time.time() - run_time_start)
        total_time.append(time.time() - start_time)
        print("runTime", run_time, "number_of_cores", number_of_cores[i])

    # After the loop: dump all accumulated timings for this experiment.
    print("FINAL--->", "total_time", total_time, "runTime", run_time,
          "terminateTimeArray", terminateTimeArray)
# --- Example 4 ---
def run_experiment(exp_dir, debug_mode):
    """Load an experiment's configuration and build EnTK pipelines for it.

    Reads "resource.yml" and "config.yml" from *exp_dir*, generates one
    pipeline per user-requested module, and hands the pipelines to an EnTK
    AppManager configured from the resource file.

    Parameters
    ----------
    exp_dir : str
        Experiment directory; must contain "resource.yml" and "config.yml".
    debug_mode : bool
        When True, pretty-print the pipeline/stage/task structure and exit
        without creating an AppManager.

    Returns
    -------
    None

    Exits the process (``sys.exit(1)``) when a required configuration file
    is missing.
    """

    # Initialize a list for pipelines
    pipelines = []

    # Define the configuration and resource file names
    rfile = os.path.join(exp_dir, "resource.yml")
    cfile = os.path.join(exp_dir, "config.yml")

    # Both configuration files are mandatory.
    if not os.path.isfile(cfile):
        print('{} does not exist'.format(cfile))
        sys.exit(1)
    if not os.path.isfile(rfile):
        print('{} does not exist'.format(rfile))
        sys.exit(1)

    # Load the resource and experiment configuration files
    with open(rfile, 'r') as fp:
        rcfg = yaml.safe_load(fp)
    with open(cfile, 'r') as fp:
        ecfg = yaml.safe_load(fp)

    # Make sure the output directory exists (idempotent).
    os.makedirs(os.path.join(exp_dir, "output"), exist_ok=True)

    # Config entries that are options, not SLR projection modules.
    reserved_econfig_entries = [
        "global-options", "total-options", "extremesealevel-options"
    ]

    # Global options are merged into every module's options below.
    global_options = ecfg.get("global-options", {})

    # Loop through the user-requested modules
    for this_mod in ecfg:

        # Skip this entry if it's not associated with SLR projection workflow
        if this_mod in reserved_econfig_entries:
            continue

        # Load the pipeline configuration file for this module
        pcfg_file = os.path.join(os.path.dirname(__file__), "modules",
                                 ecfg[this_mod]['module_set'],
                                 ecfg[this_mod]['module'], "pipeline.yml")
        if not os.path.isfile(pcfg_file):
            print('{} does not exist'.format(pcfg_file))
            sys.exit(1)
        with open(pcfg_file, 'r') as fp:
            pcfg = yaml.safe_load(fp)

        # Append the global options to this module (globals win on conflict).
        ecfg[this_mod]["options"].update(global_options)

        # Generate a pipeline for this module
        pipe_name = "-".join(
            (this_mod, ecfg[this_mod]['module_set'], ecfg[this_mod]['module']))
        pipelines.append(
            GeneratePipeline(pcfg, ecfg[this_mod], pipe_name, exp_dir))

    # Print out PST (pipeline/stage/task) info if in debug mode, then exit.
    if debug_mode:
        for p in pipelines:
            print("Pipeline {}:".format(p.name))
            print("################################")
            pprint(p.to_dict())
            for s in p.stages:
                print("Stage {}:".format(s.name))
                print("============================")
                pprint(s.to_dict())
                for t in s.tasks:
                    print("Task {}:".format(t.name))
                    print("----------------------------")
                    pprint(t.to_dict())

        # Exit
        sys.exit(0)

    # Initialize the EnTK App Manager
    amgr = AppManager(hostname=rcfg['rabbitmq']['hostname'],
                      port=rcfg['rabbitmq']['port'],
                      autoterminate=False)

    # Apply the resource configuration provided by the user
    res_desc = {
        'resource': rcfg['resource-desc']['name'],
        'walltime': rcfg['resource-desc']['walltime'],
        'cpus': rcfg['resource-desc']['cpus'],
        'queue': rcfg['resource-desc']['queue'],
        'project': rcfg['resource-desc']['project']
    }
    amgr.resource_desc = res_desc

    # Assign the list of pipelines to the workflow
    amgr.workflow = pipelines
    # NOTE(review): amgr.run() is never called before terminate(), so the
    # workflow is assigned but not executed — confirm this is intentional.
    amgr.terminate()

    return None