Example #1
def test_shared_data():

    for f in glob('%s/file*.txt' % cur_dir):
        os.remove(f)

    os.system('echo "Hello" > %s/file1.txt' % cur_dir)
    os.system('echo "World" > %s/file2.txt' % cur_dir)

    # Create a resource description with the mandatory keys:
    # resource, walltime and cpus ('project' is also needed on HPC resources)
    # resource is 'local.localhost' to execute locally
    res_dict = {'resource': 'local.localhost', 'walltime': 1, 'cpus': 1}

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign the resource description to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data = ['%s/file1.txt' % cur_dir, '%s/file2.txt' % cur_dir]

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    with open('%s/output.txt' % cur_dir, 'r') as fp:
        assert [d.strip() for d in fp.readlines()] == ['Hello', 'World']

    os.remove('%s/file1.txt' % cur_dir)
    os.remove('%s/file2.txt' % cur_dir)
    os.remove('%s/output.txt' % cur_dir)
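The test above relies on a generate_pipeline() helper that is not part of the snippet. A minimal sketch of what it could look like, given that the test expects $SHARED/file1.txt and $SHARED/file2.txt to be concatenated into an output.txt that is downloaded back to the working directory (the task definition below is an assumption, not the original helper):

from radical.entk import Pipeline, Stage, Task

def generate_pipeline():

    p = Pipeline()
    s = Stage()

    t = Task()
    # Copy the shared files into the task sandbox, concatenate them,
    # and stage the result back so the test can inspect it.
    t.executable = '/bin/bash'
    t.arguments = ['-l', '-c', 'cat file1.txt file2.txt > output.txt']
    t.copy_input_data = ['$SHARED/file1.txt', '$SHARED/file2.txt']
    t.download_output_data = ['output.txt']

    s.add_tasks(t)
    p.add_stages(s)

    return p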
Example #2
def test_amgr_assign_shared_data(s, i, b, se):
    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)

    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    amgr.resource_desc = res_dict

    data = [s, i, b, se]

    for d in data:
        with pytest.raises(TypeError):
            amgr.shared_data = d

    amgr.shared_data = ['file1.txt', 'file2.txt']
    assert amgr._resource_manager.shared_data == ['file1.txt', 'file2.txt']
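The s, i, b and se arguments are not defined in this snippet; the pytest.raises(TypeError) loop suggests they stand for non-list values such as a string, an integer, a boolean and a set. One way they could be supplied, shown purely as an assumption, is hypothesis' @given decorator:

from hypothesis import given, strategies as st

# Hypothetical decoration; the original test's fixture/decorator is not shown above.
@given(s=st.text(), i=st.integers(), b=st.booleans(), se=st.sets(st.text()))
def test_amgr_assign_shared_data(s, i, b, se):
    ...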
Example #3
def test_amgr_assign_shared_data(s, i, b, se):
    amgr = Amgr(rts='radical.pilot', hostname=hostname, port=port)

    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': 20,
        'project': 'TG-MCB090174'
    }

    amgr.resource_desc = res_dict

    data = [s, i, b, se]

    for d in data:
        with pytest.raises(TypeError):
            amgr.shared_data = d

    amgr.shared_data = ['file1.txt', 'file2.txt']
    assert amgr._resource_manager.shared_data == ['file1.txt', 'file2.txt']
Example #4
def test_amgr_assign_shared_data():

    amgr = Amgr(rts='radical.pilot', hostname=host, port=port)

    res_dict = {'resource': 'xsede.supermic',
                'walltime': 30,
                'cpus'    : 20,
                'project' : 'TG-MCB090174'}

    amgr.resource_desc = res_dict
    amgr.shared_data   = ['file1.txt','file2.txt']

    assert amgr._rmgr.shared_data == ['file1.txt','file2.txt']
Example #5
def test_shared_data():

    for f in glob('%s/file*.txt' %cur_dir):
        os.remove(f)

    os.system('echo "Hello" > %s/file1.txt' %cur_dir)
    os.system('echo "World" > %s/file2.txt' %cur_dir)


    # Create a resource description with the mandatory keys:
    # resource, walltime and cpus ('project' is also needed on HPC resources)
    # resource is 'local.localhost' to execute locally
    res_dict = {
        'resource': 'local.localhost',
        'walltime': 1,
        'cpus': 1
    }

    os.environ['RADICAL_PILOT_DBURL'] = MLAB

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign the resource description to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data = ['%s/file1.txt' %cur_dir, '%s/file2.txt' %cur_dir]

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    with open('%s/output.txt' %cur_dir, 'r') as fp:
        assert [d.strip() for d in fp.readlines()] == ['Hello', 'World']

    os.remove('%s/file1.txt' %cur_dir)
    os.remove('%s/file2.txt' %cur_dir)
    os.remove('%s/output.txt' %cur_dir)
Example #6
        t = Task()
        t.name = 't%s' % (cnt + 1)
        t.pre_exec = ['export PATH=/home/karahbit/stress-ng-0.10.16:$PATH']
        t.executable = ['stress-ng']
        t.arguments = ['-c', '1', '-t', '100']
        t.cpu_reqs = {
            'processes': 1,
            'thread_type': None,
            'threads_per_process': 1,
            'process_type': None
        }

        s.add_tasks(t)

    p.add_stages(s)
    pipelines.add(p)

    # Resource and AppManager
    amgr = AppManager(hostname=hostname, port=port)
    amgr.workflow = pipelines
    amgr.shared_data = []

    amgr.resource_desc = {
        'resource': 'local.localhost',
        'walltime': 10,
        'cpus': 8
    }

    amgr.run()

    print("--- %s seconds ---" % (time.time() - start_time))
Example #7
    p.add_stages(s2)

    # logger.info('adding stage {} with {} tasks'.format(s.name, s._task_count))
    # logger.info('adding pipeline {} with {} stages'.format(p.name, p._stage_count))

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    res_dict = {
        'resource': 'xsede.bridges',
        'project': 'mc3bggp',
        'queue': 'RM',
        'walltime': walltime,
        'cpus': 2**len(final_hparams) * 28,
        'access_schema': 'gsissh'
    }

    # Assign the resource description to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data = [
        '%s/binaries/hyperspaces.py' % cur_dir,
        '%s/binaries/optimization.py' % cur_dir
    ]

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()
Example #8
    # resource is 'local.localhost' to execute locally
    res_dict = {
        'resource': 'local.localhost',
        'walltime': 1,
        'cpus': 1
    }

    os.environ['RADICAL_PILOT_DBURL'] = 'mongodb://*****:*****@ds227821.mlab.com:27821/entk_0_7_0_release'

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign the resource description to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data = ['%s/file1.txt' %cur_dir, '%s/file2.txt' %cur_dir]

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    for x in range(10):
        with open('%s/output_%s.txt' % (cur_dir, x + 1), 'r') as fp:
            print('Output %s: ' % (x + 1), fp.readlines())
        os.remove('%s/output_%s.txt' % (cur_dir, x + 1))

Example #9
                Kconfig.md_reference, Kconfig.md_run_dir + Kconfig.md_run_file,
                '%s/%s' % (Kconfig.helper_scripts, script_ana)
            ]
        print("shared_data_all", shared_data_all)
        #if Kconfig.ndx_file is not None:
        #    rman.shared_data.append(Kconfig.ndx_file)

        # Create Application Manager, only one extasy script on one rabbit-mq server now
        port = int(os.environ.get('RMQ_PORT', 5672))
        hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
        appman = AppManager(hostname='two.radical-project.org',
                            port=port)  #port=args.port)
        #appman = AppManager(hostname='localhost', port=5672)
        # appman = AppManager(port=) # if using docker, specify port here.
        appman.resource_desc = res_dict
        appman.shared_data = shared_data_all

        # Assign resource manager to the Application Manager
        #appman.resource_manager = rman

        # Assign the workflow as a set of Pipelines to the Application Manager
        appman.workflow = set([wf])

        # Run the Application Manager
        appman.run()

    except Exception as ex:

        print('Error: {0}'.format(str(ex)))
        print(traceback.format_exc())
Example #10
    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    res_dict = {
        'resource': 'xsede.bridges',
        'project': 'mc3bggp',
        'queue': 'RM',
        'walltime': 90,
        'cpus': (2**hparams) * 28,
        'access_schema': 'gsissh'
    }

    # Assign the resource description to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data = []
    appman.shared_data += [
        '/home/jdakka/hyperspace/constellation/constellation/gbm/space2/optimize.py'
    ]
    appman.shared_data += [
        '/home/jdakka/hyperspace/constellation/constellation/data/fashion/t10k-images-idx3-ubyte.gz'
    ]
    appman.shared_data += [
        '/home/jdakka/hyperspace/constellation/constellation/data/fashion/t10k-labels-idx1-ubyte.gz'
    ]
    appman.shared_data += [
        '/home/jdakka/hyperspace/constellation/constellation/data/fashion/train-images-idx3-ubyte.gz'
    ]
    appman.shared_data += [
        '/home/jdakka/hyperspace/constellation/constellation/data/fashion/train-labels-idx1-ubyte.gz'
    ]
Example #11
    # resource is 'local.localhost' to execute locally
    res_dict = {
        'resource': 'local.localhost',
        'walltime': 1,
        'cpus': 1
    }

    os.environ['RADICAL_PILOT_DBURL'] = 'mongodb://*****:*****@ds227821.mlab.com:27821/entk_0_7_0_release'

    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    # Assign the resource description to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data = ['%s/file1.txt' %cur_dir, '%s/file2.txt' %cur_dir]

    p = generate_pipeline()

    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

    for x in range(10):
        with open('%s/output_%s.txt' % (cur_dir, x + 1), 'r') as fp:
            print('Output %s: ' % (x + 1), fp.readlines())
        os.remove('%s/output_%s.txt' % (cur_dir, x + 1))

Example #12
    # Create Application Manager
    appman = AppManager(hostname=hostname, port=port)

    res_dict = {
        'resource': 'xsede.bridges',
        'project': 'mc3bggp',
        'queue': 'RM',
        'walltime': 80 * 3,
        'cpus': (2**hparams) * 28,
        'access_schema': 'gsissh'
    }


    # Assign the resource description to the Application Manager
    appman.resource_desc = res_dict
    appman.shared_data  = ['/home/jdakka/hyperspace/constellation/constellation/gbm/space4/optimize.py']
    appman.shared_data += ['/home/jdakka/hyperspace/constellation/constellation/data/fashion/t10k-images-idx3-ubyte.gz']
    appman.shared_data += ['/home/jdakka/hyperspace/constellation/constellation/data/fashion/t10k-labels-idx1-ubyte.gz']
    appman.shared_data += ['/home/jdakka/hyperspace/constellation/constellation/data/fashion/train-images-idx3-ubyte.gz']
    appman.shared_data += ['/home/jdakka/hyperspace/constellation/constellation/data/fashion/train-labels-idx1-ubyte.gz']
    
    
    # Assign the workflow as a set of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()

Example #13
    res_dict = {
        'resource': 'xsede.supermic',
        'walltime': 30,
        'cpus': total_cores,
        'project': 'TG-MCB090174',
        'access_schema': 'gsissh'
    }

    # Download analysis file from MobleyLab repo
    os.system(
        'curl -O https://raw.githubusercontent.com/MobleyLab/alchemical-analysis/master/alchemical_analysis/alchemical_analysis.py'
    )

    # Create Application Manager
    amgr = AppManager(port=33231, hostname='two.radical-project.org')
    amgr.resource_desc = res_dict

    # Assign the data shared between tasks to the Application Manager
    amgr.shared_data = [
        './CB7G3.gro', './CB7G3.ndx', './CB7G3.top', './CB7G3_template.mdp',
        './analysis_1.py', './analysis_2.py', './determine_convergence.py',
        './alchemical_analysis.py', './3atomtypes.itp', './3_GMX.itp',
        './cucurbit_7_uril_GMX.itp'
    ]

    # Assign the workflow as a set of Pipelines to the Application Manager
    amgr.workflow = pipelines

    # Run the Application Manager
    amgr.run()
Example #14
    # Create Application Manager
    appman = AppManager(hostname=resource_cfg['rabbitmq']['hostname'],
                        port=resource_cfg['rabbitmq']['port'])

    # Create a dictionary describing the mandatory keys:
    # resource, walltime, cpus, queue and access_schema
    res_dict = {
        'resource': resource_cfg[resource]['label'],
        'walltime': resource_cfg[resource]['walltime'],
        'cpus': resource_cfg[resource]['cpus'],
        'queue': resource_cfg[resource]['queue'],
        'access_schema': resource_cfg[resource]['access_schema']
    }

    if 'project' in resource_cfg[resource]:
        res_dict['project'] = resource_cfg[resource]['project']

    # Assign the resource description to the Application Manager
    appman.resource_desc = res_dict

    # Data shared between multiple tasks can be transferred while the
    # job is waiting on queue
    appman.shared_data = workflow_cfg[resource]['shared_data']

    # Assign the workflow as a set or list of Pipelines to the Application Manager
    appman.workflow = [p]

    # Run the Application Manager
    appman.run()
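The example above reads everything from resource_cfg and workflow_cfg dictionaries, typically loaded from configuration files that are not shown. A minimal sketch of the structure they would need, derived only from the keys accessed above (the concrete values are placeholders):

resource = 'xsede.bridges'                     # placeholder resource name

resource_cfg = {
    'rabbitmq': {'hostname': 'localhost', 'port': 5672},
    'xsede.bridges': {
        'label'        : 'xsede.bridges',
        'walltime'     : 60,
        'cpus'         : 28,
        'queue'        : 'RM',
        'access_schema': 'gsissh',
        'project'      : 'ABC123'              # optional; used only if present
    }
}

workflow_cfg = {
    'xsede.bridges': {
        'shared_data': ['input1.dat', 'input2.dat']
    }
}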
Example #15
def run():
	
	# Initialize the EnTK App Manager
	amgr = AppManager(hostname="localhost", port=5672)
	
	# Apply the resource configuration provided by the user
	res_desc = {'resource': "rutgers.amarel",
		'walltime': 60,
		'cpus': 4,
		'queue': "kopp_1",
		'project': "",
		'schema': "local"}
	amgr.resource_desc = res_desc
	
	# Push the input data to the shared directory
	amgr.shared_data = ['CMIP6_CanESM5_Omon_piControl_r1i1p1f1_zos_6000-6199.nc', 'xarray_script.py']
	
	# New pipeline
	p1 = Pipeline()
	p1.name = "Test-pipeline1"
	p2 = Pipeline()
	p2.name = "Test-pipeline2"
	
	# First stage with two tasks
	s1 = Stage()
	s1.name = "Test-stage1"
	
	s2 = Stage()
	s2.name = "Test-stage2"
	
	t1 = Task()
	t1.name = "Test-task1"
	t1.pre_exec = ["pip3 install --upgrade; pip3 install pandas zarr cftime toolz \"dask[complete]\" bottleneck xarray"]
	t1.executable = 'python3'
	t1.arguments = ['xarray_script.py']
	t1.copy_input_data = ["$SHARED/CMIP6_CanESM5_Omon_piControl_r1i1p1f1_zos_6000-6199.nc", "$SHARED/xarray_script.py"]
	t1.download_output_data = ["test_netcdf_file.nc > test_netcdf_file1.nc"]
	
	t2 = copy.deepcopy(t1)
	t2.name = "Test-task2"
	t2.download_output_data = ["test_netcdf_file.nc > test_netcdf_file2.nc"]
	
	t3 = copy.deepcopy(t1)
	t3.name = "Test-task3"
	t3.download_output_data = ["test_netcdf_file.nc > test_netcdf_file3.nc"]
	
	t4 = copy.deepcopy(t1)
	t4.name = "Test-task4"
	t4.download_output_data = ["test_netcdf_file.nc > test_netcdf_file4.nc"]
	
	# Assign tasks and stages to pipeline
	s1.add_tasks(t1)
	s1.add_tasks(t2)
	p1.add_stages(s1)
	
	s2.add_tasks(t3)
	s2.add_tasks(t4)
	p2.add_stages(s2)
	
	# Assign the pipeline to the workflow and run
	amgr.workflow = [p1, p2]
	amgr.run()
	
	# Done
	return None