def fresh_config():
    return Config(
        executors=[
            HighThroughputExecutor(
                label='cc_in2p3_htex',
                max_workers=1,
                provider=GridEngineProvider(
                    channel=LocalChannel(),
                    nodes_per_block=2,
                    init_blocks=2,
                    max_blocks=2,
                    walltime="00:20:00",
                    scheduler_options=user_opts['cc_in2p3']['scheduler_options'],
                    worker_init=user_opts['cc_in2p3']['worker_init'],
                ),
            )
        ],
    )
from funcx.config import Config
from parsl.providers import GridEngineProvider
from parsl.addresses import address_by_route

config = Config(
    provider=GridEngineProvider(
        walltime='10000:00:00',
        nodes_per_block=1,
        init_blocks=1,
        max_blocks=4,
        scheduler_options="#$ -pe smp 8",
    ),
    cores_per_worker=8,
)

# For now, visible_to must be a list of URNs for globus auth users or groups, e.g.:
#   urn:globus:auth:identity:{user_uuid}
#   urn:globus:groups:id:{group_uuid}
meta = {
    "name": "blt_large",
    "description": "",
    "organization": "",
    "department": "",
    "public": False,
    "visible_to": []
}
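# Hedged sketch (not part of the original snippet): how visible_to could be
# populated once the relevant Globus Auth UUIDs are known, following the URN
# formats described above. The UUIDs below are placeholders only.
example_user_uuid = "00000000-0000-0000-0000-000000000000"   # placeholder
example_group_uuid = "11111111-1111-1111-1111-111111111111"  # placeholder
meta["visible_to"] = [
    f"urn:globus:auth:identity:{example_user_uuid}",
    f"urn:globus:groups:id:{example_group_uuid}",
]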
                move_files=False,
            ),
        )
    )
elif args.scheduler_name == 'grid_engine':
    executors.append(HighThroughputExecutor(
        label=step,
        worker_debug=True,
        address=address_by_hostname(),
        max_workers=int(cores_per_node[step]),  # cap workers, or else defaults to infinity.
        provider=GridEngineProvider(
            channel=channel,
            launcher=SingleNodeLauncher(),
            nodes_per_block=node_count,
            worker_init=worker_init,
            init_blocks=1,
            max_blocks=1,
            walltime=walltimes[step],
            scheduler_options=options,
            queue='gpu.q',  # enables Wynton GPU queues
        ),
    ))
elif args.scheduler_name == 'cobalt':
    executors.append(HighThroughputExecutor(
        label=step,
        worker_debug=True,
        address=address_by_hostname(),
        provider=CobaltProvider(
            queue=args.scheduler_partition,
            account=args.scheduler_bank,  # project name to submit the job
config = Config(
    executors=[HighThroughputExecutor(worker_debug=True,
                                      max_workers=2,
                                      address=address_by_route(),
                                      provider=GridEngineProvider(init_blocks=1,
                                                                  max_blocks=20),
                                      label="workers"),
               ThreadPoolExecutor(label="login", max_threads=20)
               ],
)
"""
config = Config(
    executors=[IPyParallelExecutor(workers_per_node=10,
                                   provider=GridEngineProvider(init_blocks=1,
                                                               max_blocks=20),
                                   label="workers"),
               ThreadPoolExecutor(label="login", max_threads=20)
               ],
)

parsl.set_stream_logger()
parsl.load(config)

from data_generation import generate_data

proteomefile = sys.argv[1]
directory = f'/home/users/ellenrichards/{sys.argv[2]}/'
threshold = 1000
import parsl
import numpy
from parsl.config import Config
from parsl.app.app import python_app
from parsl.executors import HighThroughputExecutor
from parsl.providers import GridEngineProvider
from parsl.channels import LocalChannel, SSHChannel
from parsl.addresses import address_by_query

THREADS = 8

# config = Config(executors=[ThreadPoolExecutor(max_threads=THREADS, label='local_threads')])
config = Config(executors=[
    HighThroughputExecutor(
        label='local_threads',
        provider=GridEngineProvider(
            channel=LocalChannel(),
            worker_init="source /idiap/user/tpereira/conda/bin/activate /idiap/user/tpereira/conda/envs/snakemaking"
        ))
])

parsl.load(config)


# App that estimates pi by placing points in a box
@python_app
def pi(num_points):
    print("############## PI #############")
    from random import random
    # import ipdb; ipdb.set_trace()
    inside = 0
    for i in range(num_points):
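# Hedged completion (assumption): the pi app above is cut off mid-loop. A
# standard Monte Carlo estimate along the lines its comment describes would be:
@python_app
def pi_estimate(num_points):
    from random import random
    inside = 0
    for _ in range(num_points):
        x, y = random(), random()        # random point in the unit square
        if x * x + y * y <= 1.0:         # falls inside the quarter circle
            inside += 1
    return 4.0 * inside / num_points     # area ratio scaled up to pi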
from parsl.config import Config
from parsl.channels import LocalChannel
from parsl.providers import GridEngineProvider
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_query

config = Config(
    executors=[
        HighThroughputExecutor(
            label='cc_in2p3_htex',
            address=address_by_query(),
            max_workers=2,
            provider=GridEngineProvider(
                channel=LocalChannel(),
                nodes_per_block=1,
                init_blocks=2,
                max_blocks=2,
                walltime="00:20:00",
                scheduler_options='',  # Input your scheduler_options if needed
                worker_init='',        # Input your worker_init if needed
            ),
        )
    ],
)
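# Minimal usage sketch (assumption, not part of the snippet above): load the
# config and run a trivial app through the Grid Engine-backed executor.
import parsl
from parsl.app.app import python_app

parsl.load(config)

@python_app
def worker_hostname():
    import platform
    return platform.node()   # executes on a worker launched by GridEngineProvider

print(worker_hostname().result())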
from parsl.config import Config
from parsl.channels import LocalChannel
from parsl.providers import GridEngineProvider
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_query
from parsl.tests.utils import get_rundir

# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
#   1) create a local `user_opts.py`, or
#   2) delete the user_opts import below and replace all appearances of `user_opts` with the
#      literal value (i.e., user_opts['swan']['username'] -> 'your_username')
from .user_opts import user_opts

config = Config(
    executors=[
        HighThroughputExecutor(
            label='cc_in2p3_htex',
            address=address_by_query(),
            max_workers=2,
            provider=GridEngineProvider(
                channel=LocalChannel(),
                nodes_per_block=1,
                init_blocks=2,
                max_blocks=2,
                walltime="00:20:00",
                scheduler_options=user_opts['cc_in2p3']['scheduler_options'],
                worker_init=user_opts['cc_in2p3']['worker_init'],
            ),
        )
    ],
    run_dir=get_rundir(),
)
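# Hedged sketch of option 2 above (assumption): a literal user_opts stand-in one
# could define instead of the import, keyed the way the config indexes it. The
# values shown are illustrative only.
user_opts = {
    'cc_in2p3': {
        'scheduler_options': '',   # e.g. extra '#$ ...' directives for qsub
        'worker_init': '',         # e.g. 'module load anaconda; source activate parsl_env'
    }
}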
parser.add_argument(
    "--iterations",
    help="Number of iterations for bootstrapping. Optional. Set if bootstrapping.",
    type=int)
args = parser.parse_args()

cores = 48 if not args.cores else args.cores
iterations = 100 if not args.iterations else args.iterations

config = Config(executors=[
    HighThroughputExecutor(worker_debug=True,
                           cores_per_worker=cores,
                           address=address_by_route(),
                           provider=GridEngineProvider(
                               walltime='10000:00:00',
                               nodes_per_block=1,
                               init_blocks=1,
                               max_blocks=1,
                               scheduler_options=f"#$ -pe smp {cores}"),
                           label="workers")
],
)

# Enable parsl logging if you want, but it prints out a lot of (useful) info
# parsl.set_stream_logger()
parsl.load(config)

fu = run_raxml(args.mode,
               args.name,
               args.input,
               cores=cores,
               number=iterations,
               bootstrap=args.bootstrap).result()
import os
import sys
import csv
import parsl
from parsl.app.app import python_app, bash_app
from parsl.config import Config
from parsl.providers import GridEngineProvider
from parsl.executors import HighThroughputExecutor
from parsl.executors import IPyParallelExecutor
from parsl.executors import ThreadPoolExecutor
from parsl.addresses import address_by_route, address_by_query, address_by_hostname

config = Config(executors=[
    HighThroughputExecutor(worker_debug=True,
                           address=address_by_route(),
                           provider=GridEngineProvider(walltime='100:00:00',
                                                       init_blocks=1,
                                                       max_blocks=20),
                           label="workers"),
    ThreadPoolExecutor(label="login", max_threads=20)
],
)

parsl.set_stream_logger()
parsl.load(config)

from data_generation import generate_data

proteomefile = sys.argv[1]
directory = f'/home/users/ellenrichards/{sys.argv[2]}/'
threshold = 1000

if not os.path.isdir(directory):
from parsl.config import Config
from parsl.providers import GridEngineProvider
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_query

config = Config(
    executors=[
        HighThroughputExecutor(
            label='CC.IN2P3_HTEX',
            address=address_by_query(),
            provider=GridEngineProvider(
                nodes_per_block=1,
                init_blocks=1,
                max_blocks=1,
                # String to prepend to the '#$' directive block in the submit
                # script sent to the scheduler, e.g. '#$ -M <email address>'.
                scheduler_options='',
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',
                walltime='00:20:00',
            ),
        )
    ],
)
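# Hedged variant (assumption): the same provider with the two placeholder fields
# filled in, reusing directives and commands that appear elsewhere on this page.
provider = GridEngineProvider(
    nodes_per_block=1,
    init_blocks=1,
    max_blocks=1,
    scheduler_options='#$ -pe smp 8',                               # request an 8-slot SMP parallel environment
    worker_init='module load Anaconda; source activate parsl_env',  # set up the worker environment
    walltime='00:20:00',
)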