def fresh_config():
    """Build a fresh parsl Config for Blue Waters (HighThroughputExecutor over Torque/PBS)."""
    return Config(
        executors=[
            HighThroughputExecutor(
                label="bw_htex",
                cores_per_worker=1,
                worker_debug=False,
                max_workers=1,
                address=address_by_hostname(),
                provider=TorqueProvider(
                    queue='normal',
                    # Wrap workers in the Blue Waters bwpy environment.
                    launcher=AprunLauncher(overrides="-b -- bwpy-environ --"),
                    # String prepended to the #PBS directive block in the
                    # generated submit script, e.g. '#PBS -l ...'.
                    scheduler_options='',
                    # Command run before starting a worker, such as:
                    # 'module load Anaconda; source activate parsl_env'.
                    worker_init=user_opts['bluewaters']['worker_init'],
                    init_blocks=1,
                    max_blocks=1,
                    min_blocks=1,
                    nodes_per_block=2,
                    walltime='00:30:00',
                    cmd_timeout=120,
                ),
            )
        ],
    )
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
# Fix: TorqueProvider, SSHChannel and AprunLauncher were used below but
# never imported, so this module raised NameError on import.
from parsl.providers import TorqueProvider
from parsl.channels import SSHChannel
from parsl.launchers import AprunLauncher

# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
# 1) create a local `user_opts.py`, or
# 2) delete the user_opts import below and replace all appearances of `user_opts` with the literal value
#    (i.e., user_opts['swan']['username'] -> 'your_username')
from .user_opts import user_opts

# Parsl configuration for the Cray "swan" machine: one HighThroughputExecutor
# whose blocks are submitted to Torque over SSH and launched with aprun.
config = Config(
    executors=[
        HighThroughputExecutor(
            label='swan_htex',
            provider=TorqueProvider(
                channel=SSHChannel(
                    hostname='swan.cray.com',
                    username=user_opts['swan']['username'],
                    script_dir=user_opts['swan']['script_dir'],
                ),
                nodes_per_block=1,
                init_blocks=1,
                max_blocks=1,
                launcher=AprunLauncher(),
                scheduler_options=user_opts['swan']['scheduler_options'],
                worker_init=user_opts['swan']['worker_init'],
            ),
        )
    ]
)
# This is an example config; make sure to replace the placeholder values
# below with your own literal values (e.g., 'USERNAME' -> 'your_username').
config = Config(
    executors=[
        IPyParallelExecutor(
            label='swan_ipp',
            workers_per_node=2,
            provider=TorqueProvider(
                channel=SSHChannel(
                    hostname='swan.cray.com',
                    # Please replace USERNAME with your username
                    username='******',
                    # Please replace USERNAME with your username
                    script_dir='/home/users/USERNAME/parsl_scripts',
                ),
                nodes_per_block=2,
                init_blocks=1,
                max_blocks=1,
                launcher=AprunLauncher(),
                scheduler_options='',  # Input your scheduler_options if needed
                worker_init='',  # Input your worker_init if needed
            ),
            # Please replace PUBLIC_IP with your public ip
            controller=Controller(public_ip='PUBLIC_IP'),
        )
    ],
)
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.launchers import SingleNodeLauncher
from parsl.providers import TorqueProvider
from parsl.addresses import address_by_hostname
import os

# Single-node Torque configuration: up to 10 one-node blocks, 12 workers
# per node, task-exit checkpointing enabled.
config = Config(
    executors=[
        HighThroughputExecutor(
            cores_per_worker=1,
            mem_per_worker=4,
            max_workers=12,
            worker_debug=True,
            address=address_by_hostname(),
            provider=TorqueProvider(
                launcher=SingleNodeLauncher(),
                # Fix: the previous "export PYTHONPATH='{}:{{PYTHONPATH}}'"
                # produced a literal '{PYTHONPATH}' token (and single quotes
                # blocked expansion anyway); use double quotes so the shell
                # expands $PYTHONPATH.
                worker_init=("module load gcc/6.2.0 miniconda3/4.7.10; "
                             "source activate parsl_env; "
                             'export PYTHONPATH="{}:$PYTHONPATH"').format(os.getcwd()),
                init_blocks=1,
                max_blocks=10,
                min_blocks=0,
                nodes_per_block=1,
                walltime='99:00:00',
                scheduler_options='#PBS -l mem=48gb,nodes=1:ppn=12'),
        ),
    ],
    checkpoint_mode='task_exit')
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
# Fix: Config, TorqueProvider, SSHChannel and AprunLauncher were used below
# but never imported, so this module raised NameError on import.
from parsl.providers import TorqueProvider
from parsl.channels import SSHChannel
from parsl.launchers import AprunLauncher
from parsl.tests.utils import get_rundir

# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
# 1) create a local `user_opts.py`, or
# 2) delete the user_opts import below and replace all appearances of `user_opts` with the literal value
#    (i.e., user_opts['swan']['username'] -> 'your_username')
from .user_opts import user_opts

config = Config(
    executors=[
        IPyParallelExecutor(
            label='beagle_multinode_mpi',
            workers_per_node=1,
            provider=TorqueProvider(
                'debug',  # positional: queue/account per this parsl version's signature
                channel=SSHChannel(
                    hostname='login4.beagle.ci.uchicago.edu',
                    username=user_opts['beagle']['username'],
                    script_dir="/lustre/beagle2/{}/parsl_scripts".format(
                        user_opts['beagle']['username'])),
                nodes_per_block=1,
                init_blocks=1,
                max_blocks=1,
                launcher=AprunLauncher(),
                scheduler_options=user_opts['beagle']['scheduler_options'],
                worker_init=user_opts['beagle']['worker_init'],
            ))
    ],
    run_dir=get_rundir())
from parsl.launchers import AprunLauncher
from parsl.providers import TorqueProvider
# Fix: LocalChannel was used below but never imported (NameError on import).
from parsl.channels import LocalChannel
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_hostname
from parsl.data_provider.scheme import GlobusScheme

# Blue Waters configuration: single one-node Torque block launched via
# aprun under the bwpy environment, with Globus staging attached.
config = Config(
    executors=[
        HighThroughputExecutor(
            label="bluewaters_htex",
            worker_debug=True,
            # Replace with the address of the login node running parsl.
            address="<LOGIN_NODE>",
            provider=TorqueProvider(
                channel=LocalChannel(),
                init_blocks=1,
                max_blocks=1,
                min_blocks=1,
                nodes_per_block=1,
                launcher=AprunLauncher(overrides="-b -- bwpy-environ --"),
                # NOTE(review): PBS directives are line-based; restoring the
                # newline between the two #PBS options — confirm against the
                # original file.
                scheduler_options='''#PBS -l nodes=1:ppn=32
#PBS -q debug''',
                worker_init='''module load bwpy''',
                walltime='00:30:00'),
            storage_access=[
                GlobusScheme(endpoint_uuid="d59900ef-6d04-11e5-ba46-22000b92c6ec",
                             endpoint_path="/",
                             local_path="/")
            ])
    ],
)
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
# Fix: TorqueProvider, SSHChannel and AprunLauncher were used below but
# never imported, so this module raised NameError on import.
from parsl.providers import TorqueProvider
from parsl.channels import SSHChannel
from parsl.launchers import AprunLauncher

# This is an example config, make sure to
# replace the specific values below with the literal values
# (e.g., 'USERNAME' -> 'your_username')
config = Config(
    executors=[
        IPyParallelExecutor(
            label='beagle_multinode_mpi',
            provider=TorqueProvider(
                'debug',
                channel=SSHChannel(
                    hostname='login4.beagle.ci.uchicago.edu',
                    # Please replace USERNAME with your username
                    username='******',
                    # Please replace USERNAME with your username
                    script_dir='/lustre/beagle2/USERNAME/parsl_scripts',
                ),
                nodes_per_block=1,
                tasks_per_node=1,
                init_blocks=1,
                max_blocks=1,
                launcher=AprunLauncher(),
                scheduler_options='',  # Input your scheduler_options if needed
                worker_init='',  # Input your worker_init if needed
            ))
    ],
)
from parsl.providers import TorqueProvider
from parsl.channels import LocalChannel
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.launchers import SingleNodeLauncher

# Single persistent one-node Torque block with a lone worker.
config = Config(
    executors=[
        HighThroughputExecutor(
            label="test",
            max_workers=1,
            provider=TorqueProvider(
                launcher=SingleNodeLauncher(),
                account="mfknie",
                walltime="10:00:00",
                scheduler_options="#PBS -l mem=8gb -l nodes=1:ppn=2 -N test",
                worker_init="\n".join(("module load gcc/6.2.0",
                                       "module load pigz")),
                # Optional extra environment setup, kept for reference:
                # "module load miniconda3/4.7.10",
                # "module load R/4.0.3",
                # "source activate /gpfs/data/gao-lab/software/local_ancest"
                nodes_per_block=1,
                init_blocks=1,
                min_blocks=1,
                max_blocks=1,
                # parallelism=1
            ))
    ])
config = Config(
    executors=[
        HighThroughputExecutor(
            max_workers_per_node=1,
            worker_debug=False,
            address=address_by_hostname(),
            provider=TorqueProvider(
                queue='normal',
                launcher=AprunLauncher(overrides="-b -- bwpy-environ --"),
                # String prepended to the #PBS directive block in the
                # generated Torque submit script.
                scheduler_options=user_opts['bluewaters']['scheduler_options'],
                # Command to be run before starting a worker, such as:
                # 'module load bwpy; source activate funcx env'.
                worker_init=user_opts['bluewaters']['worker_init'],
                # Scale between 0-1 blocks with 2 nodes per block
                nodes_per_block=2,
                init_blocks=0,
                min_blocks=0,
                max_blocks=1,
                # Hold blocks for 30 minutes
                walltime='00:30:00'),
        )
    ],
)
# fmt: on
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_hostname
from parsl.launchers import SingleNodeLauncher
from parsl.providers import TorqueProvider
# Fix: os.getcwd() was called below but `os` was never imported
# (NameError on import).
import os

# Large-memory Torque configuration: up to 10 one-node blocks, 600 GB
# workers, retries and task-exit checkpointing enabled.
config = Config(
    executors=[
        HighThroughputExecutor(
            # cores_per_worker=2,
            mem_per_worker=600,
            worker_debug=True,
            address=address_by_hostname(),
            provider=TorqueProvider(
                launcher=SingleNodeLauncher(),
                # Fix: the previous "export PYTHONPATH='{}:{{PYTHONPATH}}'"
                # produced a literal '{PYTHONPATH}' token (and single quotes
                # blocked expansion anyway); use double quotes so the shell
                # expands $PYTHONPATH.
                worker_init='export PYTHONPATH="{}:$PYTHONPATH"'.format(os.getcwd()),
                init_blocks=1,
                max_blocks=10,
                min_blocks=1,
                nodes_per_block=1,
                # NOTE(review): '00:320:00' (320 minutes) is unusual but may
                # be deliberate — confirm the intended walltime.
                walltime='00:320:00',
                scheduler_options='#PBS -l mem=600gb'
            ),
        )
    ],
    checkpoint_mode='task_exit',
    retries=3
)
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.launchers import AprunLauncher
from parsl.providers import TorqueProvider

# Blue Waters configuration: a single persistent two-node Torque block,
# one core per worker, short 10-minute walltime.
config = Config(
    executors=[
        HighThroughputExecutor(
            label="bw_htex",
            cores_per_worker=1,
            worker_debug=False,
            provider=TorqueProvider(
                queue='normal',
                launcher=AprunLauncher(overrides="-b -- bwpy-environ --"),
                # String prepended to the #PBS directive block in the
                # generated submit script.
                scheduler_options='',
                # Command run before starting a worker, such as
                # 'source activate env'.
                worker_init='',
                init_blocks=1,
                max_blocks=1,
                min_blocks=1,
                nodes_per_block=2,
                walltime='00:10:00'),
        )
    ],
)