from parsl.config import Config
from parsl.channels import LocalChannel
from parsl.providers import SlurmProvider
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_hostname
from parsl.data_provider.scheme import GlobusScheme

config = Config(
    executors=[
        HighThroughputExecutor(
            label="s2_htex",
            worker_debug=True,
            address=address_by_hostname(),
            provider=SlurmProvider(
                channel=LocalChannel(),
                nodes_per_block=1,
                init_blocks=1,
                min_blocks=1,
                max_blocks=1,
                partition='skx-normal',
                scheduler_options='''#SBATCH -A Parsl-Eval''',
                worker_init='''source activate parsl-test''',
                walltime='00:30:00'
            ),
            storage_access=[
                GlobusScheme(
                    endpoint_uuid="ceea5ca0-89a9-11e7-a97f-22000a92523b",
                    endpoint_path="/",
                    local_path="/"
                )
            ]
        )
    ],
)
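With a config like this loaded, apps can consume Globus URLs directly; Parsl stages each file in through the endpoint before the app runs. A minimal sketch of that usage, where the source endpoint UUID and file path are hypothetical placeholders (not real values):

import parsl
from parsl import bash_app
from parsl.data_provider.files import File

parsl.load(config)

@bash_app
def line_count(infile, stdout='count.txt'):
    # infile arrives as the local path of the staged copy
    return 'wc -l {}'.format(infile)

# hypothetical source endpoint UUID and path -- substitute ones you can read
remote = File('globus://SOURCE_ENDPOINT_UUID/data.txt')
line_count(remote).result()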
from parsl.config import Config
from parsl.data_provider.scheme import GlobusScheme
from parsl.executors.threads import ThreadPoolExecutor
from parsl.tests.user_opts import user_opts  # must be configured specifically for each user
from parsl.tests.utils import get_rundir

config = Config(
    executors=[
        ThreadPoolExecutor(
            label='local_threads_globus',
            storage_access=[
                GlobusScheme(
                    endpoint_uuid=user_opts['globus']['endpoint'],
                    endpoint_path=user_opts['globus']['path']
                )
            ],
            working_dir=user_opts['globus']['path']
        )
    ],
    run_dir=get_rundir()
)
from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.executors import HighThroughputExecutor
from parsl.launchers import SingleNodeLauncher
from parsl.addresses import address_by_hostname
from parsl.data_provider.scheme import GlobusScheme

config = Config(
    executors=[
        HighThroughputExecutor(
            label="midway_htex",
            worker_debug=True,
            address=address_by_hostname(),
            provider=SlurmProvider(
                'broadwl',
                launcher=SingleNodeLauncher(),
                worker_init='source activate parsl_dev',
                init_blocks=1,
                max_blocks=1,
                min_blocks=1,
                nodes_per_block=1,
                walltime='0:30:00'
            ),
            storage_access=[
                GlobusScheme(
                    endpoint_uuid="af7bda53-6d04-11e5-ba46-22000b92c6ec",
                    endpoint_path="/",
                    local_path="/"
                )
            ]
        )
    ],
)
import parsl
from parsl import bash_app
from parsl.config import Config
from parsl.data_provider.files import File
from parsl.data_provider.scheme import GlobusScheme
from parsl.executors.threads import ThreadPoolExecutor

@bash_app
def tar_list(tarfile, stdout='taroutput.txt'):
    # note we send output to taroutput.txt, but don't do anything with it!
    return '/usr/bin/tar tfz {}'.format(tarfile)

# Specify the config for the machine the data will land on, particularly the
# Globus endpoint by UUID within the storage_access construct:
config = Config(
    executors=[
        ThreadPoolExecutor(
            label='local_threads_globus',
            working_dir='/sdcc/u/dcde1000006/globus-scratch',
            storage_access=[
                GlobusScheme(endpoint_uuid='23f78cc8-41e0-11e9-a618-0a54e005f950')
            ],
        )
    ],
)

parsl.clear()
parsl.load(config)

# Try a trivial staging exercise, pulling this file if it's not already available.
# Note this is not a public file! You probably want to find one you can read.
tarfile = File('globus://e133a52e-6d04-11e5-ba46-22000b92c6ec/archive/d3c724/bbcp.tar.Z')
f = tar_list(tarfile)
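The call above returns an AppFuture immediately; to confirm the transfer and tar listing actually completed, a short follow-up might look like this (assuming taroutput.txt lands in the script's working directory):

# Wait for the Globus transfer and the tar listing to finish, then inspect the output.
f.result()
with open('taroutput.txt') as listing:
    print(listing.read())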
from parsl.channels import LocalChannel
from parsl.launchers import AprunLauncher
from parsl.providers import TorqueProvider
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.data_provider.scheme import GlobusScheme

config = Config(
    executors=[
        HighThroughputExecutor(
            label="bluewaters_htex",
            worker_debug=True,
            address="<LOGIN_NODE>",  # replace with the address of a login node
            provider=TorqueProvider(
                channel=LocalChannel(),
                init_blocks=1,
                max_blocks=1,
                min_blocks=1,
                nodes_per_block=1,
                launcher=AprunLauncher(overrides="-b -- bwpy-environ --"),
                scheduler_options='''#PBS -l nodes=1:ppn=32
#PBS -q debug''',
                worker_init='''module load bwpy''',
                walltime='00:30:00'
            ),
            storage_access=[
                GlobusScheme(
                    endpoint_uuid="d59900ef-6d04-11e5-ba46-22000b92c6ec",
                    endpoint_path="/",
                    local_path="/"
                )
            ]
        )
    ],
)
from parsl.config import Config
from parsl.data_provider.scheme import GlobusScheme
from parsl.executors.threads import ThreadPoolExecutor
from parsl.tests.utils import get_rundir

# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
# 1) create a local `user_opts.py`, or
# 2) delete the user_opts import below and replace all appearances of `user_opts`
#    with the literal value (i.e., user_opts['swan']['username'] -> 'your_username')
from .user_opts import user_opts

config = Config(
    executors=[
        ThreadPoolExecutor(
            label='local_threads_globus',
            storage_access=[
                GlobusScheme(
                    endpoint_uuid=user_opts['globus']['endpoint'],
                    endpoint_path=user_opts['globus']['path']
                )
            ],
            working_dir=user_opts['globus']['path']
        )
    ],
    run_dir=get_rundir()
)
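For option 1 above, a local user_opts.py only needs the keys this config actually reads. A minimal sketch, where both values are placeholders you must replace:

# user_opts.py -- minimal sketch; both values below are placeholders
user_opts = {
    'globus': {
        'endpoint': 'YOUR_ENDPOINT_UUID',     # your Globus endpoint's UUID
        'path': '/path/visible/to/endpoint',  # a path both Parsl and the endpoint can reach
    }
}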
from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_hostname
from parsl.data_provider.scheme import GlobusScheme

config = Config(
    executors=[
        HighThroughputExecutor(
            label='Stampede2_HTEX',
            address=address_by_hostname(),
            provider=SlurmProvider(
                nodes_per_block=2,
                init_blocks=1,
                min_blocks=1,
                partition='YOUR_PARTITION',
                # String to prepend to #SBATCH blocks in the submit script
                # to the scheduler, e.g. '#SBATCH --constraint=knl,quad,cache'
                scheduler_options='',
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',
                walltime='00:30:00'
            ),
            storage_access=[
                GlobusScheme(
                    endpoint_uuid='ceea5ca0-89a9-11e7-a97f-22000a92523b',
                    endpoint_path='/',
                    local_path='/'
                )
            ]
        )
    ],
)
from parsl.config import Config
from parsl.data_provider.scheme import GlobusScheme
from parsl.executors.threads import ThreadPoolExecutor

# This is an example config; make sure to replace the placeholder values
# below with your own literal values (e.g., 'UUID' -> your endpoint's UUID).
config = Config(
    executors=[
        ThreadPoolExecutor(
            label='local_threads_globus',
            storage_access=[
                GlobusScheme(
                    endpoint_uuid='UUID',  # please replace UUID with your endpoint's UUID
                    endpoint_path='PATH'   # please replace PATH with your endpoint path
                )
            ],
            working_dir='PATH'  # please replace PATH with your working directory path
        )
    ],
)
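Staging can also run in the other direction: declaring a Globus URL among an app's outputs asks Parsl to transfer the file back out through the endpoint after the app exits. A minimal sketch against the config above, reusing the same UUID and PATH placeholders:

import parsl
from parsl import bash_app
from parsl.data_provider.files import File

parsl.load(config)

@bash_app
def make_report(outputs=[]):
    # write the file locally; Parsl then ships it out through the Globus endpoint
    return 'echo "all done" > {}'.format(outputs[0])

report = File('globus://UUID/PATH/report.txt')  # replace UUID and PATH as above
make_report(outputs=[report]).result()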