def __init__(self,
             provider=None,
             label='ipp',
             working_dir=None,
             controller=None,
             container_image=None,
             engine_dir=None,
             storage_access=None,
             engine_debug_level=None,
             workers_per_node=1,
             managed=True):
    """Initialize the executor.

    Parameters
    ----------
    provider : execution provider, optional
        Provider used to acquire resources. Defaults to a fresh
        ``LocalProvider()``.
    label : str, optional
        Human-readable name for this executor. Default: ``'ipp'``.
    working_dir : str, optional
        Working directory for the engines.
    controller : Controller, optional
        IPyParallel controller instance. Defaults to a fresh
        ``Controller()``.
    container_image : str, optional
        Container image in which to launch the engines.
    engine_dir : str, optional
        Directory used for engine files.
    storage_access : list, optional
        Storage access schemes; at most one scheme is supported.
        Defaults to an empty list.
    engine_debug_level : str, optional
        When set (e.g. ``"DEBUG"``), forwarded to the engines via a
        ``--log-level=<level>`` option.
    workers_per_node : int, optional
        Number of workers started per node. Default: 1.
    managed : bool, optional
        Whether this executor is managed. Default: True.

    Raises
    ------
    ConfigurationError
        If more than one storage access scheme is supplied.
    """
    # BUG FIX: the original signature used ``provider=LocalProvider()``
    # and ``controller=Controller()``. Default argument values are
    # evaluated once, at function-definition time, so every instance
    # built with the defaults shared the same provider/controller
    # objects. Defaulting to None and constructing per call keeps the
    # observable behavior while giving each instance its own objects.
    self.provider = provider if provider is not None else LocalProvider()
    self.label = label
    self.working_dir = working_dir
    self.controller = controller if controller is not None else Controller()
    self.engine_debug_level = engine_debug_level
    self.container_image = container_image
    self.engine_dir = engine_dir
    self.workers_per_node = workers_per_node
    self.storage_access = storage_access if storage_access is not None else []
    if len(self.storage_access) > 1:
        raise ConfigurationError(
            'Multiple storage access schemes are not yet supported')
    self.managed = managed
    # Translate the debug level into the CLI flag handed to the engines.
    self.debug_option = ""
    if self.engine_debug_level:
        self.debug_option = "--log-level={}".format(self.engine_debug_level)
def __init__(self,
             provider=None,
             label='ipp',
             working_dir=None,
             controller=None,
             container_image=None,
             engine_dir=None,
             storage_access=None,
             engine_debug_level=None,
             workers_per_node=1,
             managed=True):
    """Initialize the executor.

    Parameters
    ----------
    provider : execution provider, optional
        Provider used to acquire resources. Defaults to a fresh
        ``LocalProvider()``.
    label : str, optional
        Human-readable name for this executor. Default: ``'ipp'``.
    working_dir : str, optional
        Working directory for the engines.
    controller : Controller, optional
        IPyParallel controller instance. Defaults to a fresh
        ``Controller()``.
    container_image : str, optional
        Container image in which to launch the engines.
    engine_dir : str, optional
        Directory used for engine files.
    storage_access : list, optional
        Storage access schemes. Stored as given (may be ``None``).
    engine_debug_level : str, optional
        When set (e.g. ``"DEBUG"``), forwarded to the engines via a
        ``--log-level=<level>`` option.
    workers_per_node : int, optional
        Number of workers started per node. Default: 1.
    managed : bool, optional
        Whether this executor is managed. Default: True.
    """
    # BUG FIX: ``provider=LocalProvider()`` / ``controller=Controller()``
    # defaults were evaluated once at definition time and shared across
    # all instances constructed with the defaults. Construct per call
    # instead; observable behavior is unchanged.
    self.provider = provider if provider is not None else LocalProvider()
    self.label = label
    self.working_dir = working_dir
    self.controller = controller if controller is not None else Controller()
    self.engine_debug_level = engine_debug_level
    self.container_image = container_image
    self.engine_dir = engine_dir
    self.workers_per_node = workers_per_node
    # NOTE(review): unlike the sibling __init__ variants, this one stores
    # storage_access as passed (possibly None) and performs no
    # single-scheme validation — kept as-is to preserve behavior; confirm
    # whether validation was intentionally omitted here.
    self.storage_access = storage_access
    self.managed = managed
    # Translate the debug level into the CLI flag handed to the engines.
    self.debug_option = ""
    if self.engine_debug_level:
        self.debug_option = "--log-level={}".format(self.engine_debug_level)
def __init__(self,
             provider=None,
             label='ipp',
             engine_file='~/.ipython/profile_default/security/ipcontroller-engine.json',
             engine_dir='.',
             working_dir=None,
             controller=None,
             container_image=None,
             storage_access=None,
             managed=True):
    """Initialize the executor.

    Parameters
    ----------
    provider : execution provider, optional
        Provider used to acquire resources. Defaults to a fresh
        ``Local()``.
    label : str, optional
        Human-readable name for this executor. Default: ``'ipp'``.
    engine_file : str, optional
        Path to the ipcontroller engine connection file.
    engine_dir : str, optional
        Directory used for engine files. Default: ``'.'``.
    working_dir : str, optional
        Working directory for the engines.
    controller : Controller, optional
        IPyParallel controller instance. Defaults to a fresh
        ``Controller()``.
    container_image : str, optional
        Container image in which to launch the engines.
    storage_access : list, optional
        Storage access schemes; at most one scheme is supported.
        Defaults to an empty list.
    managed : bool, optional
        Whether this executor is managed. Default: True.

    Raises
    ------
    ConfigurationError
        If more than one storage access scheme is supplied.
    """
    # BUG FIX: ``provider=Local()`` / ``controller=Controller()`` defaults
    # were evaluated once at definition time, sharing one object across
    # all instances built with the defaults. Construct per call instead.
    self.provider = provider if provider is not None else Local()
    self.label = label
    self.engine_file = engine_file
    self.engine_dir = engine_dir
    self.working_dir = working_dir
    self.controller = controller if controller is not None else Controller()
    self.container_image = container_image
    self.storage_access = storage_access if storage_access is not None else []
    if len(self.storage_access) > 1:
        raise ConfigurationError(
            'Multiple storage access schemes are not yet supported')
    self.managed = managed
# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
# 1) create a local `user_opts.py`, or
# 2) delete the user_opts import below and replace all appearances of `user_opts` with the literal value
#    (i.e., user_opts['swan']['username'] -> 'your_username')
from .user_opts import user_opts

# Provider for EC2 spot instances; credentials and placement come from user_opts.
spot_provider = AWSProvider(
    user_opts['ec2']['image_id'],
    region=user_opts['ec2']['region'],
    key_name=user_opts['ec2']['key_name'],
    profile="default",
    state_file='awsproviderstate.json',
    spot_max_bid='1.0',
    nodes_per_block=1,
    tasks_per_node=1,
    init_blocks=1,
    max_blocks=1,
    min_blocks=0,
    walltime='00:25:00',
)

# Single IPyParallel executor driven by the spot provider; the controller
# listens on the user's public IP.
spot_executor = IPyParallelExecutor(
    label='ec2_spot',
    provider=spot_provider,
    controller=Controller(public_ip=user_opts['public_ip']),
)

config = Config(
    executors=[spot_executor],
    run_dir=get_rundir(),
)
# This is an example config, make sure to
# replace the specific values below with the literal values
# (e.g., 'USERNAME' -> 'your_username')

# SSH channel into the Midway login node.
midway_channel = SSHChannel(
    hostname='swift.rcc.uchicago.edu',
    username='******',  # Please replace USERNAME with your username
    script_dir='/scratch/midway2/USERNAME/parsl_scripts',  # Please replace USERNAME with your username
)

# Slurm provider targeting the 'westmere' partition over the SSH channel.
midway_provider = SlurmProvider(
    'westmere',
    channel=midway_channel,
    init_blocks=1,
    min_blocks=1,
    max_blocks=2,
    nodes_per_block=1,
    tasks_per_node=4,
    parallelism=0.5,
    scheduler_options='',  # Input your scheduler_options if needed
    worker_init='',  # Input your worker_init if needed
)

config = Config(executors=[
    IPyParallelExecutor(
        provider=midway_provider,
        label='midway_ipp',
        controller=Controller(public_ip='PUBLIC_IP'),  # Please replace PUBLIC_IP with your public ip
    )
])
provider=Condor( channel=SSHChannel( hostname='spce01.sdcc.bnl.gov', username= '******', # Please replace USERNAME with your username script_dir= '/usatlas/u/dcde1000001/parsl_scripts', # Please replace USERNAME with your username ), nodes_per_block=1, init_blocks=4, max_blocks=4, scheduler_options='accounting_group = group_sdcc.main', worker_init='', # Input your worker_init if needed ), controller=Controller( public_ip='130.199.185.10' ), # Please replace PUBLIC_IP with your public ip ) ], ) parsl.clear() parsl.load(config) @python_app() def hello(): return 'Hello World' print(parsl.__version__) print(hello().result())
# Untested
from libsubmit.providers import CobaltProvider
from libsubmit.launchers import SingleNodeLauncher
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller
from parsl.tests.user_opts import user_opts
from parsl.tests.utils import get_rundir

# Cobalt provider for a single debug-queue node on Cooley; account and
# overrides are taken from the user's test options.
cooley_provider = CobaltProvider(
    launcher=SingleNodeLauncher(),
    nodes_per_block=1,
    tasks_per_node=1,
    init_blocks=1,
    max_blocks=1,
    walltime="00:05:00",
    overrides=user_opts['cooley']['overrides'],
    queue='debug',
    account=user_opts['cooley']['account'],
)

config = Config(
    executors=[
        IPyParallelExecutor(
            label='cooley_local_single_node',
            provider=cooley_provider,
            controller=Controller(public_ip="10.230.100.210"),
        )
    ],
    run_dir=get_rundir(),
)
def make(self, rundir, config):
    '''Construct the appropriate provider, executors and channels and
    link them together.

    Parameters
    ----------
    rundir : str
        Run directory; stored on self and handed to per-run controllers.
    config : dict
        Configuration with a "sites" list; each site dict carries
        "auth" (channel spec), "execution" (provider/executor names)
        and a "site" label.

    Returns
    -------
    dict
        Mapping of site name -> constructed executor.

    Raises
    ------
    BadConfig
        If a site requests a channel, provider, or executor name that
        is not registered on this factory.
    '''
    self.rundir = rundir
    sites = {}
    for site in config.get("sites"):
        logger.debug("Constructing site : %s ", site.get('site', 'Unnamed_site'))

        # --- Channel -------------------------------------------------
        channel_name = site["auth"]["channel"]
        if channel_name in self.channels:
            # Pass the remaining auth options straight to the channel
            # constructor; "channel" itself is not a constructor kwarg.
            channel_opts = site["auth"].copy()
            if "channel" in channel_opts:
                del channel_opts["channel"]
            channel = self.channels[channel_name](**channel_opts)
        else:
            # BUG FIX: the format string used {0} twice, so the second
            # argument (the channel name) was never shown and the site
            # name was printed in its place.
            logger.error("Site:{0} requests an invalid channel:{1}".format(site["site"], channel_name))
            raise BadConfig(site["site"], "invalid channel:{0} requested".format(channel_name))

        logger.debug("Created channel : {0}".format(channel))

        # --- Provider ------------------------------------------------
        provider_name = site["execution"]["provider"]
        if provider_name in self.execution_providers:
            provider = self.execution_providers[provider_name](site, channel=channel)
        else:
            # BUG FIX: same duplicated-{0} defect as above.
            logger.error("Site:{0} requests an invalid provider:{1}".format(site["site"], provider_name))
            raise BadConfig(site["site"], "invalid provider:{0} requested".format(provider_name))

        logger.debug("Created execution_provider : {0}".format(provider))

        # --- Executor ------------------------------------------------
        executor_name = site["execution"]["executor"]
        if executor_name in self.executors:
            controller = None
            if executor_name == 'ipp' and config.get("controller", None):
                logger.debug("Starting controller")
                # A controller needs to be started per run
                site["controller"] = copy.copy(config["controller"])
                site["controller"]['ipythonDir'] = self.rundir
                site["controller"]['profile'] = config["controller"].get('profile', site["site"])
                controller = Controller(**site["controller"])
                logger.debug("Controller engine file : %s", controller.engine_file)
                logger.debug("Controller client file : %s", controller.client_file)
            executor = self.executors[executor_name](execution_provider=provider,
                                                     controller=controller,
                                                     config=site)
        else:
            # BUG FIX: same duplicated-{0} defect as above.
            logger.error("Site:{0} requests an invalid executor:{1}".format(site["site"], executor_name))
            raise BadConfig(site["site"], "invalid executor:{0} requested".format(executor_name))

        logger.debug("Created executor : {0}".format(executor))
        sites[site["site"]] = executor

    return sites
""" Use the following config with caution. """ from parsl.config import Config from parsl.executors.ipp import IPyParallelExecutor from parsl.executors.ipp_controller import Controller config = Config( executors=[ IPyParallelExecutor(label='local_ipp_reuse', ), ], controller=Controller(reuse=True), )
from parsl.executors.ipp_controller import Controller
from libsubmit.channels.local.local import LocalChannel
from libsubmit.providers.condor.condor import Condor
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.tests.user_opts import user_opts
from parsl.tests.utils import get_rundir

# Local channel using the OSG credentials from the test options.
osg_channel = LocalChannel(
    username=user_opts['osg']['username'],
    script_dir=user_opts['osg']['script_dir'],
)

# Condor provider that pins jobs to RHEL 6 x86_64 nodes with modules,
# and bootstraps a Python 3.5 virtualenv with ipyparallel on the worker.
osg_provider = Condor(
    channel=osg_channel,
    nodes_per_block=1,
    tasks_per_node=1,
    init_blocks=4,
    max_blocks=4,
    overrides='Requirements = OSGVO_OS_STRING == "RHEL 6" && Arch == "X86_64" && HAS_MODULES == True',
    worker_setup='module load python/3.5.2; python3 -m venv parsl_env; source parsl_env/bin/activate; pip3 install ipyparallel',
)

config = Config(
    executors=[
        IPyParallelExecutor(
            label='osg_local_ipp',
            provider=osg_provider,
        )
    ],
    controller=Controller(public_ip='192.170.227.195'),
    run_dir=get_rundir(),
)
from parsl.executors.ipp_controller import Controller
from parsl.executors.ipp import IPyParallelExecutor
from parsl.config import Config
# BUG FIX: LocalProvider and SSHChannel were used below without being
# imported, so loading this config raised NameError. Import paths follow
# the libsubmit layout used by the sibling configs — TODO confirm they
# match this codebase's version of libsubmit.
from libsubmit.providers.local.local import LocalProvider
from libsubmit.channels.ssh.ssh import SSHChannel

username = "******"
remotes = [
    'midway2-login1.rcc.uchicago.edu',
    'midway2-login2.rcc.uchicago.edu'
]

# One executor per remote login node, each running a local provider over
# an SSH channel to that host.
config = Config(
    executors=[
        IPyParallelExecutor(
            label='remote_ipp_{}'.format(m),
            workers_per_node=2,  # Replaces provider.tasks_per_node
            engine_debug_level="DEBUG",
            controller=Controller(public_ip="128.135.112.73"),
            provider=LocalProvider(
                init_blocks=1,
                nodes_per_block=1,
                parallelism=0.5,
                worker_init="source /scratch/midway2/yadunand/parsl_env_setup.sh",
                channel=SSHChannel(
                    hostname=m,
                    username=username,
                    script_dir="/scratch/midway2/{}/parsl_tests/".format(username))),
        ) for m in remotes
    ],
)
from libsubmit.providers.cobalt.cobalt import Cobalt
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller
from parsl.tests.user_opts import user_opts
from parsl.tests.utils import get_rundir

# Multinode IPyParallel config for Theta via Cobalt/aprun.
config = Config(
    executors=[
        IPyParallelExecutor(
            label='theta_local_ipp_multinode',
            provider=Cobalt(
                walltime="00:30:00",
                nodes_per_block=8,
                tasks_per_node=1,
                init_blocks=1,
                max_blocks=1,
                launcher='aprun',
                # BUG FIX: was user_opts['account']['overrides'] — every
                # other lookup in this config (and the sibling configs)
                # keys user_opts by site name, so this would KeyError on a
                # standard user_opts dict.
                overrides=user_opts['theta']['overrides'],
                account=user_opts['theta']['account']
            )
        )
    ],
    run_dir=get_rundir(),
    controller=Controller(public_ip=user_opts['theta']['public_ip'])
)