def __init__(self, partition, label='slurm', channel=LocalChannel(),
             script_dir='parsl_scripts', nodes_per_block=1, tasks_per_node=1,
             init_blocks=1, min_blocks=0, max_blocks=10, parallelism=1,
             walltime="00:10:00", overrides='', cmd_timeout=10,
             launcher=SingleNodeLauncher()):
    super().__init__(label, channel, script_dir, nodes_per_block, tasks_per_node,
                     init_blocks, min_blocks, max_blocks, parallelism, walltime,
                     cmd_timeout=cmd_timeout, launcher=launcher)

    self.partition = partition
    self.overrides = overrides
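# A minimal usage sketch for the Slurm constructor above, mirroring the Cobalt config
# example later in this section; the import paths follow that example, and the partition
# name, overrides string, and block sizes are illustrative placeholders only.
from libsubmit.providers import SlurmProvider
from libsubmit.launchers import SingleNodeLauncher

from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor

config = Config(
    executors=[
        IPyParallelExecutor(
            label='slurm_single_node',
            provider=SlurmProvider(
                'debug',                     # partition; site-specific placeholder
                nodes_per_block=1,
                tasks_per_node=1,
                init_blocks=1,
                max_blocks=1,
                walltime="00:10:00",
                overrides='#SBATCH --constraint=haswell',  # hypothetical scheduler directive
                launcher=SingleNodeLauncher(),
            ),
        )
    ],
)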
def __init__(self, channel=LocalChannel(), label='grid_engine',
             script_dir='parsl_scripts', nodes_per_block=1, tasks_per_node=1,
             init_blocks=1, min_blocks=0, max_blocks=10, parallelism=1,
             walltime="00:10:00", overrides='', launcher=SingleNodeLauncher()):
    super().__init__(label, channel, script_dir, nodes_per_block, tasks_per_node,
                     init_blocks, min_blocks, max_blocks, parallelism, walltime,
                     launcher)
    self.overrides = overrides

    # Warn when an srun-style launcher is requested by name; srun is Slurm-specific.
    if launcher in ['srun', 'srun_mpi']:
        logger.warning("Use of {} launcher is usually appropriate for Slurm providers. "
                       "Recommended options include 'single_node' or 'aprun'.".format(launcher))
def __init__(self, channel=LocalChannel(), label='local', script_dir='parsl_scripts',
             tasks_per_node=1, nodes_per_block=1, launcher=SingleNodeLauncher(),
             init_blocks=4, min_blocks=0, max_blocks=10, walltime="00:15:00",
             parallelism=1):
    self.channel = channel
    self.label = label
    if not os.path.exists(script_dir):
        os.makedirs(script_dir)
    self.script_dir = script_dir
    self.provisioned_blocks = 0
    self.nodes_per_block = nodes_per_block
    self.tasks_per_node = tasks_per_node
    self.launcher = launcher
    self.init_blocks = init_blocks
    self.min_blocks = min_blocks
    self.max_blocks = max_blocks
    self.parallelism = parallelism
    self.walltime = walltime

    # Dictionary that keeps track of jobs, keyed on job_id
    self.resources = {}
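# A hedged construction sketch for the local provider above, useful for runs on the
# submitting host itself; the import paths are assumed to match the other providers in
# this section, and the block counts are placeholders.
from libsubmit.channels import LocalChannel
from libsubmit.providers import LocalProvider

provider = LocalProvider(
    channel=LocalChannel(),
    init_blocks=1,
    min_blocks=0,
    max_blocks=2,
    parallelism=1,
)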
def __init__(self, channel=None, label='condor', nodes_per_block=1, tasks_per_node=1,
             init_blocks=1, min_blocks=0, max_blocks=10, parallelism=1,
             environment=None, script_dir='parsl_scripts', project='', overrides='',
             walltime="00:10:00", worker_setup='', launcher=SingleNodeLauncher(),
             requirements=''):
    super(CondorProvider, self).__init__(label, channel, script_dir, nodes_per_block,
                                         tasks_per_node, init_blocks, min_blocks,
                                         max_blocks, parallelism, walltime, launcher)

    self.provisioned_blocks = 0

    self.environment = environment if environment is not None else {}
    for key, value in self.environment.items():
        # To escape literal quote marks, double them
        # See: http://research.cs.wisc.edu/htcondor/manual/v8.6/condor_submit.html
        try:
            self.environment[key] = "'{}'".format(value.replace("'", '"').replace('"', '""'))
        except AttributeError:
            pass

    self.project = project
    self.overrides = overrides
    self.worker_setup = worker_setup
    self.requirements = requirements
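# A small illustration (not part of the provider) of the environment-quoting transform
# above: single quotes become double quotes, every double quote is then doubled, and the
# whole value is wrapped in single quotes for the Condor submit description.
value = 'worker\'s "debug" mode'
escaped = "'{}'".format(value.replace("'", '"').replace('"', '""'))
print(escaped)  # 'worker""s ""debug"" mode'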
# Untested
from libsubmit.providers import CobaltProvider
from libsubmit.launchers import SingleNodeLauncher

from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller
from parsl.tests.user_opts import user_opts
from parsl.tests.utils import get_rundir

config = Config(
    executors=[
        IPyParallelExecutor(
            label='cooley_local_single_node',
            provider=CobaltProvider(
                launcher=SingleNodeLauncher(),
                nodes_per_block=1,
                tasks_per_node=1,
                init_blocks=1,
                max_blocks=1,
                walltime="00:05:00",
                overrides=user_opts['cooley']['overrides'],
                queue='debug',
                account=user_opts['cooley']['account']
            ),
            controller=Controller(public_ip="10.230.100.210")
        )
    ],
    run_dir=get_rundir(),
)
def __init__(self, image_id, label='ec2', init_blocks=1, min_blocks=0, max_blocks=10,
             tasks_per_node=1, nodes_per_block=1, parallelism=1, overrides='',
             instance_type='t2.small', region='us-east-2', spot_max_bid=0,
             key_name=None, key_file=None, profile=None, iam_instance_profile_arn='',
             state_file=None, walltime="01:00:00", linger=False,
             launcher=SingleNodeLauncher()):
    if not _boto_enabled:
        raise OptionalModuleMissing(['boto3'], "AWS Provider requires the boto3 module.")

    self.image_id = image_id
    self.label = label
    self.init_blocks = init_blocks
    self.min_blocks = min_blocks
    self.max_blocks = max_blocks
    self.tasks_per_node = tasks_per_node
    self.nodes_per_block = nodes_per_block
    self.max_nodes = max_blocks * nodes_per_block
    self.parallelism = parallelism
    self.overrides = overrides
    self.instance_type = instance_type
    self.region = region
    self.spot_max_bid = spot_max_bid
    self.key_name = key_name
    self.key_file = key_file
    self.profile = profile
    self.iam_instance_profile_arn = iam_instance_profile_arn
    self.walltime = walltime
    self.launcher = launcher
    self.linger = linger
    self.resources = {}

    env_specified = os.getenv("AWS_ACCESS_KEY_ID") is not None and os.getenv("AWS_SECRET_ACCESS_KEY") is not None
    if profile is None and key_file is None and not env_specified:
        raise ConfigurationError("Must specify either 'profile', 'key_file', or "
                                 "'AWS_ACCESS_KEY_ID' and 'AWS_SECRET_ACCESS_KEY' environment variables.")

    try:
        self.initialize_boto_client()
    except Exception as e:
        logger.error("{} failed to initialize.".format(self))
        raise e

    state_file_exists = False
    try:
        self.state_file = state_file if state_file is not None else '.ec2_{}.json'.format(label)
        self.read_state_file(self.state_file)
        state_file_exists = True
    except Exception:
        logger.info("No state file found. Cannot load previous options. Creating new infrastructure.")

    if not state_file_exists:
        try:
            self.create_vpc().id
        except Exception as e:
            logger.info("Failed to create ec2 infrastructure: {0}".format(e))
            raise
        else:
            self.write_state_file()
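# A hedged sketch of satisfying the credential check in the constructor above. It accepts
# any one of a named AWS profile, a key file, or the two environment variables; the values
# below are placeholders, and the import path is assumed to match the other providers here.
# Note that construction contacts AWS: it initializes a boto3 client and, if no state file
# is found, provisions a VPC.
import os

os.environ["AWS_ACCESS_KEY_ID"] = "<access-key-id>"          # placeholder
os.environ["AWS_SECRET_ACCESS_KEY"] = "<secret-access-key>"  # placeholder

from libsubmit.providers import EC2Provider

provider = EC2Provider(
    'ami-00000000000000000',   # hypothetical AMI ID; use an image with a worker environment
    instance_type='t2.small',
    region='us-east-2',
    init_blocks=1,
    max_blocks=1,
    walltime="01:00:00",
)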