Esempio n. 1
0
def load_ir2_dc_config():
    """
    Load the parsl config for ad-hoc providers.

    If a DataFlowKernel is already active this is a no-op.
    """
    # parsl raises RuntimeError when no DFK has been loaded yet; use
    # that as the "not yet configured" signal.
    try:
        parsl.DataFlowKernelLoader.dfk()
        print("parsl config is already loaded.")
        return
    except RuntimeError:
        pass

    # Build one HighThroughputExecutor per worker host, each reached
    # over its own SSH channel.
    def _executor_for(host):
        ssh_channel = SSHChannel(hostname=host, script_dir=script_dir(host))
        local_provider = LocalProvider(
            channel=ssh_channel,
            init_blocks=1,
            worker_init='source %s' % SETUP_SCRIPT)
        return HighThroughputExecutor(
            label=host,
            address=MOTHER_NODE_ADDRESS,
            worker_debug=False,
            provider=local_provider,
            heartbeat_period=2,
            heartbeat_threshold=10)

    executors = [_executor_for(host) for host in WORKER_NODE_ADDRESSES]

    parsl.load(Config(executors=executors, strategy=None, retries=3))
Esempio n. 2
0
def test_ssh_channel():
    """Exercise an SSHChannel against a throwaway, locally started sshd.

    Starts an sshd in a temporary config directory, points a
    LocalProvider's SSHChannel at it, runs the provider test suite,
    and always stops the sshd afterwards.
    """
    with tempfile.TemporaryDirectory() as config_dir:
        sshd_thread, priv_key, server_port = _start_sshd(config_dir)
        try:
            with tempfile.TemporaryDirectory() as remote_script_dir:
                # The SSH library fails to add the new host key to the file if the file does not
                # already exist, so create it here.
                known_hosts = '{}/known.hosts'.format(config_dir)
                pathlib.Path(known_hosts).touch(mode=0o600)
                # FIX: the original used tempfile.mkdtemp() here, which
                # leaked the local script directory on disk; a
                # context-managed TemporaryDirectory cleans it up.
                with tempfile.TemporaryDirectory() as script_dir:
                    p = LocalProvider(channel=SSHChannel(
                        '127.0.0.1',
                        port=server_port,
                        script_dir=remote_script_dir,
                        host_keys_filename=known_hosts,
                        key_filename=priv_key),
                                      launcher=SingleNodeLauncher(debug=False))
                    p.script_dir = script_dir
                    _run_tests(p)
        finally:
            _stop_sshd(sshd_thread)
Esempio n. 3
0
from parsl.executors.ipp_controller import Controller

# This is an example config, make sure to
#        replace the specific values below with the literal values
#          (e.g., 'USERNAME' -> 'your_username')

# Example IPyParallel + Torque configuration for Cray Swan, reached
# over an SSH channel.
config = Config(executors=[
    IPyParallelExecutor(
        label='swan_ipp',
        workers_per_node=2,
        provider=TorqueProvider(
            channel=SSHChannel(
                hostname='swan.cray.com',
                # Please replace USERNAME with your username
                username='******',
                # Please replace USERNAME with your username
                script_dir='/home/users/USERNAME/parsl_scripts',
            ),
            nodes_per_block=2,
            init_blocks=1,
            max_blocks=1,
            launcher=AprunLauncher(),
            scheduler_options='',  # Input your scheduler_options if needed
            worker_init='',  # Input your worker_init if needed
        ),
        # Please replace PUBLIC_IP with your public ip
        controller=Controller(public_ip='PUBLIC_IP'),
    )
])
Esempio n. 4
0
from parsl.tests.utils import get_rundir

# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
#       1) create a local `user_opts.py`, or
#       2) delete the user_opts import below and replace all appearances of `user_opts` with the literal value
#          (i.e., user_opts['swan']['username'] -> 'your_username')
from .user_opts import user_opts

# Cori single-node IPyParallel example; site-specific settings come
# from the user_opts dictionary.
config = Config(
    executors=[
        IPyParallelExecutor(
            label='cori_ipp_single_node',
            workers_per_node=1,
            provider=SlurmProvider(
                'debug',
                channel=SSHChannel(
                    hostname='cori.nersc.gov',
                    username=user_opts['cori']['username'],
                    script_dir=user_opts['cori']['script_dir'],
                ),
                nodes_per_block=1,
                init_blocks=1,
                max_blocks=1,
                scheduler_options=user_opts['cori']['scheduler_options'],
                worker_init=user_opts['cori']['worker_init'],
            ),
            controller=Controller(public_ip=user_opts['public_ip']),
        )
    ],
    run_dir=get_rundir(),
)
Esempio n. 5
0
from parsl.providers import SlurmProvider
from parsl.channels import SSHChannel
from parsl.launchers import SrunLauncher

# outside running on a Jupyter Notebook, submit via ssh channel
# NOTE(review): this snippet is truncated (the Config(...) call is never
# closed) and uses `...` placeholders; it is illustrative only.
config = Config(
    executors=[
        IPyParallelExecutor(
            ...
            provider=SlurmProvider(
                'debug',
                channel=SSHChannel(
                    hostname='login.sdumont.lncc.br',
                    username='******',
                    ...
Esempio n. 6
0
 # NOTE(review): fragment of a HighThroughputExecutor(...) call — the
 # opening and closing of the call are outside this snippet.
 address='js-17-185.jetstream-cloud.org',
 max_workers=864,
 #workers_per_node = 24, # Getting error for unexpexted argument
 cores_per_worker=1,
 worker_logdir_root='/home/aymen/parsl_scripts',
 interchange_address='comet-ln2.sdsc.edu',
 interchange_port_range=(50100, 50400),
 #client_ports=(50055, 50056, 50057), # Getting error for unexpexted argument
 worker_port_range=(50500, 51000),
 provider=SlurmProvider(
     launcher=SrunLauncher(),
     #'compute',
     channel=SSHChannel(
         hostname='comet-ln2.sdsc.edu',
         username=
         '******',  # Please replace USERNAME with your username
         password='******',
         script_dir=
         '/home/aymen/parsl_scripts',  # Please replace USERNAME with your username
     ),
     # launcher=SrunLauncher(),
     scheduler_options='',  # Input your scheduler_options if needed
     worker_init=
     'source /home/aymen/ve/parsl-env/bin/activate',  # Input your worker_init if needed
     partition="compute",
     walltime="00:30:00",
     init_blocks=1,
     max_blocks=1,
     #tasks_per_node = 24, # Getting error for unexpexted argument
     nodes_per_block=36,
     #cores_per_node=24, # Getting error for unexpexted argument
     parallelism=864,
Esempio n. 7
0
from parsl.config import Config
from parsl.executors import HighThroughputExecutor

# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
#       1) create a local `user_opts.py`, or
#       2) delete the user_opts import below and replace all appearances of `user_opts` with the literal value
#          (i.e., user_opts['swan']['username'] -> 'your_username')
from .user_opts import user_opts

# Swan HighThroughputExecutor configuration; credentials and site
# settings are pulled from the user_opts dictionary.
config = Config(executors=[
    HighThroughputExecutor(
        label='swan_htex',
        provider=TorqueProvider(
            channel=SSHChannel(
                hostname='swan.cray.com',
                username=user_opts['swan']['username'],
                script_dir=user_opts['swan']['script_dir'],
            ),
            nodes_per_block=1,
            init_blocks=1,
            max_blocks=1,
            launcher=AprunLauncher(),
            scheduler_options=user_opts['swan']['scheduler_options'],
            worker_init=user_opts['swan']['worker_init'],
        ),
    )
])
Esempio n. 8
0
from parsl.executors import HighThroughputExecutor
from parsl.config import Config

# Placeholder ad-hoc cluster settings: fill in your username, a
# writable script directory, and the hosts to run workers on.
user_opts = {
    'adhoc': {
        'username': '******',
        'script_dir': 'YOUR_SCRIPT_DIR',
        'remote_hostnames': ['REMOTE_HOST_URL_1', 'REMOTE_HOST_URL_2'],
    }
}

# HTEX over an AdHocProvider: one SSH channel per remote host.
config = Config(
    executors=[
        HighThroughputExecutor(
            label='remote_htex',
            max_workers=2,
            worker_logdir_root=user_opts['adhoc']['script_dir'],
            provider=AdHocProvider(
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',
                channels=[
                    SSHChannel(hostname=host,
                               username=user_opts['adhoc']['username'],
                               script_dir=user_opts['adhoc']['script_dir'])
                    for host in user_opts['adhoc']['remote_hostnames']
                ],
            ),
        )
    ],
    #  AdHoc Clusters should not be setup with scaling strategy.
    strategy=None,
)
from parsl.executors import HighThroughputExecutor

import os

# Config for an HTEX whose workers are launched through an SSH channel
# to localhost; blocks scale between 1 and 4.
remote_config = Config(executors=[
    HighThroughputExecutor(
        label='remote_htex',
        cores_per_worker=2,
        address=address_by_hostname(),
        provider=LocalProvider(
            min_blocks=1,
            init_blocks=1,
            max_blocks=4,
            nodes_per_block=1,
            parallelism=0.5,
            channel=SSHChannel(hostname="localhost"),
            #worker_init='source /phys/groups/tev/scratch3/users/gwatts/anaconda3/etc/profile.d/conda.sh && conda activate parsl',
            # Activates the conda env and adds the current working
            # directory to PYTHONPATH so workers can import local code.
            worker_init=
            'source /home/gwatts/anaconda3/etc/profile.d/conda.sh && export PYTHONPATH=$PYTHONPATH:{} && conda activate parsl_test'
            .format(os.getcwd()),
            move_files=False,
        ))
])
parsl.load(remote_config)

# Run this and print out the result
# Remove stale output from a previous run before re-running.
if os.path.isfile("all_hellos.txt"):
    os.unlink("all_hellos.txt")
r = run_cat_test()
# Block on the app's output-file future, then print its contents.
with open(r.outputs[0].result(), 'r') as f:
    print(f.read())
Esempio n. 10
0
from parsl.executors.ipp_controller import Controller

# This is an example config, make sure to
#        replace the specific values below with the literal values
#          (e.g., 'USERNAME' -> 'your_username')

# NOTE(review): this snippet is truncated — the closing ``]`` and ``)``
# of the Config(...) call are missing from the source.
config = Config(
    executors=[
        IPyParallelExecutor(
            label='comet_ipp_multinode',
            provider=SlurmProvider(
                'debug',
                channel=SSHChannel(
                    hostname='comet.sdsc.xsede.org',
                    username=
                    '******',  # Please replace USERNAME with your username
                    script_dir=
                    '/home/USERNAME/parsl_scripts',  # Please replace USERNAME with your username
                ),
                launcher=SrunLauncher(),
                scheduler_options='',  # Input your scheduler_options if needed
                worker_init='',  # Input your worker_init if needed
                walltime="00:10:00",
                init_blocks=1,
                max_blocks=1,
                nodes_per_block=2,
            ),
            controller=Controller(
                public_ip='PUBLIC_IP'
            ),  # Please replace PUBLIC_IP with your public ip
        )
Esempio n. 11
0
def multisite_nwchem_config() -> Config:
    """Experimental multi-site configuration.

    Builds a Config with two executors: "qc" on a Cobalt-scheduled
    cluster for NWChem work, and "ml" on a remote GPU host reached
    over SSH.  Scaling strategy is disabled and tasks retry once.
    """
    return Config(
        retries=1,
        executors=[
            # Quantum-chemistry executor: 8 nodes via Cobalt, one
            # worker (task) per node.
            HighThroughputExecutor(
                address=address_by_hostname(),
                label="qc",
                max_workers=8,  # One task per node
                provider=CobaltProvider(
                    cmd_timeout=120,
                    nodes_per_block=8,
                    account='CSC249ADCD08',
                    queue='debug-cache-quad',
                    walltime="1:00:00",
                    init_blocks=1,
                    max_blocks=1,
                    launcher=SimpleLauncher(),  # Places worker on the launch node
                    scheduler_options='#COBALT --attrs enable_ssh=1',
                    worker_init='''
module load miniconda-3
export PATH=~/software/psi4/bin:$PATH
conda activate /lus/theta-fs0/projects/CSC249ADCD08/edw/env

# NWChem settings
export PATH="/home/lward/software/nwchem-6.8.1/bin/LINUX64:$PATH"
module load atp
export MPICH_GNI_MAX_EAGER_MSG_SIZE=16384
export MPICH_GNI_MAX_VSHORT_MSG_SIZE=10000
export MPICH_GNI_MAX_EAGER_MSG_SIZE=131072
export MPICH_GNI_NUM_BUFS=300
export MPICH_GNI_NDREG_MAXSIZE=16777216
export MPICH_GNI_MBOX_PLACEMENT=nic
export MPICH_GNI_LMT_PATH=disabled
export COMEX_MAX_NB_OUTSTANDING=6
export LD_LIBRARY_PATH=/opt/intel/compilers_and_libraries_2018.0.128/linux/compiler/lib/intel64_lin:$LD_LIBRARY_PATH
''',
                ),
            ),
            # Machine-learning executor: a single worker on a remote
            # GPU host; fixed worker ports so an SSH tunnel can be used.
            HighThroughputExecutor(
                address='localhost',  # Using an SSH tunnel
                worker_ports=(54382, 54008),
                label="ml",
                max_workers=1,
                working_dir='/homes/lward/parsl',
                worker_logdir_root='/homes/lward/parsl',
                provider=LocalProvider(
                    channel=SSHChannel('lambda5.cels.anl.gov', script_dir='/home/lward/parsl'),
                    nodes_per_block=1,
                    init_blocks=1,
                    max_blocks=1,
                    launcher=SimpleLauncher(),
                    worker_init='''
source /homes/lward/miniconda3/etc/profile.d/conda.sh
conda activate colmena_full
export CUDA_VISIBLE_DEVICES=17  # Pins to a GPU worker
''',
                ),
            )
        ],
        strategy=None,
    )
Esempio n. 12
0
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor

# This is an example config, make sure to
#        replace the specific values below with the literal values
#          (e.g., 'USERNAME' -> 'your_username')

# Beagle multinode MPI example; replace the USERNAME placeholders with
# literal values before use.
config = Config(executors=[
    IPyParallelExecutor(
        label='beagle_multinode_mpi',
        provider=TorqueProvider(
            'debug',
            channel=SSHChannel(
                hostname='login4.beagle.ci.uchicago.edu',
                # Please replace USERNAME with your username
                username='******',
                # Please replace USERNAME with your username
                script_dir='/lustre/beagle2/USERNAME/parsl_scripts',
            ),
            nodes_per_block=1,
            tasks_per_node=1,
            init_blocks=1,
            max_blocks=1,
            launcher=AprunLauncher(),
            scheduler_options='',  # Input your scheduler_options if needed
            worker_init='',  # Input your worker_init if needed
        ),
    )
])
Esempio n. 13
0
from parsl.config import Config
from parsl.executors import HighThroughputExecutor, IPyParallelExecutor
from parsl.executors.ipp_controller import Controller
from parsl.addresses import address_by_hostname  #add this to your imports

# NOTE(review): this snippet is truncated — the SlurmProvider, executor
# list, and Config call are never closed in the source.
mcullaghcluster = Config(
    executors=[
        HighThroughputExecutor(
            label='mccullagh_cluster',
            cores_per_worker=20,
            address='192.168.175.16',
            provider=SlurmProvider(
                channel=SSHChannel(
                    hostname='login1.rc.colostate.edu',
                    username=
                    '******',  # Please replace USERNAME with your username
                    script_dir=
                    '/home/kavotaw/lustrefs/z15-adfr/adfr_parsl_efficiency_test/',
                    password=''  # Password goes here (or use keys)
                ),
                launcher=SrunLauncher(),
                scheduler_options="""#SBATCH --ntasks-per-node=20
#SBATCH --output=/mnt/lustre_fs/users/kavotaw/z15-adfr/adfr_parsl_efficiency_test/test.out
#SBATCH --error=/mnt/lustre_fs/users/kavotaw/z15-adfr/adfr_parsl_efficiency_test/test.err""",
                worker_init="""source activate parsl_py36
export PYTHONPATH="/mnt/lustre_fs/users/kavotaw/z15-adfr/adfr_parsl_efficiency_test/"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/mnt/lustre_fs/users/kavotaw/adfr-vs-vina/install_mgl/mgltools2_x86_64Linux2_1.1/lib/"
export MGL_ROOT="/mnt/lustre_fs/users/kavotaw/adfr-vs-vina/install_mgl/mgltools2_x86_64Linux2_1.1/"
PATH="$MGL_ROOT/bin:$PATH"
pwd
cd /mnt/lustre_fs/users/kavotaw/z15-adfr/adfr_parsl_efficiency_test/""",
                walltime="24:00:00",
Esempio n. 14
0
from parsl.tests.utils import get_rundir

# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
#       1) create a local `user_opts.py`, or
#       2) delete the user_opts import below and replace all appearances of `user_opts` with the literal value
#          (i.e., user_opts['swan']['username'] -> 'your_username')
from .user_opts import user_opts

# Comet multinode IPyParallel configuration; site settings come from
# the user_opts dictionary.
config = Config(
    executors=[
        IPyParallelExecutor(
            label='comet_ipp_multinode',
            workers_per_node=1,
            provider=SlurmProvider(
                'debug',
                channel=SSHChannel(
                    hostname='comet.sdsc.xsede.org',
                    username=user_opts['comet']['username'],
                    script_dir=user_opts['comet']['script_dir'],
                ),
                launcher=SrunLauncher(),
                scheduler_options=user_opts['comet']['scheduler_options'],
                worker_init=user_opts['comet']['worker_init'],
                walltime="00:10:00",
                init_blocks=1,
                max_blocks=1,
                nodes_per_block=2,
            ),
            controller=Controller(public_ip=user_opts['public_ip']),
        )
    ],
    run_dir=get_rundir(),
)
Esempio n. 15
0
from parsl.channels import SSHChannel
from parsl.launchers import SrunLauncher
from parsl.providers import SlurmProvider
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_query

# NOTE(review): this snippet is truncated — the closing ``])`` of the
# Config(...) call is missing from the source.
config = Config(executors=[
    HighThroughputExecutor(
        label='Comet_HTEX_multinode',
        address=address_by_query(),
        worker_logdir_root='YOUR_LOGDIR_ON_COMET',
        provider=SlurmProvider(
            'debug',
            channel=SSHChannel(
                hostname='comet.sdsc.xsede.org',
                username='******',
                script_dir='YOUR_SCRIPT_DIR',
            ),
            launcher=SrunLauncher(),
            # string to prepend to #SBATCH blocks in the submit
            # script to the scheduler
            scheduler_options='',
            # Command to be run before starting a worker, such as:
            # 'module load Anaconda; source activate parsl_env'.
            worker_init='',
            walltime='00:10:00',
            init_blocks=1,
            max_blocks=1,
            nodes_per_block=2,
        ),
    )
Esempio n. 16
0
 # Per-step resource request: pick the node count and build the
 # scheduler options string, then choose a local or SSH channel.
 node_count = int(node_counts[step])
 print("Requesting {} nodes for step \"{}\"".format(node_count, step))
 options = base_options
 options += f"#SBATCH -c {cores_per_node[step]}\n"
 if args.parsl_path is not None:
     options += "PATH=\"{}:$PATH\"\nexport PATH\n".format(args.parsl_path)
 if step in gpu_steps:
     options += str(args.gpu_options) + '\n'
 if args.local_host_only:
     channel = LocalChannel()
 else:
     if hostnames[step] is None:
         # BUG FIX: the message has two {} placeholders but only one
         # argument was supplied, so this line raised IndexError
         # instead of the intended error message.
         raise Exception('To run step {} on a remote host, please set the argument --{}_hostname'.format(step, step))
     channel = SSHChannel(
             hostname=hostnames[step],
             username=args.unix_username,
             gssapi_auth=args.gssapi,
             )
 # NOTE(review): truncated fragment — the SlurmProvider(...) and
 # HighThroughputExecutor(...) calls are never closed in the source.
 if args.scheduler_name == 'slurm':
     executors.append(HighThroughputExecutor(
                 label=step,
                 worker_debug=True,
                 address=address_by_hostname(),
                 cores_per_worker=vCPUs_per_core*int(cores_per_task[step]),
                 provider=SlurmProvider(
                     args.scheduler_partition,
                     channel=channel,
                     launcher=SrunLauncher(),
                     nodes_per_block=node_count,
                     worker_init=worker_init,
                     init_blocks=1,
0
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller

# This is an example config, make sure to
#        replace the specific values below with the literal values
#          (e.g., 'USERNAME' -> 'your_username')

# Cori multinode IPyParallel example; replace the USERNAME and
# PUBLIC_IP placeholders with literal values before use.
config = Config(executors=[
    IPyParallelExecutor(
        label='cori_ipp_multinode',
        provider=SlurmProvider(
            'debug',
            channel=SSHChannel(
                hostname='cori.nersc.gov',
                # Please replace USERNAME with your username
                username='******',
                # Please replace USERNAME with your username
                script_dir='/global/homes/y/USERNAME/parsl_scripts',
            ),
            nodes_per_block=2,
            init_blocks=1,
            max_blocks=1,
            scheduler_options='',  # Input your scheduler_options if needed
            worker_init='',  # Input your worker_init if needed
            launcher=SrunLauncher(),
        ),
        # Please replace PUBLIC_IP with your public ip
        controller=Controller(public_ip='PUBLIC_IP'),
    )
])
Esempio n. 18
0
from parsl.executors.ipp import IPyParallelExecutor
from parsl.tests.utils import get_rundir

# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
#       1) create a local `user_opts.py`, or
#       2) delete the user_opts import below and replace all appearances of `user_opts` with the literal value
#          (i.e., user_opts['swan']['username'] -> 'your_username')
from .user_opts import user_opts

# Beagle multinode MPI example; site settings come from user_opts.
config = Config(
    executors=[
        IPyParallelExecutor(
            label='beagle_multinode_mpi',
            workers_per_node=1,
            provider=TorqueProvider(
                'debug',
                channel=SSHChannel(
                    hostname='login4.beagle.ci.uchicago.edu',
                    username=user_opts['beagle']['username'],
                    script_dir="/lustre/beagle2/{}/parsl_scripts".format(
                        user_opts['beagle']['username']),
                ),
                nodes_per_block=1,
                init_blocks=1,
                max_blocks=1,
                launcher=AprunLauncher(),
                scheduler_options=user_opts['beagle']['scheduler_options'],
                worker_init=user_opts['beagle']['worker_init'],
            ),
        )
    ],
    run_dir=get_rundir(),
)
Esempio n. 19
0
from parsl.executors import HighThroughputExecutor

# Login nodes used as an ad-hoc pool of workers.
remotes = [
    'midway2-login2.rcc.uchicago.edu',
    'midway2-login1.rcc.uchicago.edu',
]

# One SSH channel per login node, all feeding a single HTEX.
config = Config(executors=[
    HighThroughputExecutor(
        label='AdHoc',
        max_workers=2,
        worker_logdir_root="/scratch/midway2/yadunand/parsl_scripts",
        provider=AdHocProvider(
            worker_init="source /scratch/midway2/yadunand/parsl_env_setup.sh",
            channels=[
                SSHChannel(hostname=host,
                           username="******",
                           script_dir="/scratch/midway2/yadunand/parsl_cluster")
                for host in remotes
            ],
        ),
    )
])


@python_app
def platform(sleep=2, stdout=None):
    """Sleep for *sleep* seconds on a worker, then return its uname."""
    # Imported inside the app body so the names resolve on the remote
    # worker process (the local `platform` import shadows this app's
    # name only within the function scope).
    import platform
    import time
    time.sleep(sleep)
    return platform.uname()


def test_raw_provider():
Esempio n. 20
0
from rynner.rynner import Rynner


# FIX(review): the original first line was corrupted by a credential
# scrubber — `input("Enter your username: "******"Enter the domain: ")`
# is not valid Python.  Reconstructed as two separate prompts; the
# `domain` variable is required below.
username = input("Enter your username: ")
domain = input("Enter the domain: ")

# Confirm before opening the SSH connection.
connect_ = input(f'Connect as {username} at {domain}? [Yy/Yes/No/Nn]:')
if connect_.lower() not in ('y', 'yes', ''):
    print('No connection established, exiting.')
    exit(1)
else:
    # Slurm provider submitting through an SSH channel to the host.
    provider = SlurmProvider(
        'compute',
        channel=SSHChannel(
            hostname=domain,
            username=username,
            script_dir='/tmp'
        ),
        nodes_per_block=1,
        # tasks_per_node=1,  # fixme doesn't exist anymore?
        init_blocks=1,
        max_blocks=1
    )

    rynner = Rynner(provider)

    # Stage Makefile up, run a trivial command, and fetch the result.
    run = rynner.create_run(
        script='cat Makefile > tmps', uploads=['Makefile'], downloads=['tmps'])

    rynner.upload(run)
    print('upload')
Esempio n. 21
0
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller

# This is an example config, make sure to
#        replace the specific values below with the literal values
#          (e.g., 'USERNAME' -> 'your_username')

# NOTE(review): this snippet is truncated — the Controller(...),
# executor list, and Config(...) calls are never closed in the source.
config = Config(executors=[
    IPyParallelExecutor(
        provider=SlurmProvider(
            'westmere',
            channel=SSHChannel(
                hostname='swift.rcc.uchicago.edu',
                username=
                '******',  # Please replace USERNAME with your username
                script_dir=
                '/scratch/midway2/USERNAME/parsl_scripts',  # Please replace USERNAME with your username
            ),
            init_blocks=1,
            min_blocks=1,
            max_blocks=2,
            nodes_per_block=1,
            tasks_per_node=4,
            parallelism=0.5,
            scheduler_options='',  # Input your scheduler_options if needed
            worker_init='',  # Input your worker_init if needed
        ),
        label='midway_ipp',
        controller=Controller(
            public_ip='PUBLIC_IP'
Esempio n. 22
0
# If you are a user copying-and-pasting this as an example, make sure to either
#       1) create a local `user_opts.py`, or
#       2) delete the user_opts import below and replace all appearances of `user_opts` with the literal value
#          (i.e., user_opts['swan']['username'] -> 'your_username')
from .user_opts import user_opts

# NOTE(review): this snippet is truncated — the closing ``)`` of the
# Config(...) call is missing from the source.
config = Config(
    executors=[
        IPyParallelExecutor(
            label='midway_ipp_multicore',
            workers_per_node=4,
            provider=SlurmProvider(
                'westmere',
                channel=SSHChannel(
                    hostname='swift.rcc.uchicago.edu',
                    username=user_opts['midway']['username'],
                    script_dir=user_opts['midway']['script_dir']
                ),
                scheduler_options=user_opts['midway']['scheduler_options'],
                worker_init=user_opts['midway']['worker_init'],
                nodes_per_block=1,
                walltime="00:05:00",
                init_blocks=1,
                max_blocks=1,
                launcher=SingleNodeLauncher(),
            ),
            controller=Controller(public_ip=user_opts['public_ip']),
        )

    ],
    run_dir=get_rundir()
Esempio n. 23
0
from parsl.providers import LocalProvider
from parsl.channels import SSHChannel
from parsl.executors import HighThroughputExecutor

from parsl.config import Config

# Username on the remote midway2 login nodes.
username = "******"

# Each login node gets its own HighThroughputExecutor, connected
# through a LocalProvider whose channel is an SSH connection.
remotes = [
    'midway2-login1.rcc.uchicago.edu',
    'midway2-login2.rcc.uchicago.edu',
]

config = Config(
    executors=[
        HighThroughputExecutor(
            label='remote_htex_{}'.format(host),
            cores_per_worker=4,
            worker_debug=False,
            address="128.135.112.73",
            provider=LocalProvider(
                init_blocks=1,
                nodes_per_block=1,
                parallelism=0.5,
                worker_init="source /scratch/midway2/yadunand/parsl_env_setup.sh",
                channel=SSHChannel(
                    hostname=host,
                    username=username,
                    script_dir="/scratch/midway2/{}/parsl_tests/".format(username),
                ),
            ),
        )
        for host in remotes
    ],
)
Esempio n. 24
0
from parsl.config import Config
from parsl.providers import LocalProvider
from parsl.channels import SSHChannel
from parsl.addresses import address_by_hostname
from parsl.executors import HighThroughputExecutor

# Hosts that make up the ad-hoc pool; one executor is built per host.
hostnames = ['host-1', 'host-2']

config = Config(
    executors=[
        HighThroughputExecutor(
            label='htex_{}'.format(host),
            worker_debug=False,
            address=address_by_hostname(),
            provider=LocalProvider(
                # The username on the machines depend on the distribution
                # used, for eg. on Ubuntu, username is 'ubuntu'
                channel=SSHChannel(hostname=host, username='******'),
                # set to True if there is no shared filesystem
                move_files=False,
                nodes_per_block=1,
                init_blocks=1,
                min_blocks=1,
                max_blocks=1,
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',
            ),
        )
        for host in hostnames
    ],
    strategy=None,
)