Example #1
File: midway.py Project: tuhz/parsl
def fresh_config():
    config = Config(
        executors=[
            HighThroughputExecutor(
                label='Midway_HTEX_multinode',
                worker_debug=False,
                address=address_by_hostname(),
                max_workers=1,
                provider=SlurmProvider(
                    'broadwl',  # Partition name, e.g. 'broadwl'
                    launcher=SrunLauncher(),
                    nodes_per_block=2,
                    init_blocks=1,
                    min_blocks=1,
                    max_blocks=1,
                    # string to prepend to #SBATCH blocks in the submit
                    # script to the scheduler, e.g.: '#SBATCH --constraint=knl,quad,cache'
                    scheduler_options='',
                    # Command to be run before starting a worker, such as:
                    # 'module load Anaconda; source activate parsl_env'.
                    worker_init=user_opts['midway']['worker_init'],
                    walltime='00:30:00',
                    cmd_timeout=120,
                ),
            )
        ], )
    return config
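
For context, a minimal sketch of how a fresh_config() like the one above is typically consumed (assuming Parsl is installed; user_opts is defined elsewhere in the original file):

import parsl

# Loading the config returns a DataFlowKernel that manages task execution;
# apps submitted after this point run inside the Midway Slurm allocation.
dfk = parsl.load(fresh_config())
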
Example #2
File: frontera.py Project: tuhz/parsl
def fresh_config():
    return Config(
        executors=[
            HighThroughputExecutor(
                label="frontera_htex",
                address=address_by_hostname(),
                max_workers=1,
                provider=SlurmProvider(
                    cmd_timeout=60,  # Add extra time for slow scheduler responses
                    channel=LocalChannel(),
                    nodes_per_block=2,
                    init_blocks=1,
                    min_blocks=1,
                    max_blocks=1,
                    partition='development',  # Replace with partition name
                    scheduler_options=user_opts['frontera']['scheduler_options'],

                    # Command to be run before starting a worker, such as:
                    # 'module load Anaconda; source activate parsl_env'.
                    worker_init=user_opts['frontera']['worker_init'],

                    # Ideally we set the walltime to the longest supported walltime.
                    walltime='00:10:00',
                    launcher=SrunLauncher(),
                ),
            )
        ], )
Example #3
def fresh_config():
    return Config(executors=[
        HighThroughputExecutor(
            label='Cori_HTEX_multinode',
            max_workers=1,
            # This is the network interface on the login node to
            # which compute nodes can communicate
            address=address_by_interface('bond0.144'),
            provider=SlurmProvider(
                'debug',  # Partition / QOS
                nodes_per_block=2,
                init_blocks=1,
                # string to prepend to #SBATCH blocks in the submit
                # script to the scheduler, e.g.: '#SBATCH --constraint=knl,quad,cache'
                scheduler_options=user_opts['cori']['scheduler_options'],

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init=user_opts['cori']['worker_init'],

                # We request all hyperthreads on a node.
                launcher=SrunLauncher(overrides='-c 272'),
                walltime='00:10:00',
                # Slurm scheduler on Cori can be slow at times,
                # increase the command timeouts
                cmd_timeout=120,
            ),
        )
    ])
Example #4
def slurm_config(
    cores_per_job=16,
    mem_per_core=2048,
    jobs_per_worker=1,
    initial_workers=4,
    max_workers=8,
    work_dir="./",
    grid_proxy_dir="/tmp",
    partition="",
    walltime="02:00:00",
    htex_label="coffea_parsl_slurm_htex",
):

    shutil.copy2(osp.join(grid_proxy_dir, x509_proxy), osp.join(work_dir, x509_proxy))

    wrk_init = """
    export XRD_RUNFORKHANDLER=1
    export X509_USER_PROXY=%s
    """ % (
        osp.join(work_dir, x509_proxy)
    )

    sched_opts = """
    #SBATCH --cpus-per-task=%d
    #SBATCH --mem-per-cpu=%d
    """ % (
        cores_per_job,
        mem_per_core,
    )

    slurm_htex = Config(
        executors=[
            HighThroughputExecutor(
                label=htex_label,
                address=address_by_hostname(),
                prefetch_capacity=0,
                max_workers=cores_per_job,
                provider=SlurmProvider(
                    channel=LocalChannel(),
                    launcher=SrunLauncher(),
                    init_blocks=initial_workers,
                    max_blocks=max_workers,
                    nodes_per_block=jobs_per_worker,
                    partition=partition,
                    scheduler_options=sched_opts,  # Enter scheduler_options if needed
                    worker_init=wrk_init,  # Enter worker_init if needed
                    walltime=walltime,
                ),
            )
        ],
        strategy=None,
    )

    return slurm_htex
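
A hedged usage sketch for the factory above, assuming Parsl is installed and that x509_proxy is the module-level proxy filename used in the original file; the partition and directories are placeholders:

import parsl

cfg = slurm_config(
    cores_per_job=16,
    partition='your_partition',   # placeholder: replace with a real Slurm partition
    grid_proxy_dir='/tmp',        # directory that already holds the x509 proxy file
    work_dir='./',                # the proxy file is copied here before workers start
)
parsl.load(cfg)
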
Example #5
def configure(memory=2048, nprocs=8, nodes=15):
    '''Configure and load the Parsl Slurm configuration.
    arguments:
      * memory: memory per CPU in MB, passed to --mem-per-cpu (default: 2048)
      * nprocs: number of cores per node (default: 8)
      * nodes: number of nodes (default: 15)
    '''
    wrk_init = f'''
    export XRD_RUNFORKHANDLER=1
    export X509_USER_PROXY={os.environ['X509_USER_PROXY']}
    '''

    sched_opts = f'''
    #SBATCH --cpus-per-task={nprocs}
    #SBATCH --mem-per-cpu={memory}
    '''

    slurm_htex = Config(
        executors=[
            HighThroughputExecutor(
                label="coffea_parsl_slurm",
                address=address_by_hostname(),
                prefetch_capacity=0,
                max_workers=nprocs,
                provider=SlurmProvider(
                    channel=LocalChannel(),
                    launcher=SrunLauncher(),
                    init_blocks=nodes,
                    max_blocks=nodes * 2,
                    nodes_per_block=1,
                    partition='all',
                    scheduler_options=sched_opts,  # Enter scheduler_options if needed
                    worker_init=wrk_init,  # Enter worker_init if needed
                    walltime='00:30:00'),
            )
        ],
        #retries=3,
        strategy=None,
    )

    # parsl.set_stream_logger() # <-- log everything to stdout, WAAAAY too much

    return parsl.load(slurm_htex)
Example #6
def fresh_config():
    return Config(executors=[
        HighThroughputExecutor(
            label='Comet_HTEX_multinode',
            max_workers=1,
            provider=SlurmProvider(
                'debug',
                launcher=SrunLauncher(),
                # string to prepend to #SBATCH blocks in the submit
                # script to the scheduler
                scheduler_options=user_opts['comet']['scheduler_options'],

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init=user_opts['comet']['worker_init'],
                walltime='00:10:00',
                init_blocks=1,
                max_blocks=1,
                nodes_per_block=2,
            ),
        )
    ])
Example #7
File: cori.py Project: LSSTDESC/ceci
    def configure_for_parsl(self):
        """Utility function to set parsl configuration parameters"""
        from parsl.executors import IPyParallelExecutor
        from parsl.providers import SlurmProvider

        # Get the site details that we need
        cpu_type = self.config.get("cpu_type", "haswell")
        queue = self.config.get("queue", "debug")
        max_slurm_jobs = self.config.get("max_jobs", 2)
        account = self.config.get("account")
        if account is None:
            print(
                "Using LSST DESC account. Specify 'account' in the site config to override"
            )
            account = "m1727"
        walltime = self.config.get("walltime", "00:30:00")
        setup_script = self.config.get(
            "setup",
            "/global/projecta/projectdirs/lsst/groups/WL/users/zuntz/setup-cori",
        )

        provider = SlurmProvider(
            partition=queue,  # queue/partition taken from the site config
            min_blocks=0,  # allow scaling down to zero Slurm jobs when idle
            max_blocks=max_slurm_jobs,  # upper limit on concurrent Slurm jobs
            scheduler_options=f"#SBATCH --constraint={cpu_type}\n"
            f"#SBATCH --account={account}\n"
            f"#SBATCH --walltime={walltime}\n",
            nodes_per_block=1,
            init_blocks=1,
            worker_init=f"source {setup_script}",
        )

        executor = IPyParallelExecutor(  # pylint: disable=abstract-class-instantiated
            label="cori-batch",
            provider=provider,
        )

        self.info["executor"] = executor
Example #8
from parsl.config import Config

from parsl.providers import SlurmProvider
from parsl.channels import LocalChannel
from parsl.executors import HighThroughputExecutor
from parsl.launchers import SingleNodeLauncher
from parsl.addresses import address_by_hostname

from parsl.data_provider.scheme import GlobusScheme

config = Config(executors=[
    HighThroughputExecutor(
        label="midway_htex",
        worker_debug=True,
        address=address_by_hostname(),
        provider=SlurmProvider('broadwl',
                               launcher=SingleNodeLauncher(),
                               worker_init='source activate parsl_dev',
                               init_blocks=1,
                               max_blocks=1,
                               min_blocks=1,
                               nodes_per_block=1,
                               walltime='0:30:00'),
        storage_access=[
            GlobusScheme(endpoint_uuid="af7bda53-6d04-11e5-ba46-22000b92c6ec",
                         endpoint_path="/",
                         local_path="/")
        ])
], )
""" This config assumes that it is used to launch parsl tasks from the login nodes
of Frontera at TACC. Each job submitted to the scheduler will request 2 nodes for 10 minutes.
"""
config = Config(
    executors=[
        HighThroughputExecutor(
            label="frontera_htex",
            address=address_by_hostname(),
            max_workers=30,  # Set number of workers per node
            provider=SlurmProvider(
                cmd_timeout=120,  # Add extra time for slow scheduler responses
                nodes_per_block=40,
                walltime='02:00:00',
                partition='development',  # Replace with partition name
                init_blocks=1,
                min_blocks=1,
                max_blocks=3,
                scheduler_options='''#SBATCH -A FTA-Jha''',  # Enter scheduler_options if needed

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='source ~/anaconda3/bin/activate; conda activate candle_py3.7',
                launcher=SrunLauncher(),
            ),
        )
    ],
    strategy='simple',
)
Example #10
from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.channels import SSHChannel
from parsl.launchers import SrunLauncher
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller

# This is an example config, make sure to
#        replace the specific values below with the literal values
#          (e.g., 'USERNAME' -> 'your_username')

config = Config(
    executors=[
        IPyParallelExecutor(
            label='cori_ipp_multinode',
            provider=SlurmProvider(
                'debug',
                channel=SSHChannel(
                    hostname='cori.nersc.gov',
                    username='******',     # Please replace USERNAME with your username
                    script_dir='/global/homes/y/USERNAME/parsl_scripts',    # Please replace USERNAME with your username
                ),
                nodes_per_block=2,
                init_blocks=1,
                max_blocks=1,
                scheduler_options='',     # Input your scheduler_options if needed
                worker_init='',     # Input your worker_init if needed
                launcher=SrunLauncher(),
            ),
            controller=Controller(public_ip='PUBLIC_IP'),    # Please replace PUBLIC_IP with your public ip
        )
    ],
)
Example #11
        from parsl.config import Config
        from parsl.executors import HighThroughputExecutor
        from parsl.launchers import SrunLauncher
        from parsl.addresses import address_by_hostname, address_by_query

        if 'slurm' in args.executor:
            htex_config = Config(
                executors=[
                    HighThroughputExecutor(
                        label="coffea_parsl_slurm",
                        address=address_by_hostname(),
                        prefetch_capacity=0,
                        provider=SlurmProvider(
                            channel=LocalChannel(script_dir='logs_parsl'),
                            launcher=SrunLauncher(),
                            max_blocks=(args.scaleout) + 10,
                            init_blocks=args.scaleout,
                            partition='all',
                            worker_init="\n".join(env_extra),
                            walltime='00:120:00'),
                    )
                ],
                retries=20,
            )
        elif 'condor' in args.executor:
            htex_config = Config(executors=[
                HighThroughputExecutor(
                    label='coffea_parsl_condor',
                    address=address_by_query(),
                    # max_workers=1,
                    provider=CondorProvider(
                        nodes_per_block=1,
Example #12
from parsl.config import Config

from parsl.channels import LocalChannel
from parsl.providers import SlurmProvider
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_hostname

from parsl.data_provider.scheme import GlobusScheme

config = Config(executors=[
    HighThroughputExecutor(
        label="s2_htex",
        worker_debug=True,
        address=address_by_hostname(),
        provider=SlurmProvider(channel=LocalChannel(),
                               nodes_per_block=1,
                               init_blocks=1,
                               min_blocks=1,
                               max_blocks=1,
                               partition='skx-normal',
                               scheduler_options='''#SBATCH -A Parsl-Eval''',
                               worker_init='''source activate parsl-test''',
                               walltime='00:30:00'),
        storage_access=[
            GlobusScheme(endpoint_uuid="ceea5ca0-89a9-11e7-a97f-22000a92523b",
                         endpoint_path="/",
                         local_path="/")
        ])
], )
Example #13
import os

from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_hostname
from parsl.monitoring.monitoring import MonitoringHub

config = Config(
    executors=[
        HighThroughputExecutor(
            cores_per_worker=4,
            mem_per_worker=40,
            max_workers=4,
            worker_debug=True,
            address=address_by_hostname(),
            provider=SlurmProvider(
                'daenerys',
                worker_init=("source activate /cephfs/users/jbreynier/conda/parsl_env2 ; "
                             "export PYTHONPATH='{}:$PYTHONPATH'").format(os.getcwd()),
                init_blocks=1,
                max_blocks=10,
                min_blocks=0,
                nodes_per_block=1,
                walltime='99:00:00',
                scheduler_options='#SBATCH --exclude=kg15-11 --cpus-per-task=16 --mem=160gb --time=99:00:00',
            ),
        ),
    ],
    monitoring=MonitoringHub(
        hub_address=address_by_hostname(),
        hub_port=55055,
        monitoring_debug=False,
        resource_monitoring_interval=10,
    ),
    checkpoint_mode='task_exit',
)
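
Because the config above sets checkpoint_mode='task_exit', checkpoints only record results for apps with caching enabled; a brief, illustrative sketch (the expensive_step app is hypothetical, not part of the original file):

import parsl
from parsl import python_app

parsl.load(config)

@python_app(cache=True)   # caching must be enabled for checkpointing to capture results
def expensive_step(x):
    return x ** 2

print(expensive_step(12).result())   # the result is written to a checkpoint when the task exits

Reloading those checkpoints on a later run additionally requires passing checkpoint_files to the Config.
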
Example #14
sched_opts = '''
#SBATCH --cpus-per-task=%d
#SBATCH --mem-per-cpu=%d
''' % (
    nproc,
    twoGB,
)

slurm_htex = Config(
    executors=[
        HighThroughputExecutor(
            label="coffea_parsl_slurm",
            address=address_by_hostname(),
            prefetch_capacity=0,
            max_workers=nproc,
            provider=SlurmProvider(
                launcher=SrunLauncher(),
                init_blocks=4,
                max_blocks=4,
                nodes_per_block=1,
                partition='batch,guest,gpu',
                scheduler_options=sched_opts,  # Enter scheduler_options if needed
                worker_init=wrk_init,  # Enter worker_init if needed
                walltime='02:00:00'),
        )
    ],
    strategy=None,
)
Example #15
from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.addresses import address_by_hostname
from parsl.executors import HighThroughputExecutor

config = Config(executors=[
    HighThroughputExecutor(
        worker_debug=True,
        max_workers=7,
        address=address_by_hostname(),
        provider=SlurmProvider(
            'daenerys',
            nodes_per_block=1,
            init_blocks=15,
            max_blocks=15,
            # worker_init='docker stop $(docker ps -aq); export PYTHONPATH=$PYTHONPATH:/cephfs/users/annawoodard/.local/lib/python3.7/site-packages',
            # worker_init='docker stop $(docker ps -aq); export PYTHONPATH=$PYTHONPATH:/cephfs/users/annawoodard/.local/lib/python3.7/site-packages; docker pull olopadelab/polyfuse',
            worker_init=
            'docker stop $(docker ps -aq); export PYTHONPATH=$PYTHONPATH:/cephfs/users/annawoodard/.local/lib/python3.7/site-packages; docker load -i /cephfs/users/annawoodard/polyfuse/docker/polyfuse.tar',
            walltime='48:00:00'),
    )
])
Example #16
from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.launchers import SrunLauncher
from parsl.executors import HighThroughputExecutor

config = Config(
    executors=[
        HighThroughputExecutor(
            label='Midway_HTEX_multinode',
            worker_debug=False,
            max_workers=2,
            provider=SlurmProvider(
                'YOUR_PARTITION',  # Partition name, e.g. 'broadwl'
                launcher=SrunLauncher(),
                nodes_per_block=2,
                init_blocks=1,
                min_blocks=1,
                max_blocks=1,
                # string to prepend to #SBATCH blocks in the submit
                # script to the scheduler, e.g.: '#SBATCH --constraint=knl,quad,cache'
                scheduler_options='',
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',
                walltime='00:30:00'),
        )
    ], )
Example #17
    worker_debug=True,

    # this overrides the default HighThroughputExecutor process workers
    # with process workers run inside the appropriate shifter container
    # with lsst setup commands executed. That means that everything
    # running in those workers will inherit the correct environment.
    max_workers=8,
    heartbeat_period=25,
    heartbeat_threshold=75,
    provider=SlurmProvider(
        cori_queue,
        nodes_per_block=compute_nodes,
        exclusive=True,
        init_blocks=0,
        min_blocks=0,
        max_blocks=4,
        scheduler_options="""#SBATCH --constraint=haswell""",
        launcher=SrunLauncher(),
        cmd_timeout=60,
        walltime=walltime,
        worker_init=worker_init,
        parallelism=1.0),
)

cori_queue_executor_2 = HighThroughputExecutor(
    label='batch-2',
    address=address_by_hostname(),
    worker_debug=True,

    # this overrides the default HighThroughputExecutor process workers
    # with process workers run inside the appropriate shifter container
Example #18
"""
config = Config(
    executors=[
        HighThroughputExecutor(
            label="sdumont_htex_cpu_1w",
            address=address_by_hostname(),
            #address=address_by_interface('ib0'),
            max_workers=1,  # Set number of workers per node
            provider=SlurmProvider(
                cmd_timeout=120,  # Add extra time for slow scheduler responses
                nodes_per_block=38,
                walltime='96:00:00',
                partition='cpu',
                init_blocks=1,
                min_blocks=1,
                max_blocks=1,

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init=
                'module load samtools/1.9; module load bowtie2/2.3; module load bedtools/2.29.0; module load R/3.5.2_openmpi_2.0_gnu; module load perl/5.20',
                launcher=SrunLauncher(),
            ),
        ),
        HighThroughputExecutor(
            label="sdumont_htex_cpu_small_24w",
            address=address_by_hostname(),
            #address=address_by_interface('ib0'),
            max_workers=24,  # Set number of workers per node
            provider=SlurmProvider(
                cmd_timeout=120,  # Add extra time for slow scheduler responses
Example #19
# Do not modify:
TOTAL_WORKERS = int((NODES_PER_JOB*GPUS_PER_NODE)/GPUS_PER_WORKER)
WORKERS_PER_NODE = int(GPUS_PER_NODE / GPUS_PER_WORKER)
GPU_MAP = ','.join([str(x) for x in range(1,TOTAL_WORKERS + 1)])

config = Config(
    executors=[
        HighThroughputExecutor(
            label="fe.cs.uchicago",
            address=address_by_hostname(),
            max_workers=1,  # Sets #workers per manager.
            provider=SlurmProvider(
                channel=LocalChannel(),
                nodes_per_block=NODES_PER_JOB,
                init_blocks=1,
                partition='geforce',
                scheduler_options=f'#SBATCH --gpus-per-node=rtx2080ti:{GPUS_PER_NODE}',
                # Launch 4 managers per node, each bound to 1 GPU
                # This is a hack. We use hostname ; to terminate the srun command, and start our own
                # DO NOT MODIFY unless you know what you are doing.
                launcher=SrunLauncher(overrides=(f'hostname; srun --ntasks={TOTAL_WORKERS} '
                                                 f'--ntasks-per-node={WORKERS_PER_NODE} '
                                                 f'--gpus-per-task=rtx2080ti:{GPUS_PER_WORKER} '
                                                 f'--gpu-bind=map_gpu:{GPU_MAP}')
                ),
                walltime='01:00:00',
            ),
        )
    ],
)
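
To make the derived quantities in the example above concrete, a small worked example under assumed values (NODES_PER_JOB=1, GPUS_PER_NODE=4, GPUS_PER_WORKER=1; illustrative only, not taken from the original file):

NODES_PER_JOB = 1
GPUS_PER_NODE = 4
GPUS_PER_WORKER = 1

TOTAL_WORKERS = int((NODES_PER_JOB * GPUS_PER_NODE) / GPUS_PER_WORKER)  # -> 4 srun tasks per block
WORKERS_PER_NODE = int(GPUS_PER_NODE / GPUS_PER_WORKER)                 # -> 4 managers per node
GPU_MAP = ','.join([str(x) for x in range(1, TOTAL_WORKERS + 1)])       # -> '1,2,3,4' for --gpu-bind=map_gpu
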
Example #20
 managed=True,
 max_workers=1,
 mem_per_worker=None,
 poll_period=10,
 prefetch_capacity=0,
 provider=SlurmProvider(
     'debug',
     channel=OAuthSSHChannel(
         'dcde-ext.ornl.gov',
         envs={},
         port=2222,
         script_dir='/home/dcde1000006/ornl-parsl-scripts',
         username='******'),
     cmd_timeout=10,
     exclusive=True,
     init_blocks=1,
     # launcher=SingleNodeLauncher(),
     max_blocks=10,
     min_blocks=0,
     move_files=True,
     nodes_per_block=1,
     parallelism=1,
     scheduler_options='#SBATCH --exclusive\naccounting_group = group_sdcc.main',
     walltime='00:10:00',
     worker_init='source /home/dcde1000001/dcdesetup.sh'),
 storage_access=[],
 suppress_failure=False,
 worker_debug=True,
 worker_logdir_root='/home/dcde1000006/parsl_scripts/logs',
 worker_port_range=(50000, 51000),
Example #21
from parsl.config import Config
from parsl.executors import FluxExecutor
from parsl.providers import SlurmProvider
from parsl.launchers import SrunLauncher


config = Config(
    executors=[
        FluxExecutor(
            provider=SlurmProvider(
                partition="YOUR_PARTITION",  # e.g. "pbatch", "pdebug"
                account="YOUR_ACCOUNT",
                launcher=SrunLauncher(overrides="--mpibind=off"),
                nodes_per_block=1,
                init_blocks=1,
                min_blocks=1,
                max_blocks=1,
                walltime="00:30:00",
                # string to prepend to #SBATCH blocks in the submit
                # script to the scheduler, e.g.: '#SBATCH -t 50'
                scheduler_options='',
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',
                cmd_timeout=120,
            ),
        )
    ]
)
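
Once a complete config such as the one above has been loaded, work is submitted through Parsl apps; a minimal, generic sketch (the add app is illustrative and not part of the original example):

import parsl
from parsl import python_app

parsl.load(config)

@python_app
def add(a, b):
    return a + b

future = add(1, 2)      # submission returns an AppFuture immediately
print(future.result())  # blocks until the task has run on the allocation
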
Example #22
# This is an example config, make sure to
#        replace the specific values below with the literal values
#          (e.g., 'USERNAME' -> 'your_username')
from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.channels import SSHChannel
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller

config = Config(executors=[
    IPyParallelExecutor(
        provider=SlurmProvider(
            'westmere',
            channel=SSHChannel(
                hostname='swift.rcc.uchicago.edu',
                username='******',  # Please replace USERNAME with your username
                script_dir='/scratch/midway2/USERNAME/parsl_scripts',  # Please replace USERNAME with your username
            ),
            init_blocks=1,
            min_blocks=1,
            max_blocks=2,
            nodes_per_block=1,
            tasks_per_node=4,
            parallelism=0.5,
            scheduler_options='',  # Input your scheduler_options if needed
            worker_init='',  # Input your worker_init if needed
        ),
        label='midway_ipp',
        controller=Controller(
            public_ip='PUBLIC_IP'
        ),  # Please replace PUBLIC_IP with your public ip
    )
])
Example #23
""" This config assumes that it is used to launch parsl tasks from the login nodes
of Frontera at TACC. Each job submitted to the scheduler will request 2 nodes for 10 minutes.
"""
config = Config(
    executors=[
        HighThroughputExecutor(
            label="frontera_htex",
            address=address_by_hostname(),
            max_workers=1,  # Set number of workers per node
            provider=SlurmProvider(
                cmd_timeout=60,  # Add extra time for slow scheduler responses
                channel=LocalChannel(),
                nodes_per_block=2,
                init_blocks=1,
                min_blocks=1,
                max_blocks=1,
                partition='normal',  # Replace with partition name
                scheduler_options='#SBATCH -A <YOUR_ALLOCATION>',  # Enter scheduler_options if needed

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',

                # Ideally we set the walltime to the longest supported walltime.
                walltime='00:10:00',
                launcher=SrunLauncher(),
            ),
        )
    ], )
Example #24
knlMj = HighThroughputExecutor(
    label='knlMj',
    address=address_by_hostname(),  # node upon which the top-level parsl script is running
    cores_per_worker=25,  # threads per user task (managed by a 'worker')
    max_workers=2,  # user tasks/node
    poll_period=30,
    provider=SlurmProvider(  ## Dispatch tasks via SLURM
        partition='regular',  # SLURM job "queue"
        walltime='05:00:00',  # max time for batch job
        cmd_timeout=90,  # Extend time waited in response to 'sbatch' command
        nodes_per_block=4,  # Nodes per batch job
        init_blocks=0,  # number of batch jobs to submit in anticipation of future demand
        min_blocks=1,  # limits on batch job requests
        max_blocks=1,
        parallelism=0.1,  # reduce "extra" batch jobs
        scheduler_options="#SBATCH -L SCRATCH,projecta \n#SBATCH --constraint=knl",
        worker_init=os.environ['PT_ENVSETUP'],  # Initial ENV setup
        channel=LocalChannel(),  # batch communication is performed on this local machine
        launcher=SrunLauncher()  # SrunLauncher necessary for multi-node batch jobs
    ),
)

## This executor is intended for large-scale KNL batch work with
## *multiple* nodes & workers/node but with minimal parallelism within
## the DM code itself.

knlM = HighThroughputExecutor(
Example #25
File: cori.py Project: funcx-faas/funcX
            worker_debug=False,
            address=address_by_interface('bond0.144'),
            provider=SlurmProvider(
                partition='debug',  # Partition / QOS

                # We request all hyperthreads on a node.
                launcher=SrunLauncher(overrides='-c 272'),

                # string to prepend to #SBATCH blocks in the submit
                # script to the scheduler, e.g.: '#SBATCH --constraint=knl,quad,cache'
                scheduler_options=user_opts['cori']['scheduler_options'],

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init=user_opts['cori']['worker_init'],

                # Increase timeout as Cori's scheduler may be slow
                # to respond
                cmd_timeout=120,

                # Scale between 0-1 blocks with 2 nodes per block
                nodes_per_block=2,
                init_blocks=0,
                min_blocks=0,
                max_blocks=1,

                # Hold blocks for 10 minutes
                walltime='00:10:00',
            ),
        ),
    ], )
Example #26
from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.executors import HighThroughputExecutor

config = Config(executors=[
    HighThroughputExecutor(
        worker_debug=True,
        max_workers=10,
        provider=SlurmProvider(
            'daenerys',
            init_blocks=1,
            max_blocks=1,
            worker_init='source ~/.profile; source ~/.bashrc; conda activate alignment',
            walltime='2400000:00:00'),
    )
], )
Example #27

config = Config(
    executors=[
        HighThroughputExecutor(
            label='Cori_HTEX_multinode',
            # This is the network interface on the login node to
            # which compute nodes can communicate
            address=address_by_interface('bond0.144'),
            cores_per_worker=2,
            provider=SlurmProvider(
                'debug',  # Partition / QOS
                nodes_per_block=2,
                init_blocks=1,
                # string to prepend to #SBATCH blocks in the submit
                # script to the scheduler, e.g.: '#SBATCH --constraint=knl,quad,cache'
                scheduler_options='',
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init='',
                # We request all hyperthreads on a node.
                launcher=SrunLauncher(overrides='-c 272'),
                walltime='00:20:00',
                # Slurm scheduler on Cori can be slow at times,
                # increase the command timeouts
                cmd_timeout=120,
            ),
        )
    ]
)
Example #28
from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.executors import HighThroughputExecutor
from parsl.launchers import SrunLauncher
from parsl.addresses import address_by_hostname
""" This config assumes that it is used to launch parsl tasks from the login nodes
of the Campus Cluster at UIUC. Each job submitted to the scheduler will request 2 nodes for 10 minutes.
"""
config = Config(
    executors=[
        HighThroughputExecutor(
            label="CC_htex",
            worker_debug=False,
            address=address_by_hostname(),
            cores_per_worker=16.0,  # each worker uses a full node
            provider=SlurmProvider(
                partition='secondary-fdr',  # partition
                nodes_per_block=2,  # number of nodes
                init_blocks=1,
                max_blocks=1,
                scheduler_options='',
                cmd_timeout=60,
                walltime='00:10:00',
                launcher=SrunLauncher(),
                worker_init='conda activate envParsl',  # requires conda environment with parsl
            ),
        )
    ], )
Example #29
         interchange_address='comet-ln2.sdsc.edu',
         interchange_port_range=(50100, 50400),
         #client_ports=(50055, 50056, 50057), # Getting error for unexpected argument
         worker_port_range=(50500, 51000),
         provider=SlurmProvider(
             launcher=SrunLauncher(),
             #'compute',
             channel=SSHChannel(
                 hostname='comet-ln2.sdsc.edu',
                 username='******',  # Please replace USERNAME with your username
                 password='******',
                 script_dir='/home/aymen/parsl_scripts',  # Replace with your own script directory
             ),
             # launcher=SrunLauncher(),
             scheduler_options='',  # Input your scheduler_options if needed
             worker_init='source /home/aymen/ve/parsl-env/bin/activate',  # Input your worker_init if needed
             partition="compute",
             walltime="00:30:00",
             init_blocks=1,
             max_blocks=1,
             #tasks_per_node = 24, # Getting error for unexpected argument
             nodes_per_block=36,
             #cores_per_node=24, # Getting error for unexpected argument
             parallelism=864,
         ),
         working_dir="/home/aymen/parsl_scripts",
     )
 ],
 strategy='simple',
Example #30
             hostname=hostnames[step],
             username=args.unix_username,
             gssapi_auth=args.gssapi,
             )
 if args.scheduler_name == 'slurm':
     executors.append(HighThroughputExecutor(
                 label=step,
                 worker_debug=True,
                 address=address_by_hostname(),
                 cores_per_worker=vCPUs_per_core*int(cores_per_task[step]),
                 provider=SlurmProvider(
                     args.scheduler_partition,
                     channel=channel,
                     launcher=SrunLauncher(),
                     nodes_per_block=node_count,
                     worker_init=worker_init,
                     init_blocks=1,
                     max_blocks=1,
                     walltime=walltimes[step],
                     scheduler_options=options,
                     move_files=False,
                 ),
             )
         )
 elif args.scheduler_name == 'grid_engine':
     executors.append(HighThroughputExecutor(
                 label=step,
                 worker_debug=True,
                 address=address_by_hostname(),
                 max_workers=int(cores_per_node[step]), # cap workers, or else defaults to infinity.
                 provider=GridEngineProvider(
                     channel=channel,