Example #1
from parsl.config import Config
from parsl.providers import SlurmProvider
from parsl.launchers import SrunLauncher
from parsl.executors import HighThroughputExecutor
from parsl.addresses import address_by_query

from .user_opts import user_opts


def fresh_config():
    return Config(executors=[
        HighThroughputExecutor(
            label='Comet_HTEX_multinode',
            address=address_by_query(),
            max_workers=1,
            provider=SlurmProvider(
                'debug',
                launcher=SrunLauncher(),
                # string to prepend to #SBATCH blocks in the submit
                # script to the scheduler
                scheduler_options=user_opts['comet']['scheduler_options'],

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init=user_opts['comet']['worker_init'],
                walltime='00:10:00',
                init_blocks=1,
                max_blocks=1,
                nodes_per_block=2,
            ),
        )
    ])
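
A minimal usage sketch, not part of the original snippet: assuming the imports above resolve and user_opts is populated for Comet, the config can be loaded with parsl.load and exercised with a trivial app (the hostname app below is illustrative only).

import parsl
from parsl import python_app

# Load the two-node Comet configuration defined above; assumes valid user_opts.
parsl.load(fresh_config())

@python_app
def hostname():
    import platform
    return platform.node()

# Blocks until a worker on the allocated nodes returns its hostname.
print(hostname().result())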
Example #2
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.providers import CondorProvider
from parsl.addresses import address_by_query
from parsl.tests.utils import get_rundir

# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
#       1) create a local `user_opts.py`, or
#       2) delete the user_opts import below and replace all appearances of `user_opts` with the literal value
#          (i.e., user_opts['swan']['username'] -> 'your_username')
from .user_opts import user_opts

config = Config(
    executors=[
        HighThroughputExecutor(
            label='OSG_HTEX',
            address=address_by_query(),
            max_workers=1,
            provider=CondorProvider(
                nodes_per_block=1,
                init_blocks=4,
                max_blocks=4,
                # This scheduler option string ensures that the compute nodes provisioned
                # will have modules
                scheduler_options=
                'Requirements = OSGVO_OS_STRING == "RHEL 6" && Arch == "X86_64" &&  HAS_MODULES == True',
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init=user_opts['osg']['worker_init'],
                walltime="00:20:00",
            ),
        )
    ],
    run_dir=get_rundir(),
)
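
The user_opts import above expects a local user_opts.py, as noted in the comment at the top of this example. A hypothetical stub for this OSG config might look like the following; the worker_init command is a placeholder, not a value from the original.

# user_opts.py (hypothetical stub; adapt to your own environment)
user_opts = {
    'osg': {
        # Command run on each compute node before the worker starts,
        # e.g. loading modules or activating a conda environment.
        'worker_init': 'module load python; source activate parsl_env',
    },
}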
Example #3
import getpass

from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.providers import AzureProvider
from parsl.addresses import address_by_query
from parsl.data_provider.http import HTTPInTaskStaging
from parsl.data_provider.ftp import FTPInTaskStaging
from parsl.data_provider.rsync import RSyncStaging

vm_reference = {
    # All fields below are required
    "admin_username": '******',
    "password": '******',
    "vm_size": 'YOUR_VM_SIZE',
    "disk_size_gb": 'YOUR_VM_DISK_SIZE',
    "publisher": 'YOUR_IMAGE_PUBLISHER',
    "offer": 'YOUR_VM_OS_OFFER',
    "sku": 'YOUR_VM_OS_SKU',
    "version": 'YOUR_VM_OS_VERSION',
}

config = Config(executors=[
    HighThroughputExecutor(
        label='azure_single_node',
        provider=AzureProvider(
            vm_reference=vm_reference,
            key_file='azure_key_file.json',
        ),
        storage_access=[
            HTTPInTaskStaging(),
            FTPInTaskStaging(),
            RSyncStaging(getpass.getuser() + "@" + address_by_query())
        ],
    )
])
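
As an illustration of how the storage_access providers above are used (a sketch, not part of the original config): an app can declare remote inputs as Parsl File objects, and the in-task staging providers fetch them on the worker before the app body runs. The URL below is only an example.

import parsl
from parsl import python_app
from parsl.data_provider.files import File

parsl.load(config)

@python_app
def line_count(inputs=[]):
    # inputs[0] has already been staged to the worker's local filesystem.
    with open(inputs[0].filepath) as f:
        return sum(1 for _ in f)

readme = File('https://raw.githubusercontent.com/Parsl/parsl/master/README.rst')
print(line_count(inputs=[readme]).result())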