Example 1
from parsl.channels import LocalChannel
from parsl.providers import LocalProvider

from funcx_endpoint.executors import HighThroughputExecutor as HTEX


def test_1():

    x = HTEX(
        label='htex',
        provider=LocalProvider(channel=LocalChannel()),
        address="127.0.0.1",
    )
    task_p, result_p, command_p = x.start()
    print(task_p, result_p, command_p)
    print("Executor initialized : ", x)

    args = [2]
    kwargs = {}
    f1 = x.submit(double, *args, **kwargs)
    print("Sent task with :", f1)
    args = [2]
    kwargs = {}
    f2 = x.submit(fail, *args, **kwargs)

    print("hi")
    while True:
        stop = input("Stop ? (y/n)")
        if stop == "y":
            break

    print("F1: {}, f2: {}".format(f1.done(), f2.done()))
    x.shutdown()
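
The test above submits two task functions, double and fail, that are not shown in the snippet. A minimal sketch of what such helpers might look like (hypothetical, not part of the original code):

def double(x):
    # Trivial task used to exercise a successful round trip through the executor.
    return x * 2


def fail(x):
    # Task that always raises, used to exercise error propagation back to the future.
    raise ValueError(f"deliberate failure for input {x}")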
Example 2
import os

from parsl.providers import LocalProvider

from funcx_endpoint.endpoint.utils.config import Config
from funcx_endpoint.executors import HighThroughputExecutor

CONDA_ENV = os.environ["WORKER_CONDA_ENV"]
print(f"Using conda env:{CONDA_ENV} for worker_init")

config = Config(
    executors=[
        HighThroughputExecutor(
            provider=LocalProvider(
                init_blocks=1,
                min_blocks=0,
                max_blocks=1,
                # FIX ME: Update conda.sh file to match your paths
                worker_init=(
                    "source ~/anaconda3/etc/profile.d/conda.sh; "
                    f"conda activate {CONDA_ENV}; "
                    "python3 --version"
                ),
            ),
        )
    ],
    funcx_service_address="https://api2.funcx.org/v2",
)

# For now, visible_to must be a list of URNs for globus auth users or groups, e.g.:
# urn:globus:auth:identity:{user_uuid}
# urn:globus:groups:id:{group_uuid}
meta = {
    "name": "0.3.3",
Example 3
        HighThroughputExecutor(
            max_workers_per_node=2,
            worker_debug=False,
            address=address_by_interface('bond0.144'),
            provider=SlurmProvider(
                partition='debug',  # Partition / QOS

                # We request all hyperthreads on a node.
                launcher=SrunLauncher(overrides='-c 272'),

                # string to prepend to #SBATCH blocks in the submit
                # script to the scheduler eg: '#SBATCH --constraint=knl,quad,cache'
                scheduler_options=user_opts['cori']['scheduler_options'],

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init=user_opts['cori']['worker_init'],

                # Increase timeout as Cori's scheduler may be slow
                # to respond
                cmd_timeout=120,

                # Scale between 0-1 blocks with 2 nodes per block
                nodes_per_block=2,
                init_blocks=0,
                min_blocks=0,
                max_blocks=1,

                # Hold blocks for 10 minutes
                walltime='00:10:00',
            ),
        ),
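
This fragment relies on a user_opts dictionary defined earlier in the file. A plausible sketch of its shape, with illustrative values taken from the comments above (adjust to your site):

user_opts = {
    'cori': {
        # Prepended to #SBATCH directives in the submit script
        'scheduler_options': '#SBATCH --constraint=knl,quad,cache',
        # Runs before each worker starts
        'worker_init': 'module load Anaconda; source activate parsl_env',
    }
}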
Example 4
from parsl.utils import RepresentationMixin

from funcx_endpoint.executors import HighThroughputExecutor

_DEFAULT_EXECUTORS = [HighThroughputExecutor()]


class Config(RepresentationMixin):
    """Specification of FuncX configuration options.

    Parameters
    ----------

    executors : list of Executors
        A list of executors which serve as the backend for function execution.
        As of 0.2.2, this list should contain only one executor.
        Default: [HighThroughputExecutor()]

    funcx_service_address: str
        URL address string of the funcX service to which the Endpoint should connect.
        Default: 'https://api2.funcx.org/v2'

    heartbeat_period: int (seconds)
        The interval at which heartbeat messages are sent from the endpoint to the
        funcx-web-service.
        Default: 30s

    heartbeat_threshold: int (seconds)
        Seconds since the last heartbeat message from the funcx-web-service after
        which the connection is assumed to be lost.
        Default: 120s
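
A minimal usage sketch based on the parameters documented above (values are the illustrative defaults, not a prescription):

from funcx_endpoint.endpoint.utils.config import Config
from funcx_endpoint.executors import HighThroughputExecutor

config = Config(
    # Only one executor is supported as of 0.2.2
    executors=[HighThroughputExecutor()],
    funcx_service_address="https://api2.funcx.org/v2",
    heartbeat_period=30,       # seconds between heartbeats to the funcx-web-service
    heartbeat_threshold=120,   # seconds of silence before the connection is considered lost
)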
Example 5
config = Config(
    executors=[
        HighThroughputExecutor(
            max_workers_per_node=2,
            worker_debug=False,
            address=address_by_hostname(),
            provider=CobaltProvider(
                queue='default',
                account=user_opts['cooley']['account'],
                launcher=MpiExecLauncher(),
                # string to prepend to #COBALT blocks in the submit
                # script to the scheduler eg: '#COBALT -t 50'
                scheduler_options=user_opts['cooley']['scheduler_options'],

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate funcx_env'.
                worker_init=user_opts['cooley']['worker_init'],

                # Scale between 0-1 blocks with 2 nodes per block
                nodes_per_block=2,
                init_blocks=0,
                min_blocks=0,
                max_blocks=1,

                # Hold blocks for 30 minutes
                walltime='00:30:00',
            ),
        )
    ], )

# fmt: on
Example 6
config = Config(
    executors=[
        HighThroughputExecutor(
            max_workers_per_node=10,
            address=address_by_hostname(),
            scheduler_mode='soft',
            worker_mode='singularity_reuse',
            container_type='singularity',
            container_cmd_options="-H /home/$USER",
            provider=SlurmProvider(
                partition='broadwl',
                launcher=SrunLauncher(),

                # string to prepend to #SBATCH blocks in the submit
                # script to the scheduler eg: '#SBATCH --constraint=knl,quad,cache'
                scheduler_options=user_opts['midway']['scheduler_options'],

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init=user_opts['midway']['worker_init'],

                # Scale between 0-1 blocks with 2 nodes per block
                nodes_per_block=2,
                init_blocks=0,
                min_blocks=0,
                max_blocks=1,

                # Hold blocks for 30 minutes
                walltime='00:30:00'),
        )
    ], )
Example 7
from funcx_endpoint.endpoint.utils.config import Config
from funcx_endpoint.executors import HighThroughputExecutor

from parsl.addresses import address_by_hostname
from parsl.channels import LocalChannel
from parsl.launchers import SrunLauncher
from parsl.providers import LocalProvider, SlurmProvider

...

config = Config(executors=[
    HighThroughputExecutor(
        label="fe.cs.uchicago",
        address=address_by_hostname(),
        provider=SlurmProvider(
            channel=LocalChannel(),
            nodes_per_block=NODES_PER_JOB,
            init_blocks=1,
            partition="general",
            launcher=SrunLauncher(
                overrides=(f"hostname; srun --ntasks={TOTAL_WORKERS} "
                           f"--ntasks-per-node={WORKERS_PER_NODE} "
                           f"--gpus-per-task=rtx2080ti:{GPUS_PER_WORKER} "
                           f"--gpu-bind=map_gpu:{GPU_MAP}")),
            walltime="01:00:00",
        ),
    )
], )
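
The snippet references several constants defined in the elided portion of the file. Illustrative values only, assuming the layout described later in this listing (4 workers per node, each bound to 1 GPU):

NODES_PER_JOB = 2
WORKERS_PER_NODE = 4
TOTAL_WORKERS = NODES_PER_JOB * WORKERS_PER_NODE
GPUS_PER_WORKER = 1
# Comma-separated GPU indices for --gpu-bind=map_gpu, e.g. "0,1,2,3"
GPU_MAP = ','.join(str(i) for i in range(WORKERS_PER_NODE))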
Example 8
}

config = Config(
    executors=[
        HighThroughputExecutor(
            max_workers_per_node=1,
            worker_debug=False,
            address=address_by_hostname(),
            provider=TorqueProvider(
                queue='normal',
                launcher=AprunLauncher(overrides="-b -- bwpy-environ --"),

                # string to prepend to #PBS directives in the submit
                # script to the scheduler
                scheduler_options=user_opts['bluewaters']['scheduler_options'],

                # Command to be run before starting a worker, such as:
                # 'module load bwpy; source activate funcx_env'.
                worker_init=user_opts['bluewaters']['worker_init'],

                # Scale between 0-1 blocks with 2 nodes per block
                nodes_per_block=2,
                init_blocks=0,
                min_blocks=0,
                max_blocks=1,

                # Hold blocks for 30 minutes
                walltime='00:30:00'),
        )
    ], )

# fmt: on
Example 9
config = Config(
    executors=[
        HighThroughputExecutor(
            label="fe.cs.uchicago",
            worker_debug=False,
            address=address_by_hostname(),
            provider=SlurmProvider(
                partition='general',

                # Launch 4 managers per node, each bound to 1 GPU
                # This is a hack. We use hostname ; to terminate the srun command, and
                # start our own
                #
                # DO NOT MODIFY unless you know what you are doing.
                launcher=SrunLauncher(
                    overrides=(f'hostname; srun --ntasks={TOTAL_WORKERS} '
                               f'--ntasks-per-node={WORKERS_PER_NODE} '
                               f'--gpus-per-task=rtx2080ti:{GPUS_PER_WORKER} '
                               f'--gpu-bind=map_gpu:{GPU_MAP}')),

                # Scale between 0-1 blocks with NODES_PER_JOB nodes per block
                nodes_per_block=NODES_PER_JOB,
                init_blocks=0,
                min_blocks=0,
                max_blocks=1,

                # Hold blocks for 30 minutes
                walltime='00:30:00',
            ),
        )
    ], )
Example 10
config = Config(
    executors=[
        HighThroughputExecutor(
            max_workers_per_node=2,
            worker_debug=False,
            address=address_by_hostname(),
            provider=SlurmProvider(
                partition=user_opts['frontera']['partition'],
                launcher=SrunLauncher(),

                # Enter scheduler_options if needed
                scheduler_options=user_opts['frontera']['scheduler_options'],

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init=user_opts['frontera']['worker_init'],

                # Add extra time for slow scheduler responses
                cmd_timeout=60,

                # Scale between 0-1 blocks with 2 nodes per block
                nodes_per_block=2,
                init_blocks=0,
                min_blocks=0,
                max_blocks=1,

                # Hold blocks for 30 minutes
                walltime='00:30:00',
            ),
        )
    ], )
Example 11
        HighThroughputExecutor(
            max_workers_per_node=1,
            worker_debug=False,
            address=address_by_hostname(),
            scheduler_mode='soft',
            worker_mode='singularity_reuse',
            container_type='singularity',
            container_cmd_options="-H /home/$USER",
            provider=CobaltProvider(
                queue='debug-flat-quad',
                account=user_opts['theta']['account'],
                launcher=AprunLauncher(overrides="-d 64"),

                # string to prepend to #COBALT blocks in the submit
                # script to the scheduler eg: '#COBALT -t 50'
                scheduler_options=user_opts['theta']['scheduler_options'],

                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate funcx_env'.
                worker_init=user_opts['theta']['worker_init'],

                # Scale between 0-1 blocks with 2 nodes per block
                nodes_per_block=2,
                init_blocks=0,
                min_blocks=0,
                max_blocks=1,

                # Hold blocks for 30 minutes
                walltime='00:30:00'
            ),
        )
Example 12
    }
}

config = Config(
    executors=[
        HighThroughputExecutor(
            label='Kubernetes_funcX',
            max_workers_per_node=1,
            address=address_by_route(),
            scheduler_mode='hard',
            container_type='docker',
            strategy=KubeSimpleStrategy(max_idletime=3600),
            provider=KubernetesProvider(
                init_blocks=0,
                min_blocks=0,
                max_blocks=2,
                init_cpu=1,
                max_cpu=4,
                init_mem="1024Mi",
                max_mem="4096Mi",
                image=user_opts['kube']['image'],
                worker_init=user_opts['kube']['worker_init'],
                namespace=user_opts['kube']['namespace'],
                incluster_config=False,
            ),
        )
    ],
    heartbeat_period=15,
    heartbeat_threshold=200,
    log_dir='.',
)
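
The Kubernetes example above pulls image, worker_init and namespace from a user_opts dictionary that is truncated at the top of the snippet. A hypothetical sketch of its shape (all values are placeholders):

user_opts = {
    'kube': {
        # Container image the worker pods run in
        'image': 'python:3.8-slim',
        # Runs inside the container before a worker starts
        'worker_init': 'pip install funcx-endpoint',
        # Kubernetes namespace in which worker pods are created
        'namespace': 'default',
    }
}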
Example 13
from parsl.providers import LocalProvider

from funcx_endpoint.endpoint.utils.config import Config
from funcx_endpoint.executors import HighThroughputExecutor

config = Config(
    executors=[
        HighThroughputExecutor(provider=LocalProvider(
            init_blocks=1,
            min_blocks=0,
            max_blocks=1,
        ), )
    ],
    funcx_service_address="https://api2.funcx.org/v2",
)

# For now, visible_to must be a list of URNs for globus auth users or groups, e.g.:
# urn:globus:auth:identity:{user_uuid}
# urn:globus:groups:id:{group_uuid}
meta = {
    "name": "$name",
    "description": "",
    "organization": "",
    "department": "",
    "public": False,
    "visible_to": [],
}
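
Following the URN format described in the comment above, a populated visible_to list might look like this (the UUIDs are placeholders):

meta = {
    "name": "my-shared-endpoint",
    "description": "Endpoint shared with one Globus Auth user and one Globus group",
    "organization": "",
    "department": "",
    "public": False,
    "visible_to": [
        "urn:globus:auth:identity:00000000-0000-0000-0000-000000000000",
        "urn:globus:groups:id:11111111-1111-1111-1111-111111111111",
    ],
}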
Example 14
        # Set ncpus=32, otherwise it defaults to 1 on Polaris
        'scheduler_options': '',
    }
}

config = Config(
    executors=[
        HighThroughputExecutor(
            max_workers_per_node=1,
            strategy=SimpleStrategy(max_idletime=300),
            # IP of Polaris testbed login node
            address='10.230.2.72',
            provider=PBSProProvider(
                launcher=SingleNodeLauncher(),
                queue='workq',
                scheduler_options=user_opts['polaris']['scheduler_options'],
                # Command to be run before starting a worker, such as:
                # 'module load Anaconda; source activate parsl_env'.
                worker_init=user_opts['polaris']['worker_init'],
                cpus_per_node=32,
                walltime='01:00:00',
                nodes_per_block=1,
                init_blocks=0,
                min_blocks=0,
                max_blocks=1,
            ),
        )
    ], )

# fmt: on