예제 #1
0
def build_thread_pool_executor(queues: TaskServerQueues) -> ParslTaskServer:
    """Create a task server backed by a single local worker thread.

    Intended primarily for testing: it exposes one method, "execute",
    which accepts a Callable and runs it remotely.

    Args:
        queues: Queues to use to communicate
    Returns:
        A configured task server
    """

    local_executor = ThreadPoolExecutor(max_threads=1)
    return ParslTaskServer(
        queues=queues,
        methods=[_execute],
        config=Config(executors=[local_executor]),
    )
예제 #2
0
def test_1316_local_path_on_execution_side_sp2():
    """Show that a StagingProvider can set the local_path of a File on the
    execution side without mutating the corresponding submit-side File,
    even when both sides share a single Python process.
    """

    parsl.load(Config(executors=[ThreadPoolExecutor(storage_access=[SP2()])]))

    submit_side_file = File("sp2://test")

    observed_path = observe_input_local_path(submit_side_file).result()

    assert observed_path == "./test1.tmp", "File object on the execution side gets the local_path set by the staging provider"

    assert not submit_side_file.local_path, "The local_path on the submit side should not be set"

    parsl.clear()
예제 #3
0
def test_1316_local_path_setting_preserves_dependency_sp2():
    """Verify that staging-side local_path assignment does not break the
    DataFuture dependency chain between apps."""
    parsl.load(Config(executors=[ThreadPoolExecutor(storage_access=[SP2()])]))

    out_file = File("sp2://test")

    creator_future = wait_and_create(outputs=[out_file])
    staged_future = creator_future.outputs[0]

    observed_path = observe_input_local_path(staged_future).result()

    assert creator_future.done(), "wait_and_create should finish before observe_input_local_path finishes"

    assert observed_path == "./test1.tmp", "File object on the execution side gets the local_path set by the staging provider"

    assert not out_file.local_path, "The local_path on the submit side should not be set"

    parsl.dfk().cleanup()
    parsl.clear()
예제 #4
0
def basic_threads(workers=8):
    """Return a local thread-pool Config with *workers* threads and 3 retries."""
    thread_pool = ThreadPoolExecutor(max_threads=workers)
    return Config(executors=[thread_pool], retries=3)
예제 #5
0
Foreach item from the .csv:
    100K app calls in parallel. Needs SoftImage.

<Bash> wait on all iterations of the for loop

"""

import parsl
from parsl import bash_app, python_app, DataFlowKernel, ThreadPoolExecutor
import os
import shutil
import random
import argparse

# Module-level execution setup: an 8-thread local executor feeding the
# DataFlowKernel that the apps in this script run against.
workers = ThreadPoolExecutor(max_workers=8)
dfk = DataFlowKernel(executors=[workers])


def create_dirs(cwd):
    """Build three 'relax.NN' stage directories under *cwd*, each holding a
    random number (1-5) of numbered subdirectories with a small 'results' file.

    Any pre-existing stage directory is removed first, so the layout is
    rebuilt from scratch on every call.

    Args:
        cwd: Path of the directory in which to create the tree.
    """
    # 'stage' instead of 'dir': the original name shadowed the builtin dir().
    for stage in ['relax.01', 'relax.02', 'relax.03']:
        stage_dir = os.path.join(cwd, stage)
        # Start fresh: drop any leftovers from a previous run.
        if os.path.exists(stage_dir):
            shutil.rmtree(stage_dir)
        os.makedirs(stage_dir)
        for i in range(0, random.randint(1, 5)):
            run_dir = os.path.join(stage_dir, str(i))
            os.makedirs(run_dir)
            # Seed each run directory with a small marker file.
            # (The original passed a stray unused arg to .format() here.)
            with open(os.path.join(run_dir, 'results'), 'w') as f:
                f.write("{0} {1} - test data\n".format(i, stage))
예제 #6
0
import logging

from parsl import ThreadPoolExecutor
from parsl.config import Config
from parsl.monitoring import MonitoringHub

# Local 4-thread pool with monitoring enabled: the MonitoringHub listens on
# localhost:55055 and samples resource usage every 3 seconds.
config = Config(executors=[ThreadPoolExecutor(label='threads', max_threads=4)],
                monitoring=MonitoringHub(
                    hub_address="localhost",
                    hub_port=55055,
                    logging_level=logging.INFO,
                    resource_monitoring_interval=3,
                ))
예제 #7
0
"""Testing bash apps
"""
import parsl
from parsl import App, DataFlowKernel, ThreadPoolExecutor

print("Parsl version: ", parsl.__version__)

import time
import argparse

# A 10-thread local executor driving the DataFlowKernel used by the apps below.
workers = ThreadPoolExecutor(max_threads=10)
dfk = DataFlowKernel(executors=[workers])


@App('bash', dfk)
def sleep_foo(sleepdur, stdout=None):
    """Bash app whose returned command template sleeps for {0} seconds."""
    command_template = """sleep {0}"""
    return command_template


if __name__ == '__main__':

    # Parse command-line options for the benchmark run.
    parser = argparse.ArgumentParser()
    parser.add_argument("-c",
                        "--count",
                        default="10",
                        help="Count of apps to launch")
    parser.add_argument("-d",
                        "--debug",
                        action='store_true',
                        # Original help text was a copy-paste of -c's.
                        help="Enable debug output")
    args = parser.parse_args()
예제 #8
0
"""Use local compute for the Parsl executor"""
from colmena.redis.queue import TaskServerQueues
from colmena.task_server import ParslTaskServer
from parsl import Config, ThreadPoolExecutor

from planner import run_inference

# Single-threaded local executor: tasks run one at a time on this host.
config = Config(executors=[ThreadPoolExecutor(max_threads=1)])


def make_task_server(queues: TaskServerQueues) -> ParslTaskServer:
    """Build the task server, which exposes a single method: run_inference.

    Args:
        queues: Queues to be used. Expects a single compute queue, named "compute"
    Returns:
        Initialized task server
    """
    server = ParslTaskServer(
        queues=queues,
        methods=[run_inference],
        config=config,
    )
    return server
예제 #9
0
from parsl.launchers import AprunLauncher, SimpleLauncher
from parsl.providers import LocalProvider, CobaltProvider
from parsl.channels import SSHChannel

# Two-executor config: an HTEX with two workers on a single local block, plus
# a small thread pool for lightweight local tasks. strategy=None disables
# elastic block scaling.
config = Config(
    executors=[
        HighThroughputExecutor(
            address="localhost",
            label="htex",
            max_workers=2,
            provider=LocalProvider(
                init_blocks=1,
                max_blocks=1
            ),
        ),
        ThreadPoolExecutor(label="local_threads", max_threads=4)
    ],
    strategy=None
)

local_interleaved_config = Config(
    executors=[
        HighThroughputExecutor(
            address="localhost",
            label="qc",
            max_workers=2,
            provider=LocalProvider(
                init_blocks=1,
                max_blocks=1
            ),
        ),