Example #1
import time

from parsl import App, DataFlowKernel
def run_checkpointed(n=2, mode="task_exit", sleep_dur=0):
    """ This test runs n apps that will fail with Division by zero error,
    followed by 1 app that will succeed. The checkpoint should only have 1 task
    """

    from parsl.tests.configs.local_threads import config
    config['globals']['runDir'] = 'runinfo'
    config["globals"]["checkpointMode"] = mode
    dfk = DataFlowKernel(config=config)

    @App('python', dfk, cache=True)
    def cached_rand(x, sleep_dur=0):
        import random
        import time
        time.sleep(sleep_dur)
        return random.randint(0, 10000)

    items = []
    for i in range(0, n):
        x = cached_rand(i, sleep_dur=sleep_dur)
        items.append(x)

    # Barrier: wait for all apps to complete
    [i.result() for i in items]
    with open("test.txt", 'w') as f:
        f.write("done")

    # Give a periodic-style checkpoint time to be written before returning
    time.sleep(10)
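
# Hypothetical driver: with a periodic-style checkpoint mode the sleep above
# gives the checkpointer time to run; 'task_exit' below is just the
# function's own default.
if __name__ == "__main__":
    run_checkpointed(n=4, mode="task_exit", sleep_dur=1)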
Example #2
import time

from parsl import DataFlowKernel, set_stream_logger
# Assumption: the shared local_threads test config, as in the other examples.
from parsl.tests.configs.local_threads import config
def test_220():
    """Test async usage_tracking behavior for issue #220 """

    print("This test assumes /etc/resolv being misconfigured")
    with open("/etc/resolv.conf", 'r') as f:
        for line in f.readlines():
            line = line.strip()
            print("Line: [{}]".format(line))
            if line.startswith("nameserver") and line != "nameserver 2.2.2.2":
                assert False, "/etc/resolv.conf should be misconfigured"

    start = time.time()
    set_stream_logger()
    dfk = DataFlowKernel(config=config)
    delta = time.time() - start
    print("Time taken : ", delta)
    assert delta < 1, "DFK took too much time to start, delta:{}".format(delta)
    dfk.cleanup()
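
# Related sketch (assumption: an old-style 'usageTracking' switch in the
# dict config, matching the config style of the other examples): disabling
# usage tracking sidesteps DNS entirely, so startup time should not depend
# on the resolver at all.
def test_220_tracking_disabled():
    config["globals"]["usageTracking"] = False
    start = time.time()
    dfk = DataFlowKernel(config=config)
    assert time.time() - start < 1, "DFK startup should not touch DNS"
    dfk.cleanup()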
Example #3
from parsl import App, DataFlowKernel
def run_checkpointed(n=2, mode="task_exit"):
    """ This test runs n apps that will fail with Division by zero error,
    followed by 1 app that will succeed. The checkpoint should only have 1 task.
    """

    from parsl.tests.configs.local_threads import config
    config["globals"]["checkpointMode"] = mode
    dfk = DataFlowKernel(config=config)

    @App('python', dfk, cache=True)
    def cached_rand(x):
        import random
        return random.randint(0, 10000)

    @App('python', dfk, cache=True)
    def cached_failing(x):
        5 / 0  # deliberately raise ZeroDivisionError
        return 1

    items = []
    for i in range(0, n):
        x = cached_failing(0)
        items.append(x)
        try:
            x.result()
        except Exception:
            print("Ignoring failure of task")

    x = cached_rand(1)
    print(x.result())
    rundir = dfk.rundir
    # Call cleanup *only* for dfk_exit to ensure that a checkpoint is written
    # at all
    if mode == "dfk_exit":
        dfk.cleanup()
    return rundir
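
# Hypothetical driver: exercise both checkpoint modes and report whether a
# checkpoint directory appeared under the run directory (the
# '<rundir>/checkpoint' location is an assumption about parsl's layout).
if __name__ == "__main__":
    import os
    for checkpoint_mode in ("task_exit", "dfk_exit"):
        rundir = run_checkpointed(n=2, mode=checkpoint_mode)
        print(checkpoint_mode, os.path.isdir(os.path.join(rundir, "checkpoint")))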
Example #4
from parsl import DataFlowKernel
# Assumption: config is the shared local_threads test configuration.
from parsl.tests.configs.local_threads import config
def test_z_cleanup():
    # The 'z' in the name makes this run last under alphabetical test
    # ordering, so cleanup happens after the other tests in the module.
    dfk = DataFlowKernel(config=config)
    dfk.cleanup()
Example #5
The time to finish the 10 apps should be ~10s.

In the second run, start the parsl script, and as soon as the run starts,
start additional ipengines. The time to finish the 10 apps should still be ~10s.

This shows that the LoadBalancedView simply routes tasks to the engines that are
available at the time the apps are submitted to it. It is not capable of rebalancing
apps among the engines once they have been sent to an engine's queue.


"""
from parsl import App, DataFlowKernel
import time

from parsl.tests.configs.local_ipp import config
dfk = DataFlowKernel(config=config)


@App('python', dfk)
def sleep_double(x):
    import time
    time.sleep(1)
    return x * 2


def test_z_cleanup():
    dfk.cleanup()


if __name__ == "__main__":
    # Reconstructed driver (an assumption; the original body was truncated):
    # time 10 sleep_double apps as described in the docstring, then clean up.
    start = time.time()
    futures = [sleep_double(i) for i in range(10)]
    [f.result() for f in futures]
    print("Time taken: {0:.2f}s".format(time.time() - start))
    test_z_cleanup()
Example #6
Foreach .. from .csv
    100K app calls in parallel. Needs softImage

<Bash> wait on all for loop

"""

import parsl
from parsl import bash_app, python_app, DataFlowKernel, ThreadPoolExecutor
import os
import shutil
import random
import argparse

workers = ThreadPoolExecutor(max_workers=8)
dfk = DataFlowKernel(executors=[workers])


def create_dirs(cwd):
    # Create relax.01..relax.03, each with 1-5 numbered subdirectories
    # holding a small 'results' file of test data.
    for dir in ['relax.01', 'relax.02', 'relax.03']:
        rel_dir = '{0}/{1}'.format(cwd, dir)
        if os.path.exists(rel_dir):
            shutil.rmtree(rel_dir)
        os.makedirs(rel_dir)
        for i in range(0, random.randint(1, 5)):
            rdir = '{0}/{1}'.format(rel_dir, i)
            os.makedirs(rdir)
            with open('{0}/results'.format(rdir), 'w') as f:
                f.write("{0} {1} - test data\n".format(i, dir))
Example #7
    such directories, with similar VASP runtimes as before.
6 - Once these are done, we need to run some more python code that we don't actually
    have yet, but that a student here supposedly has written and tested.

We will be working on Stampede 2. We haven't put our code in a repo (though we should - Qingyi...)
and everything we used can be installed via pip.

"""

from parsl import bash_app, python_app, DataFlowKernel, ThreadPoolExecutor
import os
import shutil
import random

workers = ThreadPoolExecutor(max_workers=8)
dfk = DataFlowKernel(executors=[workers])


def create_dirs(cwd):

    for dir in ['relax.01', 'relax.02', 'relax.03']:
        rel_dir = '{0}/{1}'.format(cwd, dir)
        if os.path.exists(rel_dir):
            shutil.rmtree(rel_dir)
        os.makedirs(rel_dir)
        for i in range(0, random.randint(1, 5)):
            rdir = '{0}/{1}'.format(rel_dir, i)
            os.makedirs(rdir)
            with open('{0}/results'.format(rdir), 'w') as f:
                f.write("{0} {1} - test data\n".format(i, dir))
Example #8
import parsl
from parsl import App, DataFlowKernel
from parsl.monitoring.db_logger import MonitoringConfig

threads_config = parsl.config.Config(
    executors=[
        parsl.executors.threads.ThreadPoolExecutor(label='threads', max_threads=4)
    ],
    monitoring_config=MonitoringConfig(
        database_type='local_database',
        logger_name='parsl_db_logger',
        eng_link='sqlite:///parsl.db',
        web_app_host='http://localhost',
        web_app_port=8899,
        resource_loop_sleep_duration=15,
    ),
)
dfk = DataFlowKernel(config=threads_config)


@App('python', dfk)
def cpu_stress_fail(workers=1,
                    timeout=10,
                    inputs=[],
                    stdout='stdout_for_fail.txt',
                    stderr='stderr_for_fail.txt'):
    raise AssertionError("Just an Error")
    cpu_stress()  # unreachable: the raise above makes this app fail first


@App('python', dfk)
def cpu_stress(workers=1, timeout=10, inputs=[], outputs=[]):
    s = 0
    for i in range(10**8):
        s += i
    return s
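
# Assumed usage sketch: the failing app's exception surfaces when the
# future's result() is called.
if __name__ == "__main__":
    fut = cpu_stress_fail()
    try:
        fut.result()
    except AssertionError as e:
        print("App failed as expected:", e)
    dfk.cleanup()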
Example #9
import logging

import parsl
from parsl import App, DataFlowKernel
from parsl.config import Config
from parsl.executors.threads import ThreadPoolExecutor
from parsl.monitoring.monitoring import MonitoringHub

# parsl.set_stream_logger()

threads_config = Config(
    executors=[ThreadPoolExecutor(
        label='threads',
        max_threads=4)
    ],
    monitoring=MonitoringHub(
        hub_address="127.0.0.1",
        hub_port=55055,
        logging_level=logging.INFO,
        resource_monitoring_interval=10,
    )
)

dfk = DataFlowKernel(config=threads_config)


@App('python', dfk)
def sleeper(dur=25):
    import time
    time.sleep(dur)


@App('python', dfk)
def cpu_stress(dur=30):
    import time
    s = 0
    start = time.time()
    for i in range(10**8):
        s += i
        # stop once the requested duration has elapsed
        if time.time() - start >= dur:
            break
    return s
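
# Assumed driver: run both apps under the MonitoringHub configured above,
# then shut the DFK down so monitoring data is flushed.
if __name__ == "__main__":
    futures = [sleeper(dur=5), cpu_stress(dur=10)]
    [f.result() for f in futures]
    dfk.cleanup()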