def __init__(self,
             provider=LocalProvider(),
             label='ipp',
             engine_file='~/.ipython/profile_default/security/ipcontroller-engine.json',
             engine_dir='.',
             working_dir=None,
             controller=Controller(),
             container_image=None,
             storage_access=None,
             engine_debug_level=None,
             managed=True):
    self.provider = provider
    self.label = label
    self.engine_file = engine_file
    self.engine_dir = engine_dir
    self.working_dir = working_dir
    self.controller = controller
    self.engine_debug_level = engine_debug_level
    self.container_image = container_image
    # Normalize storage_access to a list; only one scheme is supported for now.
    self.storage_access = storage_access if storage_access is not None else []
    if len(self.storage_access) > 1:
        raise ConfigurationError('Multiple storage access schemes are not yet supported')
    self.managed = managed
    # Translate the engine debug level into an ipengine command-line flag.
    self.debug_option = ""
    if self.engine_debug_level:
        self.debug_option = "--log-level={}".format(self.engine_debug_level)
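A minimal usage sketch of this constructor, assuming only the parameters shown above; the label and debug level values are illustrative, not taken from the source:

from libsubmit.providers import LocalProvider
from parsl.executors.ipp import IPyParallelExecutor

# Hypothetical values: a single local provider and an engine debug level
# that the constructor turns into "--log-level=DEBUG".
executor = IPyParallelExecutor(
    label='debug_ipp',
    provider=LocalProvider(init_blocks=1),
    engine_debug_level='DEBUG',
)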
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from libsubmit.providers import LocalProvider

config = Config(
    executors=[
        IPyParallelExecutor(
            label='tiny_config',
            provider=LocalProvider(
                init_blocks=1,
                min_blocks=0,
                max_blocks=4,
                walltime='00:03:00'
            )
        )
    ]
)
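If this config were loaded with the DataFlowKernel-style API used later in this section, usage might look like the following sketch; the `double` app is a made-up example:

from parsl.app.app import App
from parsl.dataflow.dflow import DataFlowKernel

dfk = DataFlowKernel(config=config)

@App('python', dfk)
def double(x):
    return 2 * x

# Apps return futures; result() blocks until the task completes.
future = double(21)
print(future.result())  # -> 42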
from libsubmit.providers import LocalProvider
from libsubmit.channels import LocalChannel
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor

config = Config(
    executors=[
        IPyParallelExecutor(
            label="local_ipp",
            provider=LocalProvider(
                channel=LocalChannel(),
                init_blocks=2,
                max_blocks=2,
            )
        )
    ]
)
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from libsubmit.providers import LocalProvider, SlurmProvider
from libsubmit.channels import LocalChannel

localNode = Config(
    executors=[
        IPyParallelExecutor(
            provider=LocalProvider(
                init_blocks=2,
                min_blocks=1,
                max_blocks=4,
            ),
            label='ipp',
        ),
    ],
    app_cache=False,
)

rccNodeExclusive = Config(
    executors=[
        IPyParallelExecutor(
            label='midway_ipp',
            provider=SlurmProvider(
                'broadwl',
                channel=LocalChannel(),
                # launcher=SrunLauncher(),
                init_blocks=1,
                min_blocks=0,
                max_blocks=3,
            ),
        ),
    ],
)
"""The following config uses threads say for local lightweight apps and IPP workers for heavy weight applications. The app decorator has a parameter `executors=[<list of executors>]` to specify the executor to which apps should be directed. """ from libsubmit.providers import LocalProvider from parsl.config import Config from parsl.executors.ipp import IPyParallelExecutor from parsl.executors.threads import ThreadPoolExecutor from parsl.tests.utils import get_rundir config = Config(executors=[ ThreadPoolExecutor(max_threads=4, label='local_threads'), IPyParallelExecutor(label='local_ipp', provider=LocalProvider(walltime="00:05:00", nodes_per_block=1, tasks_per_node=1, init_blocks=4)) ], run_dir=get_rundir())
"""The following config uses two IPP executors designed for python apps which may not show any performance improvements on local threads. This also allows you to send work to two separate remote executors, or to two separate partitions. """ from parsl.config import Config from libsubmit.providers import LocalProvider from parsl.executors.ipp import IPyParallelExecutor from parsl.tests.utils import get_rundir config = Config( executors=[ IPyParallelExecutor(label='local_ipp_1', provider=LocalProvider( nodes_per_block=1, tasks_per_node=1, walltime="00:15:00", init_blocks=4, )), IPyParallelExecutor(label='local_ipp_2', provider=LocalProvider( nodes_per_block=1, tasks_per_node=1, walltime="00:15:00", init_blocks=2, )) ], run_dir=get_rundir(), )
""" import argparse import random import shutil import time import pytest from libsubmit.providers import LocalProvider from parsl.app.app import App from parsl.config import Config from parsl.dataflow.dflow import DataFlowKernel from parsl.executors.ipp import IPyParallelExecutor config = Config(executors=[ IPyParallelExecutor(label='pool_app1', provider=LocalProvider(init_blocks=1), container_image='app1_v0.1'), IPyParallelExecutor(label='pool_app2', provider=LocalProvider(init_blocks=1), container_image='app2_v0.1') ]) dfk = DataFlowKernel(config=config) @App('python', dfk, executors=['pool_app1'], cache=True) def app_1(data): import app1 return app1.predict(data) @App('python', dfk, executors=['pool_app2'], cache=True)