async def A():
    """Demo: run a scheduler, two resource-constrained workers, and a client.

    Submits ``x + 1`` on 10 with a GPU/CPU resource requirement and prints
    the awaited result (11). All components are torn down by the context
    managers on exit.
    """
    async with Scheduler(host='tcp://127.0.0.1', port=8786, protocol='tcp') as sched:
        # Two workers: a small CPU-only one and a large GPU+CPU one.
        async with Worker(sched.address, resources={'CPU': 8}) as small_worker, \
                Worker(sched.address, resources={'GPU': 128, 'CPU': 128}) as big_worker:
            async with Client(sched.address, asynchronous=True) as client:
                # Resource constraints route the task to the GPU-capable worker.
                fut = client.submit(lambda x: x + 1, 10, resources={'GPU': 16, 'CPU': 16})
                value = await fut
                print(value)
def start_worker():
    """Start a single Dask worker in the current process.

    Runs a Tornado IOLoop on a daemon background thread and attaches a
    Worker (connecting to the scheduler at tcp://127.0.0.1:8786) to it.

    Returns:
        The created Worker instance (previously ``None``; returning it is
        backward-compatible and lets callers inspect or stop the worker).
    """
    # Needs to be an other process or you get: RuntimeError: IOLoop is already running
    # BUGFIX: `loop` was previously undefined because this assignment was
    # commented out; every call raised NameError.
    loop = IOLoop.current()
    t = Thread(target=loop.start)
    t.daemon = True  # don't block interpreter shutdown on the IOLoop thread
    t.start()
    w = Worker('tcp://127.0.0.1:8786', loop=loop)
    w.start()  # choose randomly assigned port
    # BUGFIX: Python 2 print statement converted to a function call.
    print('Starting a worker')
    return w
def __init__(self, *args, n=1, name=None, nthreads=None, **kwargs):
    """Create *n* sibling Workers named ``<name>-<i>``.

    The total *nthreads* budget is divided evenly (integer division)
    across the workers; all other arguments are forwarded verbatim.
    """
    threads_per_worker = nthreads // n
    self.workers = []
    for index in range(n):
        self.workers.append(
            Worker(
                *args,
                name=f"{name}-{index}",
                nthreads=threads_per_worker,
                **kwargs,
            )
        )
def __init__(self, *args, n=1, name=None, nthreads=None, **kwargs):
    """Build a pool of *n* Workers plus a shared async startup lock.

    Each worker is named ``<name>-<i>`` and receives an equal integer
    share of *nthreads*; remaining arguments pass straight through.
    """
    share = nthreads // n
    self.workers = [
        Worker(*args, name=f"{name}-{idx}", nthreads=share, **kwargs)
        for idx in range(n)
    ]
    # Serializes startup across the sibling workers.
    self._startup_lock = asyncio.Lock()
async def _run_test():
    """Spin up a scheduler and worker, run the pipeline off-loop, and check it.

    ``_execute`` runs in the default thread-pool executor (it is blocking);
    the test asserts the run succeeded and that solid "simple" produced 1.
    """
    with instance_for_test() as test_instance:
        async with Scheduler() as sched:
            async with Worker(sched.address):
                loop = asyncio.get_event_loop()
                run_result = await loop.run_in_executor(
                    None, _execute, sched.address, test_instance
                )
                assert run_result.success
                assert run_result.result_for_solid("simple").output_value() == 1
def get(self, key: str, worker: Worker) -> Any:
    """Return the value cached for *key* on *worker*, initializing it once.

    Double-checked locking on ``worker._lock`` guarantees the registered
    initializer runs at most once per worker even under concurrency.

    Raises:
        Exception: if *key* was never registered in ``self.initializers``.
    """
    assert hasattr(worker, "_preloader_data")
    if key not in self.initializers:
        raise Exception(f"Attempted to get unregistered key {key}")
    # Fast path: already initialized, no lock needed.
    if key in worker._preloader_data:
        return worker._preloader_data[key]
    with worker._lock:
        # Re-check under the lock: another thread may have won the race.
        if key not in worker._preloader_data:
            worker._preloader_data[key] = self.initializers[key]()
    return worker._preloader_data[key]
def scheduler_and_workers(n=2):
    """Generator fixture yielding ``(scheduler, workers)`` with *n* workers.

    Everything is closed in the ``finally`` block (scheduler first, then
    each worker), so teardown happens even if the consumer raises.
    """
    sched = Scheduler()
    workers = []
    for _ in range(n):
        workers.append(Worker(sched.address_to_workers))
    try:
        yield sched, workers
    finally:
        sched.close()
        for worker in workers:
            worker.close()
def __init__(self, work_dir, scheduler_ip, scheduler_port):
    """Create the wrapper's logger and its underlying Dask Worker.

    The worker connects to the given scheduler address, runs with the
    machine's thread count, serves a dashboard, and uses *work_dir* as
    its local directory.
    """
    self.logger = logging.getLogger(type(self).__name__)
    self.worker = Worker(
        scheduler_ip=scheduler_ip,
        scheduler_port=scheduler_port,
        nthreads=get_nthreads(),
        dashboard=True,
        local_directory=work_dir,
        http_prefix="/dask",
    )
def scheduler_and_workers(n=2):
    """Generator fixture yielding ``(scheduler, workers)`` on 127.0.0.1.

    Busy-waits (tiny sleeps) until all *n* workers have registered with
    the scheduler before yielding; workers are closed before the
    scheduler on teardown.
    """
    sched = Scheduler(hostname='127.0.0.1')
    workers = [
        Worker(sched.address_to_workers, hostname='127.0.0.1', nthreads=10)
        for _ in range(n)
    ]
    # Poll until every worker has shown up in the scheduler's registry.
    while len(sched.workers) < n:
        sleep(1e-6)
    try:
        yield sched, workers
    finally:
        for worker in workers:
            worker.close()
        sched.close()
def setup(self, worker: Worker): worker._preloader_data = {}
def __init__(self, *args, delay=0, **kwargs):
    """Wrap a Worker, remembering an artificial *delay* (seconds).

    All positional/keyword arguments other than *delay* are forwarded to
    the underlying Worker; ``status`` starts out as ``None``.
    """
    self.delay = delay
    self.status = None
    self.worker = Worker(*args, **kwargs)
def setup(self, worker: Worker): if not hasattr(worker, '_ema_models'): worker._ema_models = {} for msi in self._msis: worker._ema_models[msi.name] = msi
def start_worker(scheduler_address):
    """Create a Dask Worker pointed at *scheduler_address* and return it.

    BUGFIX: the original created the Worker and discarded it — the object
    was never started, returned, or stored, so the function had no usable
    effect. Returning the instance is backward-compatible (the previous
    return value was ``None``, which callers necessarily ignored) and lets
    the caller start/await it.
    """
    from dask.distributed import Worker

    worker = Worker(scheduler_address)
    return worker