def runtime_sched():
    # Import the GPU architecture if CUDA support is available; CPU-only
    # installations simply skip it.
    try:
        from parla.cuda import gpu
    except ImportError:
        pass
    # Start the Parla runtime and hand the live scheduler to the caller.
    with Parla() as p:
        yield get_scheduler_context().scheduler
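# Usage sketch (an assumption, not taken from this file): if this generator is
# registered as a pytest fixture, tests can receive the running scheduler
# directly. The fixture name `sched` and the pytest wiring are hypothetical.
#
#   import pytest
#
#   @pytest.fixture
#   def sched():
#       yield from runtime_sched()
#
#   def test_scheduler_is_live(sched):
#       assert sched is get_scheduler_context().scheduler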
def _reserve_persistent_memory(memsize, device):
    resource_pool = get_scheduler_context().scheduler._available_resources
    resource_pool.allocate_resources(device, {'memory': memsize}, blocking=True)
    try:
        yield
    finally:
        resource_pool.deallocate_resources(device, {'memory': memsize})
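# Usage sketch (hedged): this generator follows the contextmanager style, so
# wrapping it with contextlib.contextmanager lets callers hold a reservation
# for the lifetime of a with-block. The device expression `gpu(0)` and the
# 1 GiB size below are illustrative assumptions, not values from this file.
#
#   from contextlib import contextmanager
#
#   reserve_persistent_memory = contextmanager(_reserve_persistent_memory)
#
#   with reserve_persistent_memory(memsize=1 << 30, device=gpu(0)):
#       ...  # the 1 GiB reservation is held until the block exits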
def decorator(body):
    nonlocal placement, memory
    if data is not None:
        if placement is not None or memory is not None:
            raise ValueError("The data parameter cannot be combined with placement or memory parameters.")
        placement = data
        memory = array.storage_size(*data)

    devices = get_placement_for_any(placement)

    resources = {}
    if memory is not None:
        resources["memory"] = memory
    if vcus is not None:
        resources["vcus"] = vcus

    req = DeviceSetRequirements(resources, ndevices, devices, tags)

    if inspect.isgeneratorfunction(body):
        raise TypeError("Spawned tasks must be normal functions or coroutines; not generators.")

    # Compute the flat dependency set (including unwrapping TaskID objects)
    deps = tasks(*dependencies)._flat_tasks

    if inspect.iscoroutine(body):
        # An already running coroutine does not need changes since we assume
        # it was changed correctly when the original function was spawned.
        separated_body = body
    else:
        # Perform a horrifying hack to build a new function which will
        # not be able to observe changes in the original cells in the
        # task's outer scope. To do this we build a new function with a
        # replaced closure which contains new cells.
        separated_body = type(body)(
            body.__code__, body.__globals__, body.__name__, body.__defaults__,
            closure=body.__closure__ and tuple(_make_cell(x.cell_contents) for x in body.__closure__))
        separated_body.__annotations__ = body.__annotations__
        separated_body.__doc__ = body.__doc__
        separated_body.__kwdefaults__ = body.__kwdefaults__
        separated_body.__module__ = body.__module__

    taskid.dependencies = dependencies

    # Spawn the task via the Parla runtime API
    task = task_runtime.get_scheduler_context().spawn_task(
        _task_callback, (separated_body,), deps, taskid=taskid, req=req)

    logger.debug("Created: %s %r", taskid, body)

    for scope in _task_locals.task_scopes:
        scope.append(task)

    # Return the task object
    return task
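# Usage sketch (hedged): this is the inner decorator built by @spawn, so user
# code never calls it directly. A typical spawn site might look as follows,
# assuming TaskSpace, cpu, and spawn are imported from the parla packages;
# the argument order shown mirrors the Parla examples and is an assumption
# about the enclosing spawn() signature, which is not part of this excerpt.
#
#   T = TaskSpace("T")
#
#   @spawn(T[0], placement=cpu)
#   def producer():
#       ...
#
#   @spawn(T[1], [T[0]])      # runs after T[0] completes
#   def consumer():
#       ...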