Example #1
    def __set_name__(self, owner, name):
        # Called by type.__new__ during class creation to allow customization.
        # Let's start with strict naming requirements for early implementations,
        # and explicitly forbid multiple instances of this data descriptor implementation
        # in the same class.
        # Note that __set_name__ is called at most once, by type.__new__,
        # for the class definition in which the descriptor is instantiated.
        # In other words, __set_name__ is called only for the base class,
        # and __init_subclass__ is called only for derived classes.
        if name != '_dtype':
            raise ProtocolError(
                'TypeDataDescriptor has a strict naming protocol. Only use for a '
                '`_dtype` attribute.')
        self.name = name
        if hasattr(owner, self.attr_name):
            raise ProtocolError(
                f'No storage for data descriptor. {repr(owner)} already has an '
                f'attribute named {self.attr_name}.')

        assert owner not in self.base
        assert len(self.base) == 0
        logger.debug(
            f'Initializing base class {owner} ownership of TypeDataDescriptor.'
        )
        self._original_owner = weakref.ref(owner)
        if self._original_owner_type is None:
            self._original_owner_type = TypeIdentifier.copy_from(
                [str(owner.__module__)] + owner.__qualname__.split('.'))
        self.base[owner] = TypeIdentifier.copy_from(self._original_owner_type)
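
A minimal sketch of the naming protocol enforced above, assuming TypeDataDescriptor can be constructed without arguments (class names other than BasicSerializable are hypothetical). The descriptor may only be assigned to an attribute named `_dtype`, and only in one (base) class.

class BasicSerializable:
    _dtype = TypeDataDescriptor()  # OK: __set_name__ receives name == '_dtype'

class Mislabeled:
    dtype = TypeDataDescriptor()  # raises ProtocolError: wrong attribute name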
Example #2
from contextlib import contextmanager


@contextmanager
def scope(context):
    """Set the current workflow manager within a clear scope.

    Restore the previous workflow management scope on exiting the context manager.

    Within the context managed by *scope*, get_context() will return *context*.

    Not thread-safe. In general, this context manager should only be used in the
    root thread.
    """
    from scalems.workflow import Scope

    parent = get_context()
    dispatcher = _dispatcher.get(None)
    if dispatcher is not None and parent is not dispatcher:
        raise ProtocolError(
            'It is unsafe to use concurrent scope() context managers in an asynchronous '
            'context.')
    logger.debug('Entering scope of {}'.format(str(context)))
    current = context
    token = current_scope.set(
        Scope(
            parent=parent,
            current=current)
    )
    if token.var.get().parent is current:
        logger.warning('Unexpected re-entrance. Workflow is already managed by '
                       f'{repr(current)}')
    if (token.old_value is not token.MISSING
            and token.old_value.current != token.var.get().parent):
        raise ProtocolError(
            'Unrecoverable race condition: multiple threads are updating global context '
            'unsafely.')
    # Try to confirm that current_scope is not already subject to modification by another
    #  context manager in a shared asynchronous context.
    # This nesting has to have LIFO semantics both in and out of coroutines,
    # and cannot block.
    # One option would be to refuse to nest if the current scope is not the root scope and
    # the root scope has an active dispatcher. Note that a dispatcher should use
    # contextvars.copy_context().run() and set a new root context.
    # Alternatively, we could try to make sure that no asynchronous yields are allowed
    # when the current context is a nested scope within a dispatcher context,
    # but technically this is okay as long as a second scope is not nested within the
    # first from within a coroutine that might not finish until after the first scope
    # finishes.
    try:
        yield current
    finally:
        """Exit context manager without processing exceptions."""
        logger.debug('Leaving scope of {}'.format(str(context)))
        # Restore context module state since we are not using contextvars.Context.run()
        # or equivalent.
        if token.var.get().parent is not parent or token.var.get().current is not current:
            raise ProtocolError(
                'Unexpected re-entrance. Workflow scope changed while in context '
                f'manager {repr(current)}.')
        else:
            token.var.reset(token)
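
A hedged usage sketch for scope(), assuming an existing WorkflowManager instance named *manager*:

with scope(manager) as current:
    assert get_context() is current
    # ... manipulate the workflow through the manager for the current scope ...
# On exit, the previous scope is restored.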
Example #3
    def __init__(self, context, uid: bytes):
        self._context = weakref.ref(context)
        if isinstance(uid, bytes) and len(uid) == 32:
            self._uid = uid
        else:
            raise ProtocolError(
                'uid should be a 32-byte binary digest (bytes).')
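
The *uid* protocol above expects a 32-byte (256-bit) binary digest. A sketch of a conforming value using the standard library:

import hashlib

uid = hashlib.sha256(b'fingerprint of the task record').digest()
assert isinstance(uid, bytes) and len(uid) == 32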
Example #4
    def __init__(self, manager, uid: bytes):
        self._workflow_manager = weakref.ref(manager)
        if isinstance(uid, bytes) and len(uid) == 32:
            self._uid = uid
        else:
            raise ProtocolError('uid should be a 32-byte binary digest (bytes). '
                                f'Got {repr(uid)}')
Example #5
def get_context():
    """Get a reference to the manager of the current workflow scope."""
    # TODO: Redocument and adjust semantics.
    # The contextvars and get_context should only be used in conjunction with
    # a workflow_scope() context manager that is explicitly not thread-safe, but
    # which can employ some checks for non-multi-threading access assumptions.
    # get_context() is used to determine the default workflow manager when *context*
    # is not provided to scalems object factories, scalems.run(), scalems.wait() and
    # (non-async) `result()` methods. Default *context* values are a user convenience
    # and so should only occur in the root thread for the UI / high-level scripting
    # interface.
    # Async coroutines can safely use get_context(), but should not use the
    # non-async workflow_scope() context manager for nested scopes without
    # wrapping in a contextvars.copy_context().run().
    from scalems.workflow import Scope

    try:
        _scope: Scope = current_scope.get()
        current_context = _scope.current
        logger.debug(
            f'Scope queried with get_context() {repr(current_context)}')
        # This check is in case we use weakref.ref:
        if current_context is None:
            raise ProtocolError(
                'Context for current scope seems to have disappeared.')
    except LookupError:
        logger.debug('Scope was queried, but has not yet been set.')
        current_context = None
    return current_context
Example #6
    def constructor_proxy_director(*args, **kwargs) -> ItemView:
        if not isinstance(item_type, type):
            raise ProtocolError(
                'This function is intended for a dispatching path on which *item_type* '
                'is a `type` object.')
        item = item_type(*args, **kwargs)
        director = workflow_item_director_factory(item, manager=manager, label=label)
        return director()
Example #7
    def set_result(self, result):
        # Not thread-safe.
        if self._done.is_set():
            raise ProtocolError('Result is already set for {}.'.format(repr(self)))
        self._result = result
        self._done.set()
        logger.debug('Result set for {} in {}'.format(
            self.uid().hex(), str(self._context())))
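
For context, a sketch of the consumer side implied by set_result(): the asyncio.Event gates access to the stored result. (The method name and signature here are assumptions, not confirmed by the snippet.)

    async def result(self):
        # Wait for set_result() before exposing self._result.
        await self._done.wait()
        return self._result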
Example #8
    def __init_subclass__(cls, **kwargs):
        assert cls is not BasicSerializable

        # Handle SCALE-MS Type registration.
        base = kwargs.pop('base_type', None)
        if base is not None:
            typeid = TypeIdentifier.copy_from(base)
        else:
            typeid = TypeIdentifier.copy_from(
                [str(cls.__module__)] + cls.__qualname__.split('.'))
        registry = BasicSerializable._dtype.base
        if cls in registry and registry[cls] is not None:
            # This may be a customization or extension point in the future,
            # but not today...
            raise ProtocolError(
                'Subclassing BasicSerializable for a Type that is already registered.'
            )
        BasicSerializable._dtype.base[cls] = typeid

        # Register an encoder for all subclasses. Register the default encoder
        # if not overridden.
        # Note: This does not allow us to retain the identity of *cls* for when
        # we call the helpers. We may require such information for encoder
        # functions to know why they are being called.
        encoder = getattr(cls, 'encode', BasicSerializable.encode)
        PythonEncoder.register(cls, encoder)

        # Optionally, register a new decoder.
        # If no decoder is provided, use the basic decoder.
        if hasattr(cls, 'decode') and callable(cls.decode):
            _decoder = weakref.WeakMethod(cls.decode)

            # Note that we do not require that the decoded object is actually
            # an instance of cls.

            def _decode(encoded: dict):
                decoder = _decoder()
                if decoder is None:
                    raise ProtocolError(
                        'Decoding a type that has already been de-registered.')
                return decoder(encoded)

            PythonDecoder.register(cls._dtype, _decode)

        # TODO: Register optional instance initializer / input processor.
        # Allow instances to be created with something other than a single-argument
        # of the registered Input type.

        # TODO: Register/generate UI helper.
        # From the user's perspective, an importable module function interacts
        # with the WorkflowManager to add workflow items and return a handle.
        # Do we want to somehow generate an entry-point command?

        # TODO: Register result dispatcher(s).
        # An AbstractDataSource must register a dispatcher to an implementation
        # that produces a ConcreteDataSource that provides the registered Result type.
        # A ConcreteDataSource must provide support for checksum calculation and verification.
        # Optionally, ConcreteDataSource may provide facilities to convert to/from
        # native Python objects or other types (such as .npz files).

        # Proceed dispatching along the MRO, per documented Python data model.
        super().__init_subclass__(**kwargs)
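
A hedged sketch of the registration hook in action. The subclass names and the explicit type identifier are hypothetical; the *base_type* keyword is consumed by __init_subclass__ above.

class MyData(BasicSerializable):
    """Registered with a typeid derived from __module__ and __qualname__."""

class MyAlias(BasicSerializable, base_type=['mypackage', 'MyData']):
    """Registered under an explicitly provided type identifier."""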
Example #9
    def register(cls, dtype: typing.Type[DispatchT],
                 handler: typing.Callable[[DispatchT], BaseEncodable]):
        # Note that we don't expect references to bound methods to extend the life
        # of the type.
        # TODO: confirm this assumption in a unit test.
        if not isinstance(dtype, type):
            raise TypeError(
                'We use `isinstance(obj, dtype)` for dispatching, so *dtype* must '
                'be a `type` object.')
        if dtype in cls._dispatchers:
            raise ProtocolError(
                f'Encodable type {dtype} appears to be registered already.')
        cls._dispatchers[dtype] = handler
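
A hedged usage sketch for the registry above (MyType and its encoder are hypothetical):

class MyType:
    ...

def encode_mytype(obj: MyType) -> dict:
    return {'schema': 'mytype', 'value': str(obj)}

PythonEncoder.register(MyType, encode_mytype)
# Registering MyType a second time would raise ProtocolError.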
Example #10
    def __init__(self, manager: 'WorkflowManager', record: str):
        self._serialized_record = str(record)
        decoded_record = json.loads(self._serialized_record)

        self._uid = bytes.fromhex(decoded_record['uid'])
        if len(self._uid) != 256 // 8:
            raise ProtocolError('UID is supposed to be a 256-bit hash digest. '
                                f'Got {repr(self._uid)}')
        self._done = asyncio.Event()
        self._result = None

        # As long as we are storing Tasks in the workflow, we cannot store
        # workflows in Tasks.
        self._context = weakref.ref(manager)
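
A hedged sketch of a record that satisfies the constructor above: a JSON object whose 'uid' field is the hex encoding of a 256-bit digest. (The enclosing class name is not shown in the snippet; Task is an assumption.)

import hashlib
import json

record = json.dumps({'uid': hashlib.sha256(b'task data').hexdigest()})
task = Task(manager, record)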
Example #11
    def __init__(self, context, record):
        self._encoded = str(record)
        decoded_record = json.loads(self._encoded)

        self._uid = bytes.fromhex(decoded_record['uid'])
        if len(self._uid) != 256 // 8:
            raise ProtocolError(
                'UID is supposed to be a 256-bit hash digest. Got {}'.format(
                    repr(self._uid)))
        self._done = asyncio.Event()
        self._result = None

        # As long as we are storing Tasks in the context, we cannot store contexts in Tasks.
        self._context = weakref.ref(context)
Example #12
    def runtime_startup(self, runner_started: asyncio.Event) -> asyncio.Task:
        configuration: Configuration = self.configuration()
        self.runtime = _connect_rp(configuration)

        if self.runtime is None or self.runtime.session.closed:
            raise ProtocolError('Cannot process queue without a RP Session.')

        # Launch queue processor (proxy executor).
        # TODO: Make runtime_startup optional. Let it return a resource that is
        #  provided to the normalized run_executor(), or maybe use it to configure the
        #  Submitter that will be provided to the run_executor.
        runner_task = asyncio.create_task(
            scalems.execution.manage_execution(
                self, processing_state=runner_started))
        # TODO: Note the expected scope of the runner_task lifetime with respect to
        #  the global state changes (i.e. ContextVars and locks).
        return runner_task
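
A hedged sketch (inside a coroutine) of how runtime_startup() might be driven, based on the *runner_started* handshake above:

runner_started = asyncio.Event()
runner_task = executor.runtime_startup(runner_started)
await runner_started.wait()  # queue processing is now active
# ... submit work ...
await runner_task  # resolves after a *stop* command is processed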
Example #13
    def __init__(self, *, loop: asyncio.AbstractEventLoop, executor_factory):
        """
        The event loop for the program should be launched in the root thread,
        preferably early in the application launch.
        Whether or not the WorkflowManager uses it directly,
        it is useful to require the client to provide the event loop,
        if for no other reason than to ensure that one exists.

        Args:
            loop: event loop, such as from asyncio.new_event_loop()
            executor_factory: Implementation-specific callable to get a runtime
                work manager.
        """
        # We are moving towards a composed rather than a derived WorkflowManager Context.
        # Note that we can require the super().__init__() to be called in derived
        # classes, so it is not nonsensical for an abc.ABC to have an __init__ method.
        if not isinstance(loop, asyncio.AbstractEventLoop):
            raise TypeError(
                'Workflow manager requires an event loop object compatible with '
                'asyncio.AbstractEventLoop.')
        if loop.is_closed():
            raise ProtocolError(
                'Event loop does not appear to be ready to use.')
        logger.debug(
            f'{repr(self)} acquired event loop {repr(loop)} at loop time '
            f'{loop.time()}.')
        self._asyncio_event_loop = loop

        if not callable(executor_factory):
            raise TypeError('*executor_factory* argument must be a callable.')
        self._executor_factory = executor_factory

        # Basic Context implementation details
        # TODO: Tasks should only be writable within a WorkflowEditor context.
        self.tasks = TaskMap()  # Map UIDs to task Futures.

        self._dispatcher: typing.Union[weakref.ref, None] = None
        self._dispatcher_lock = asyncio.Lock()

        self._event_hooks: typing.Mapping[
            str, typing.MutableSet[AddItemCallback]] = {
                'add_item': set()
            }
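
A hedged construction sketch, assuming a concrete WorkflowManager subclass and an implementation-specific factory (my_executor_factory is hypothetical):

import asyncio

loop = asyncio.new_event_loop()
manager = WorkflowManager(loop=loop, executor_factory=my_executor_factory)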
Example #14
    async def _single_iteration_queue(self,
                                      source: _queue.SimpleQueue,
                                      target: asyncio.Queue):
        """Transfer one queue item.

        If a *stop* command is encountered, self-cancel after transferring the command.

        To avoid race conditions while stopping queue processing,
        place a *stop* command in *source* and asyncio.shield() a call
        to this coroutine in a *try: ... except: ...* block.

        Note that the caller will then receive CancelledError after the *stop* command
        has been transferred.

        Raises:
            queue.Empty: if *source* is empty.
            asyncio.CancelledError: when cancelled or when *stop* is received.

        """
        command: QueueItem = source.get_nowait()
        logger.debug(f'Processing command {repr(command)}')

        await target.put(command)

        # TODO: Use formal RPC protocol.
        if 'control' in command:
            # Note that we don't necessarily need to stop managing the dispatcher queue
            # at this point, but the Executor will be directed to shut down,
            # so we must not put anything else onto the command queue until we have a
            # new command queue or a new executor.
            if command['control'] == 'stop':
                raise asyncio.CancelledError()
            else:
                raise ProtocolError('Unknown command: {}'.format(command['control']))
        else:
            if 'add_item' not in command:
                # TODO: We might want a call-back or Event to force errors before the
                #  queue-runner task is awaited.
                raise MissingImplementationError(
                    f'Executor has no implementation for {str(command)}'
                )
        return command
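
A hedged sketch of the stopping pattern described in the docstring. Enqueue a *stop* command, then shield the transfer so the self-cancellation surfaces to the caller as CancelledError without cancelling the shielded work. (Constructing QueueItem from a dict is an assumption.)

source.put(QueueItem({'control': 'stop'}))
try:
    await asyncio.shield(self._single_iteration_queue(source, target))
except asyncio.CancelledError:
    logger.debug('Queue processing stopped after *stop* command.')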
Example #15
    async def dispatch(self):
        """Enter the execution dispatching state."""
        if self.session is not None:
            raise ProtocolError('Dispatching context is not reentrant.')
        logger.debug('Entering RP dispatching context.')

        try:
            self.session = self.rp.Session()
            pmgr = self.rp.PilotManager(session=self.session)
            self.umgr = self.rp.UnitManager(session=self.session)
            pilot = pmgr.submit_pilots(
                self.rp.ComputePilotDescription(self.pilot_description))
            self.umgr.add_pilots(pilot)
            # Note: We should have an active session now, ready to receive tasks, but
            # no tasks have been submitted.
            # TODO: Process the queue of tasks submitted before entering the dispatcher context.
            yield self
        finally:
            logger.debug('Awaiting RP tasks.')
            await asyncio.wait(self.task_map.values())
            self.shutdown()
Example #16
    def _decode(encoded: dict):
        decoder = _decoder()
        if decoder is None:
            raise ProtocolError(
                'Decoding a type that has already been de-registered.')
        return decoder(encoded)
Example #17
    def register(cls, typeid: TypeIdentifier, handler: typing.Callable):
        # Normalize the typeid.
        typeid = TypeIdentifier.copy_from(typeid)
        if typeid in cls._dispatchers:
            raise ProtocolError('Type appears to be registered already.')
        cls._dispatchers[typeid] = handler
Example #18
    async def dispatch(self):
        """Start the executor task, then provide a scope for concurrent activity.

        Provide the executor with any currently-managed work in a queue.
        While the scope is active, new work added to the queue will be picked up
        by the executor.

        When leaving the `with` block, trigger the executor clean-up and wait for its task to complete.

        .. todo:: Clarify re-entrance policy, thread-safety, etcetera, and enforce.

        .. todo:: Allow an externally provided dispatcher factory, or even a running dispatcher?

        """
        # 1. Install a hook to catch new calls to add_item (the dispatcher_queue)
        #    and try not to yield until the current workflow state is obtained.
        # 2. Get a snapshot of the current workflow state with which to initialize
        #    the dispatcher. (It is now okay to yield.)
        # 3. Bind a new executor to its queue.
        # 4. Bind a dispatcher to the executor and the dispatcher_queue.
        # 5. Allow the executor and dispatcher to start using the event loop.

        # Avoid race conditions while checking for a running dispatcher.
        async with self._dispatcher_lock:
            # Dispatching state may be reentrant, but it does not make sense to
            # re-enter through this call structure.
            if self._dispatcher is not None:
                raise ProtocolError(
                    'Already dispatching through {}.'.format(repr(self._dispatcher())))
            # For an externally-provided dispatcher:
            #     else:
            #         self._dispatcher = weakref.ref(dispatcher)

            # 1. Install a hook to catch new calls to add_item
            if self._queue is not None:
                raise ProtocolError('Found unexpected dispatcher queue.')
            dispatcher_queue = queue.SimpleQueue()
            self._queue = dispatcher_queue

            # 2. Get a snapshot of the current workflow state with which to
            #    initialize the dispatcher.
            # TODO: Topologically sort DAG!
            initial_task_list = list(self.task_map.keys())
            #  It is now okay to yield.

            # 3. Bind a new executor to its queue.
            # Note: if there were a reason to decouple the executor lifetime from this scope,
            # we could consider a more object-oriented interface with it.
            executor_queue = asyncio.Queue()
            for key in initial_task_list:
                await executor_queue.put({'add_item': key})
            executor = run_executor(source_context=self, command_queue=executor_queue)

            # 4. Bind a dispatcher to the executor_queue and the dispatcher_queue.
            # TODO: We should bind the dispatcher directly to the executor, but that requires
            #  that we make an Executor class with concurrency-safe methods.
            # dispatcher = run_dispatcher(dispatcher_queue, executor_queue)
            # self._dispatcher = weakref.ref(dispatcher)
            # TODO: Toggle active dispatcher state.
            # scalems.context._dispatcher.set(...)

            # 5. Allow the executor and dispatcher to start using the event loop.
            executor_task = asyncio.create_task(executor)
            # asyncio.create_task(dispatcher)

        try:
            # We can surrender control here and leave the executor and dispatcher tasks running
            # while evaluating a `with` block suite for the `dispatch` context manager.
            yield

        except Exception as e:
            logger.exception(
                'Uncaught exception while in dispatching context: {}'.format(str(e)))
            raise e

        finally:
            async with self._dispatcher_lock:
                self._dispatcher = None
                self._queue = None
            # dispatcher_queue.put({'control': 'stop'})
            # await dispatcher
            # TODO: Make sure the dispatcher hasn't died. Look for acknowledgement
            #  of receipt of the Stop command.
            # TODO: Check status...
            if not dispatcher_queue.empty():
                logger.error(
                    'Dispatcher finished while items remain in dispatcher queue. '
                    'Approximate size: {}'.format(dispatcher_queue.qsize()))

            # Stop the executor.
            executor_queue.put_nowait({'control': 'stop'})
            await executor_task
            if executor_task.exception() is not None:
                raise executor_task.exception()

            # Check that the queue drained.
            # WARNING: The queue will never finish draining if executor_task fails.
            #  I.e. don't `await executor_queue.join()`
            if not executor_queue.empty():
                raise InternalError(
                    'Bug: Executor left tasks in the queue without raising an '
                    'exception.')

            logger.debug('Exiting {} dispatch context.'.format(type(self).__name__))
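
A hedged usage sketch, assuming dispatch() is wrapped as an async context manager (e.g. with contextlib.asynccontextmanager, which the yield-based structure implies):

async with manager.dispatch():
    # Work added to the workflow here is picked up by the running executor.
    ...
# On exit: a *stop* command is queued, the executor task is awaited, and any
# leftover queue items are reported as errors.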
Example #19
    async def dispatch(self, dispatcher: 'Queuer' = None, params=None):
        """Enter the execution dispatching state.

        Attach to a dispatching executor, then provide a scope for concurrent activity.
        This is also the scope during which the RADICAL Pilot Session exists.

        Provide the executor with any currently-managed work in a queue.
        While the context manager is active, new work added to the queue will be picked up
        by the executor. When the context manager is exited, new work will resume
        queuing locally and the remote tasks will be resolved, then the dispatcher
        will be disconnected.

        Currently, we tie the lifetime of the dispatcher to this context manager.
        When leaving the `with` block, we trigger the executor to clean-up and wait for
        its task to complete.
        We may choose some other relationship in the future.

        Args:
            dispatcher: A queue processor that will subscribe to the add_item hook
                to feed the executor.
            params: a parameters object relevant to the execution back-end.

        .. todo:: Clarify re-entrance policy, thread-safety, etcetera, and enforce.

        """

        # 1. Bind a new executor to its queue.
        # 2. Bind a dispatcher to the executor.
        # 3. Enter executor context.
        # 4. Enter dispatcher context.
        #         # 1. (While blocking event loop in UI thread) Install a hook
        #              for the queuer to catch new calls to add_item (the
        #              dispatcher_queue).
        #         # 2. Get snapshot of current workflow state with which to initialize
        #              the executor. (Unblock.)
        #         # 3. Spool workflow snapshot to executor.
        #         # 4. Start dispatcher queue runner.
        #         # 5. Yield.
        # 5. Exit dispatcher context.
        # 6. Exit executor context.
        # TODO: Add lock context for WorkflowManager event hooks
        #  rather than assume the UI and event loop are always in the same thread.

        executor = self._executor_factory(manager=self, params=params)

        # Avoid race conditions while checking for a running dispatcher.
        # TODO: Clarify dispatcher state machine and remove/replace assertions.
        # Warning: The dispatching protocol is immature.
        # Initially, we don't expect contention for the lock,
        # and if there is contention, it probably represents
        # an unintended race condition or systematic dead-lock.
        assert not self._dispatcher_lock.locked()
        async with self._dispatcher_lock:
            # Dispatching state may be reentrant, but it does not make sense to
            # re-enter through this call structure.
            if self._dispatcher is not None:
                raise ProtocolError(
                    f'Already dispatching through {repr(self._dispatcher())}.')
            if dispatcher is None:
                dispatcher = Queuer(source=self,
                                    command_queue=executor.queue(),
                                    dispatcher_lock=self._dispatcher_lock)
                self._dispatcher = dispatcher
            else:
                self._dispatcher = weakref.proxy(dispatcher)

        try:
            # Manage scope of executor operation with a context manager.
            # RP does not yet use an event loop, but we can use async context manager
            # for future compatibility with asyncio management of network connections,
            # etc.
            #
            # Note: the executor owns a rp.Session during operation.
            async with executor as dispatching_session:
                async with dispatcher:
                    # We can surrender control here and leave the executor and
                    # dispatcher tasks active while evaluating a `with` block suite
                    # for the `dispatch` context manager.
                    yield dispatching_session
                # Executor receives a *stop* command in __aexit__.

        except Exception as e:
            logger.exception(
                f'Uncaught exception while in dispatching context: {str(e)}')
            raise e

        finally:
            # Warning: The dispatching protocol is immature.
            # Initially, we don't expect contention for the lock,
            # and if there is contention, it probably represents
            # an unintended race condition or systematic dead-lock.
            # TODO: Clarify dispatcher state machine and remove/replace assertions.
            #       Be on the look-out for nested context managers and usage in
            #       `finally` blocks.
            assert not self._dispatcher_lock.locked()
            async with self._dispatcher_lock:
                self._dispatcher = None

            dispatcher_exception = dispatcher.exception()
            if dispatcher_exception:
                if isinstance(dispatcher_exception, asyncio.CancelledError):
                    logger.info('Dispatching queue processor cancelled.')
                else:
                    assert not isinstance(dispatcher_exception,
                                          asyncio.CancelledError)
                    logger.exception('Queuer encountered exception.',
                                     exc_info=dispatcher_exception)
            else:
                if not dispatcher.queue().empty():
                    logger.error(
                        'Queuer finished while items remain in dispatcher queue. '
                        'Approximate size: {}'.format(
                            dispatcher.queue().qsize()))

            executor_exception = executor.exception()
            if executor_exception:
                if isinstance(executor_exception, asyncio.CancelledError):
                    logger.info('Executor cancelled.')
                else:
                    assert not isinstance(executor_exception,
                                          asyncio.CancelledError)
                    logger.exception('Executor task finished with exception',
                                     exc_info=executor_exception)
            else:
                if not executor.queue().empty():
                    # TODO: Handle non-empty queue.
                    # There are various reasons that the queue might not be empty and
                    # we should clean up properly instead of bailing out or compounding
                    # exceptions.
                    # TODO: Check for extraneous extra *stop* commands.
                    logger.error(
                        'Bug: Executor left tasks in the queue without raising an '
                        'exception.')

            logger.debug('Exiting {} dispatch context.'.format(
                type(self).__name__))
Example #20
async def run_executor(source_context: AsyncWorkflowManager,
                       command_queue: asyncio.Queue):
    """Process workflow messages until a stop message is received.

    Initial implementation processes commands serially without regard for possible
    concurrency.

    Towards concurrency:
        We can create all tasks without awaiting any of them.

        Some tasks will be awaiting results from other tasks.

        All tasks will be awaiting an asyncio.Lock or asyncio.Condition for each
        required resource, but must do so indirectly.

        To avoid dead-locks, we can't have a Lock object for each resource unless
        they are managed by an intermediary that can do some serialization of requests.
        In other words, we need a Scheduler that tracks the resource pool, packages
        resource locks only when they can all be acquired without race conditions or blocking,
        and which then notifies the Condition for each task that it is allowed to run.

        It should not do so until the dependencies of the task are known to have
        all of the resources they need to complete (running with any dynamic dependencies
        also running) and, preferably, complete.

        Alternatively, the Scheduler can operate in blocks, allocating all resources,
        offering the locks to tasks, waiting for all resources to be released, then repeating.
        We can allow some conditions to "wake up" the scheduler to backfill a block
        of resources, but we should be careful with that.

        (We still need to consider dynamic tasks that
        generate other tasks. I think the only way to distinguish tasks which can't be
        dynamic from those which might be would be with the `def` versus `async def` in
        the implementing function declaration. If we abstract `await` with `scalems.wait`,
        we can throw an exception at execution time after checking a ContextVar.
        It may be better to just let implementers use `await` for dynamically created tasks,
        but we need to make the same check if a function calls `.result()` or otherwise
        tries to create a dependency on an item that was not allocated resources before
        the function started executing. In a conservative first draft, we can simply
        throw an exception if a non-`async def` function attempts to call a scalems workflow
        command like add_item while in an executing context.)

    """
    # Could also accept a "stop" Event object, but we would need some other way to yield
    # on an empty queue.
    while True:
        command = await command_queue.get()
        try:
            logger.debug('Executor is handling {}'.format(repr(command)))

            # TODO: Use formal RPC protocol.
            if 'control' in command:
                if command['control'] == 'stop':
                    return
                else:
                    raise ProtocolError('Unknown command: {}'.format(command['control']))
            if 'add_item' not in command:
                raise MissingImplementationError(
                    'Executor has no implementation for {}'.format(str(command)))
            key = command['add_item']
            item = source_context.item(key)
            if not isinstance(item, scalems.context.Task):
                raise InternalError(
                    'Expected {}.item() to return a scalems.context.Task'.format(
                        repr(source_context)))

            # TODO: Ensemble handling
            item_shape = item.description().shape()
            if len(item_shape) != 1 or item_shape[0] != 1:
                raise MissingImplementationError('Executor cannot handle multidimensional tasks yet.')

            # TODO: Automatically resolve resource types.
            task_type_identifier = item.description().type().identifier()
            if task_type_identifier != 'scalems.subprocess.SubprocessTask':
                raise MissingImplementationError(
                    'Executor does not have an implementation for {}'.format(
                        str(task_type_identifier)))
            task_type = scalems.subprocess.SubprocessTask()

            # TODO: Use abstract input factory.
            logger.debug('Resolving input for {}'.format(str(item)))
            input_type = task_type.input_type()
            input_record = input_type(**item.input)
            input_resources = operations.input_resource_scope(
                context=source_context, task_input=input_record)

            # We need to provide a scope in which we guarantee the availability of resources,
            # such as temporary files provided for input, or other internally-generated
            # asyncio entities.
            async with input_resources as subprocess_input:
                logger.debug('Creating coroutine for {}'.format(task_type.__class__.__name__))
                # TODO: Use abstract task factory.
                coroutine = operations.subprocessCoroutine(subprocess_input)
                logger.debug('Creating asyncio Task for {}'.format(repr(coroutine)))
                awaitable = asyncio.create_task(coroutine)

                # TODO: Use abstract results handler.
                logger.debug('Waiting for task to complete.')
                result = await awaitable
                subprocess_exception = awaitable.exception()
                if subprocess_exception is not None:
                    logger.exception('subprocess task raised exception {}'.format(str(subprocess_exception)))
                    raise subprocess_exception
                logger.debug('Setting result for {}'.format(str(item)))
                item.set_result(result)
        finally:
            logger.debug('Releasing "{}" from command queue.'.format(str(command)))
            command_queue.task_done()
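
A hedged sketch of the informal command protocol consumed by run_executor(): one-key dict messages, where 'add_item' carries a workflow item key and 'control' carries lifecycle commands. (*manager* and *key* are assumed to exist.)

command_queue = asyncio.Queue()
await command_queue.put({'add_item': key})    # schedule a workflow item
await command_queue.put({'control': 'stop'})  # then direct the executor to exit
await run_executor(source_context=manager, command_queue=command_queue)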