Example 1
def mdrun_dispatcher(context, *, input, label: str = None, **kwargs) -> typing.Type['AbstractOperation']:
    """Dispatch to an appropriate director based on the context and input.

    Runs appropriate director code to set up an operation, returning a handle to
    a simulation operation node.

    Arguments:
        context: Execution context in which to attempt to instantiate an operation from input
        input: operation input. Argument type is context dependent.
        label: human readable tag for the work node to be created.

    Additional keyword arguments are passed on to the dispatched factory.

    """
    # TODO: This is a legacy import that should be updated.
    from .context import get_context
    if label is not None:
        raise exceptions.NotImplementedError('sorry... no labels yet')
    try:
        legacy_context = get_context(work=input)
    except Exception:
        legacy_context = None

    def run_session():
        with legacy_context as session:
            session.run()
        return True

    if context is not None and context is legacy_context:
        helper = function_wrapper(output={'data': bool})(run_session)
        return helper(**kwargs)
    else:
        raise exceptions.ValueError('Could not dispatch MD input {} with context {}'.format(input, legacy_context))
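
A minimal, self-contained sketch of the dispatch pattern above, with stand-in names (LegacyContext, get_context, and dispatch here are illustrative, not the gmxapi implementation):

class LegacyContext:
    """Stand-in for the context object resolved by gmxapi's get_context()."""
    def __init__(self, work):
        self.work = work
    def __enter__(self):
        return self
    def __exit__(self, *exc):
        return False
    def run(self):
        return 'ran {}'.format(self.work)

_contexts = {}

def get_context(work):
    # Memoize so repeated lookups for the same work resolve to the same context.
    return _contexts.setdefault(work, LegacyContext(work))

def dispatch(context, work):
    try:
        legacy_context = get_context(work)
    except Exception:
        legacy_context = None
    if context is not None and context is legacy_context:
        with legacy_context as session:
            return session.run()
    raise ValueError('Could not dispatch {} with context {}'.format(work, legacy_context))

assert dispatch(get_context('sim'), 'sim') == 'ran sim'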
Example 2
    def __init__(self, data=None):
        self._values = []
        self.dtype = None
        self.shape = ()
        if data is not None:
            if hasattr(data, 'result') or (
                    isinstance(data, collections.abc.Iterable)
                    and any(hasattr(item, 'result') for item in data)):
                raise exceptions.ValueError(
                    'Make a Future of type NDArray instead of NDArray of type Future, or call result() first.'
                )
            if isinstance(data, (str, bytes)):
                data = [data]
                length = 1
            else:
                try:
                    length = len(data)
                except TypeError:
                    # data is a scalar
                    length = 1
                    data = [data]
            self._values = data
            if length > 0:
                self.dtype = type(data[0])
                self.shape = (length, )
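
The shape and dtype inference above can be exercised on its own; a standalone sketch using only builtins (infer is an illustrative name):

def infer(data):
    # Strings count as single items, not sequences of characters, and
    # scalars are promoted to length-1 lists, mirroring the constructor above.
    if isinstance(data, (str, bytes)):
        data = [data]
    else:
        try:
            len(data)
        except TypeError:
            data = [data]
    if len(data) > 0:
        return type(data[0]), (len(data), )
    return None, ()

assert infer([1, 2, 3]) == (int, (3, ))
assert infer('abc') == (str, (1, ))
assert infer(1.5) == (float, (1, ))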
Example 3
    def __init__(self, resource_manager, name, dtype):
        self.name = name
        if not isinstance(dtype, type):
            raise exceptions.ValueError('dtype argument must specify a type.')
        self.dtype = dtype
        # This abstraction anticipates that a Future might not retain a strong
        # reference to the resource_manager, but only to a facility that can resolve
        # the result() call. Additional aspects of the Future interface can be
        # developed without coupling to a specific concept of the resource manager.
        self._result = ResultGetter(resource_manager, name, dtype)
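
A self-contained sketch of the decoupling described in the comment: the Future keeps only a resolver callable rather than the whole resource manager (MiniFuture is illustrative, not gmxapi API):

class MiniFuture:
    def __init__(self, resolver, name, dtype):
        if not isinstance(dtype, type):
            raise ValueError('dtype argument must specify a type.')
        self.name = name
        self.dtype = dtype
        # Hold only what is needed to resolve result(), e.g. a bound method.
        self._resolve = resolver

    def result(self):
        value = self._resolve(self.name)
        assert isinstance(value, self.dtype)
        return value

published = {'data': 42}
future = MiniFuture(published.__getitem__, 'data', int)
assert future.result() == 42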
Example 4
def concatenate_lists(sublists: list = ()):
    """Combine data sources into a single list.

    A trivial data flow restructuring operation.
    """
    if isinstance(sublists, (str, bytes)):
        raise exceptions.ValueError('Input must be a list of lists.')
    if len(sublists) == 0:
        return []
    else:
        return append_list(sublists[0], concatenate_lists(sublists[1:]))
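
For comparison, an iterative standard-library equivalent of the recursion above (a sketch; plain list concatenation stands in for append_list, so scalar promotion is not reproduced):

from functools import reduce

def concat(sublists):
    if isinstance(sublists, (str, bytes)):
        raise ValueError('Input must be a list of lists.')
    return reduce(lambda a, b: list(a) + list(b), sublists, [])

assert concat([[1, 2], [3], []]) == [1, 2, 3]
assert concat(()) == []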
Example 5
    @staticmethod
    def deserialize(serialized):
        import json
        workspec = WorkSpec()
        dict_representation = json.loads(to_string(serialized))
        ver_in = dict_representation['version']
        ver_out = workspec.version
        if ver_in != ver_out:
            message = "Expected work spec version {}. Got work spec version {}.".format(
                ver_out, ver_in)
            raise exceptions.ValueError(message)
        for element, representation in dict_representation['elements'].items():
            workspec.elements[element] = representation
        return workspec
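
The version check round-trips as follows; a standalone sketch using only json (check_and_load is an illustrative name; the version string is the one shown in the from_tpr() docs below):

import json

def check_and_load(serialized, expected_version='gmxapi_workspec_0_1'):
    representation = json.loads(serialized)
    version = representation['version']
    if version != expected_version:
        raise ValueError('Expected work spec version {}. Got work spec version {}.'.format(
            expected_version, version))
    return dict(representation['elements'])

document = json.dumps({'version': 'gmxapi_workspec_0_1',
                       'elements': {'tpr_input': {'operation': 'load_tpr'}}})
assert 'tpr_input' in check_and_load(document)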
Example 6
        def future(self, name: str = None, dtype=None):
            """Retrieve a Future for a named output.

            TODO: (FR5+) Normalize this part of the interface between operation definitions
            and resource managers.
            """
            if not isinstance(name, str) or name not in self._data:
                raise exceptions.ValueError('"name" argument must name an output.')
            assert dtype is not None
            if dtype != self._data[name].dtype:
                message = 'Requested Future of type {} is not compatible with available type {}.'
                message = message.format(dtype, self._data[name].dtype)
                raise exceptions.ApiError(message)
            return Future(self, name, dtype)
Example 7
def append_list(a: list = (), b: list = ()):
    """Operation that consumes two lists and produces a concatenated single list."""
    # TODO: (FR4) Returned list should be an NDArray.
    if isinstance(a, (str, bytes)) or isinstance(b, (str, bytes)):
        raise exceptions.ValueError('Input must be a pair of lists.')
    try:
        list_a = list(a)
    except TypeError:
        list_a = [a]
    try:
        list_b = list(b)
    except TypeError:
        list_b = [b]
    return list_a + list_b
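
The try/except TypeError promotion is a common idiom for accepting either a scalar or a sequence; a standalone sketch (as_list is an illustrative name):

def as_list(value):
    # Note: strings are iterable, so callers must reject them first,
    # as append_list() does above.
    try:
        return list(value)
    except TypeError:
        return [value]

assert as_list((1, 2)) == [1, 2]
assert as_list(3) == [3]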
Example 8
def to_string(input) -> str:
    """Return a Unicode string representation of ``input``.

    If ``input`` or its string representation is not already a Unicode object, attempt to decode as utf-8.

    Returns a native string, decoding utf-8 encoded byte sequences if necessary.
    """
    if isinstance(input, str):
        value = input
    else:
        try:
            value = input.decode('utf-8')
        except Exception:
            try:
                value = str(input)
            except Exception as e:
                raise exceptions.ValueError(
                    "Cannot find a string representation of input.") from e
    return value
Example 9
def to_utf8(input) -> bytes:
    """Return a utf8 encoded byte sequence of the Unicode ``input`` or its string representation.

    Returns:
         :py:bytes byte sequence.
    """
    if isinstance(input, str):
        value = input.encode('utf-8')
    elif isinstance(input, bytes):
        value = input
    else:
        try:
            string = str(input)
            value = string.encode('utf-8')
        except Exception as e:
            raise exceptions.ValueError(
                "Input cannot be interpreted as a UTF-8 compatible string."
            ) from e
    return value
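
A standalone round-trip sketch of both helpers, with builtin exceptions standing in for exceptions.ValueError:

def _to_string(obj):
    if isinstance(obj, str):
        return obj
    try:
        return obj.decode('utf-8')
    except (AttributeError, UnicodeDecodeError):
        return str(obj)

def _to_utf8(obj):
    if isinstance(obj, bytes):
        return obj
    return str(obj).encode('utf-8')

assert _to_string(b'abc') == 'abc' and _to_string(42) == '42'
assert _to_utf8('abc') == b'abc' and _to_utf8(42) == b'42'
assert _to_string(_to_utf8('åäö')) == 'åäö'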
Example 10
    def add_element(self, element):
        """Add an element to a work specification if possible.

        Adding an element to a WorkSpec must preserve the validity of the workspec, which involves several checks.
        We do not yet check for element uniqueness beyond a string name.

        If an element is added that was previously in another WorkSpec, it must first be removed from the
        other WorkSpec.
        """
        if hasattr(element, "namespace") and hasattr(
                element, "operation") and hasattr(element, "serialize"):
            if not hasattr(element, "name") or element.name is None or len(
                    str(element.name)) < 1:
                raise exceptions.UsageError(
                    "Only named elements may be added to a WorkSpec.")
            if element.name in self.elements:
                raise exceptions.UsageError(
                    "Elements in WorkSpec must be uniquely identifiable.")
            if hasattr(element, "depends"):
                for dependency in element.depends:
                    if not dependency in self.elements:
                        raise exceptions.UsageError(
                            "Element dependencies must already be specified before an Element may be added."
                        )
            # Okay, it looks like we have an element we can add
            if hasattr(
                    element, "workspec"
            ) and element.workspec is not None and element.workspec is not self:
                raise exceptions.Error(
                    "Element must be removed from its current WorkSpec to be added to this WorkSpec, but element "
                    "removal is not yet implemented.")
            self.elements[element.name] = element.serialize()
            element.workspec = self
        else:
            raise exceptions.ValueError(
                "Provided object does not appear to be compatible with gmx.workflow.WorkElement."
            )
        logger.info("Added element {} to workspec.".format(element.name))
Example 11
def cli(command: NDArray,
        shell: bool,
        output: OutputCollectionDescription,
        stdin: str = ''):
    """Execute a command line program in a subprocess.

    Configure an executable in a subprocess. Executes when run in an execution
    Context, as part of a work graph or via gmx.run(). Runs in the current
    working directory.

    Shell processing is not enabled, but can be considered for a future version.
    This means that shell expansions such as environment variables, globbing (`*`),
    and other special symbols (like `~` for home directory) are not available.
    This allows a simpler and more robust implementation, as well as a better
    ability to uniquely identify the effects of a command line operation. If you
    think this disallows important use cases, please let us know.

    Arguments:
         command: a tuple (or list) to be the subprocess arguments, including `executable`
         output: mapping of command line flags to output filename arguments
         shell: unused (provides forward-compatibility)
         stdin (str): String input to send to STDIN (terminal input) of the executable.

    Multi-line text sent to *stdin* should be joined into a single string
    (e.g. ``'\n'.join(list_of_strings) + '\n'``).
    If multiple strings are provided to *stdin*, gmxapi will assume an ensemble,
    and will run one operation for each provided string.

    Only string input (``str``) to *stdin* is currently supported.
    If you have a use case that requires streaming input or binary input,
    please open an issue or contact the author(s).

    Arguments are iteratively added to the command line with standard Python
    iteration, so you should use a tuple or list even if you have only one parameter.
    That is, if you provide a string such as `arguments="asdf"`, it will be passed as
    `... "a" "s" "d" "f"`. To pass a single string argument, use `arguments=("asdf",)`
    (note the trailing comma, which makes it a tuple) or `arguments=["asdf"]`.

    `input` and `output` should be dictionaries with string keys, where the keys
    name command line "flags" or options.

    Example:
        Execute a command named `exe` that takes a flagged option for file name
        (stored in a local Python variable `my_filename`) and an `origin` flag
        that uses the next three arguments to define a vector.

            >>> my_filename = "somefilename"
            >>> result = cli(('exe', '--origin', 1.0, 2.0, 3.0, '-f', my_filename), shell=False)
            >>> assert hasattr(result, 'file')
            >>> assert hasattr(result, 'erroroutput')
            >>> assert hasattr(result, 'returncode')

    Returns:
        A data structure with attributes for each of the results `file`, `erroroutput`, and `returncode`

    Result object attributes:
        * `file`: the mapping of CLI flags to filename strings resulting from the `output` kwarg
        * `erroroutput`: A string of error output (if any) if the process failed.
        * `returncode`: return code of the subprocess.

    """
    # Note: we could make provisions for stdio filehandles in a future version. E.g.
    # * STDOUT is available if a consuming operation is bound to `output.stdout`.
    # * STDERR is available if a consuming operation is bound to `output.stderr`.
    # * Otherwise, STDOUT and/or STDERR is(are) closed when command is called.

    # In the operation implementation, we expect the `shell` parameter to be intercepted by the
    # wrapper and set to False.
    if shell:
        raise exceptions.UsageError(
            "Operation does not support shell processing.")

    if stdin == '':
        stdin = None

    if isinstance(command, (str, bytes)):
        command = [command]
    command = list(command)

    executable = shutil.which(command[0])
    if executable is None:
        raise exceptions.ValueError(
            '"{}" is not found or not executable.'.format(command[0]))
    command[0] = executable

    # TODO: (FR9) Can OS input/output filehandles be a responsibility of
    #  the code providing 'resources'?

    erroroutput = ''
    logger.debug('executing subprocess')
    try:
        completed_process = subprocess.run(command,
                                           shell=shell,
                                           input=stdin,
                                           check=True,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.STDOUT,
                                           encoding='utf-8')
        returncode = completed_process.returncode
        # TODO: Resource management code should manage a safe data object for `output`.
        for line in completed_process.stdout.split('\n'):
            logger.debug(line)
    except subprocess.CalledProcessError as e:
        logger.info(
            "commandline operation had non-zero return status when calling {}".
            format(e.cmd))
        erroroutput = e.output
        returncode = e.returncode
    # Publish outputs.
    output.erroroutput = erroroutput
    output.returncode = returncode
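
The subprocess handling reduces to a small standard-library pattern; a minimal sketch (assumes an echo executable on PATH):

import subprocess

try:
    completed = subprocess.run(['echo', 'hello'],
                               check=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               encoding='utf-8')
    returncode, erroroutput = completed.returncode, ''
except subprocess.CalledProcessError as e:
    # check=True raises on non-zero exit; e.output carries the combined stream.
    returncode, erroroutput = e.returncode, e.output

assert returncode == 0 and erroroutput == ''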
Example 12
def cli(command: NDArray, shell: bool, output: OutputCollectionDescription):
    """Execute a command line program in a subprocess.

    Configure an executable in a subprocess. Executes when run in an execution
    Context, as part of a work graph or via gmx.run(). Runs in the current
    working directory.

    Shell processing is not enabled, but can be considered for a future version.
    This means that shell expansions such as environment variables, globbing (`*`),
    and other special symbols (like `~` for home directory) are not available.
    This allows a simpler and more robust implementation, as well as a better
    ability to uniquely identify the effects of a command line operation. If you
    think this disallows important use cases, please let us know.

    Arguments:
         command: a tuple (or list) to be the subprocess arguments, including `executable`
         output: mapping of command line flags to output filename arguments
         shell: unused (provides forward-compatibility)

    Arguments are iteratively added to the command line with standard Python
    iteration, so you should use a tuple or list even if you have only one parameter.
    That is, if you provide a string such as `arguments="asdf"`, it will be passed as
    `... "a" "s" "d" "f"`. To pass a single string argument, use `arguments=("asdf",)`
    (note the trailing comma, which makes it a tuple) or `arguments=["asdf"]`.

    `input` and `output` should be dictionaries with string keys, where the keys
    name command line "flags" or options.

    Example:
        Execute a command named `exe` that takes a flagged option for file name
        (stored in a local Python variable `my_filename`) and an `origin` flag
        that uses the next three arguments to define a vector.

            >>> my_filename = "somefilename"
            >>> result = cli(('exe', '--origin', 1.0, 2.0, 3.0, '-f', my_filename), shell=False)
            >>> assert hasattr(result, 'file')
            >>> assert hasattr(result, 'erroroutput')
            >>> assert hasattr(result, 'returncode')

    Returns:
        A data structure with attributes for each of the results `file`, `erroroutput`, and `returncode`

    Result object attributes:
        * `file`: the mapping of CLI flags to filename strings resulting from the `output` kwarg
        * `erroroutput`: A string of error output (if any) if the process failed.
        * `returncode`: return code of the subprocess.

    """
    # Note: we could make provisions for stdio filehandles in a future version. E.g.
    # * STDOUT is available if a consuming operation is bound to `output.stdout`.
    # * STDERR is available if a consuming operation is bound to `output.stderr`.
    # * Otherwise, STDOUT and/or STDERR is(are) closed when command is called.
    #
    # Warning:
    #     Commands relying on STDIN cannot be used; STDIN is closed when the command is called.

    # In the operation implementation, we expect the `shell` parameter to be intercepted by the
    # wrapper and set to False.
    if shell:
        raise exceptions.UsageError("Operation does not support shell processing.")

    if isinstance(command, (str, bytes)):
        command = [command]
    command = list(command)
    executable = shutil.which(command[0])
    if executable is None:
        # shutil.which() returns None on a failed lookup rather than raising.
        raise exceptions.ValueError('command argument could not be resolved to an executable file path.')
    command[0] = executable

    # TODO: (FR9) Can OS input/output filehandles be a responsibility of
    #  the code providing 'resources'?

    erroroutput = ''
    logger.debug('executing subprocess')
    try:
        # TODO: If Python >=3.5 is required, switch to subprocess.run()
        command_output = subprocess.check_output(command,
                                                 shell=shell,
                                                 stdin=subprocess.DEVNULL,
                                                 stderr=subprocess.STDOUT,
                                                 )
        returncode = 0
        # TODO: Resource management code should manage a safe data object for `output`.
        # WARNING: We have no reason to assume the output is utf-8 encoded text!!!
        for line in command_output.decode('utf-8').split('\n'):
            logger.debug(line)
    except subprocess.CalledProcessError as e:
        logger.info("commandline operation had non-zero return status when calling {}".format(e.cmd))
        erroroutput = e.output.decode('utf-8')
        returncode = e.returncode
    # resources.output.erroroutput.publish(erroroutput)
    # resources.output.returncode.publish(returncode)
    # `publish` is descriptive, but redundant. Access to the output data handler is
    # assumed to coincide with publishing, and we assume data is published when the
    # handler is released. A class with a single `publish` method is overly complex
    # since we can just use the assignment operator.
    output.erroroutput = erroroutput
    output.returncode = returncode
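
Note that shutil.which() signals a failed lookup by returning None rather than raising, which is why the fix above checks explicitly; a minimal sketch (assumes a python3 or python executable on PATH):

import shutil

executable = shutil.which('python3') or shutil.which('python')
if executable is None:
    # Reached only when neither name is on PATH.
    raise ValueError('command argument could not be resolved to an executable file path.')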
Example 13
def from_tpr(input=None, **kwargs):
    """Create a WorkSpec from a (list of) tpr file(s).

    Generates a work specification based on the provided simulation input and returns a handle to the
    MD simulation element of the workflow. Keyword arguments can override simulation behavior from
    ``input``.

    If the MD operation discovers artifacts from a previous simulation that was launched from the same input,
    the simulation resumes from the last checkpointed step. If ``append_output`` is set ``False``, existing
    artifacts are kept separate from new output with the standard file naming convention,
    and new output begins from the last checkpointed step, if any.

    Setting ``end_time`` redefines the end point of the simulation trajectory from what was provided in
    ``input``. It is equivalent to changing the number of steps requested in the MDP (or TPR) input, but
    the time is provided as picoseconds instead of a number of time steps.

    .. deprecated:: 0.0.7
        If ``steps=N`` is provided and N is an integer
        greater than or equal to 1, the MD operation advances the trajectory by ``N`` steps, regardless of the number
        of simulation steps specified in ``input`` or ``end_time``. For convenience, setting ``steps=None`` does not override
        ``input``.
        Note that when it is not ``None``, ``steps`` takes precedence over ``end_time`` and ``input``, but can still be
        superseded by a signal, such as if an MD plugin or other code has a simulation completion condition that occurs
        before ``N`` additional steps have run.

    Where keyword arguments correspond to ``gmx mdrun`` command line options, the corresponding flags are noted below.

    Keyword Arguments:
        input (str): *Required* string or list of strings giving the filename(s) of simulation input
        append_output (bool): Append output for continuous trajectories if True, truncate existing output data if False. (default True)
        end_time (float): Specify the final time in the simulation trajectory, overriding input read from TPR.
        grid (tuple): Domain decomposition grid divisions (nx, ny, nz). (-dd)
        max_hours (float): Terminate after 0.99 times this many hours if simulation is still running. (-maxh)
        pme_ranks (int): number of separate ranks to be used for PME electrostatics. (-npme)
        threads_per_pme_rank (int): Number of OpenMP threads per PME rank. (-ntomp_pme)
        steps (int): Override input files and run for this many steps. (-nsteps; deprecated)
        threads (int): Total number of threads to start. (-nt)
        threads_per_rank (int): number of OpenMP threads to start per MPI rank. (-ntomp)
        tmpi (int): number of thread-MPI ranks to start. (-ntmpi)

    ..  versionchanged:: 0.1
        *pme_threads_per_rank* renamed to *threads_per_pme_rank*.

    Returns:
        simulation member of a gmx.workflow.WorkSpec object

    Produces a WorkSpec with the following data::

        version: gmxapi_workspec_0_1
        elements:
            tpr_input:
                namespace: gromacs
                operation: load_tpr
                params: {'input': ['tpr_filename1', 'tpr_filename2', ...]}
            md_sim:
                namespace: gmxapi
                operation: md
                depends: ['tpr_input']
                params: {'kw1': arg1, 'kw2': arg2, ...}

    Bugs: version 0.0.6
        * There is not a way to programmatically check the current step number on disk.
          See https://github.com/kassonlab/gmxapi/issues/56 and https://github.com/kassonlab/gmxapi/issues/85
    """
    import os

    usage = "argument to from_tpr() should be a valid filename or list of filenames, followed by optional key word arguments."

    # Normalize to tuple input type.
    if isinstance(input, (list, tuple)):
        tpr_list = tuple([to_string(element) for element in input])
    else:
        try:
            tpr_list = (to_string(input), )
        except Exception:
            raise exceptions.UsageError(usage)

    # Check for valid filenames
    for arg in tpr_list:
        if not (os.path.exists(arg) and os.path.isfile(arg)):
            arg_path = os.path.abspath(arg)
            raise exceptions.UsageError(usage + " Got {}".format(arg_path))

    # Note: These are runner parameters, not MD parameters, and should be in the call to gmx.run() instead of here.
    # Reference https://github.com/kassonlab/gmxapi/issues/95
    params = {}
    for arg_key in kwargs:
        if arg_key == 'grid' or arg_key == 'dd':
            params['grid'] = tuple(kwargs[arg_key])
        elif arg_key == 'pme_ranks' or arg_key == 'npme':
            params['pme_ranks'] = int(kwargs[arg_key])
        elif arg_key == 'threads' or arg_key == 'nt':
            params['threads'] = int(kwargs[arg_key])
        elif arg_key == 'tmpi' or arg_key == 'ntmpi':
            params['tmpi'] = int(kwargs[arg_key])
        elif arg_key == 'threads_per_rank' or arg_key == 'ntomp':
            params['threads_per_rank'] = int(kwargs[arg_key])
        elif arg_key == 'pme_threads_per_rank' or arg_key == 'threads_per_pme_rank' or arg_key == 'ntomp_pme':
            # TODO: Remove this temporary accommodation.
            assert not gmx.version.api_is_at_least(0, 2)
            if arg_key == 'pme_threads_per_rank':
                warnings.warn(
                    "Key word pme_threads_per_rank has been renamed to threads_per_pme_rank.",
                    DeprecationWarning)
            params['threads_per_pme_rank'] = int(kwargs[arg_key])
        elif arg_key == 'steps' or arg_key == 'nsteps':
            if kwargs[arg_key] is None:
                # None means "don't override the input" which is indicated by a parameter value of -2 in GROMACS 2019
                steps = -2
            else:
                # Otherwise we require steps to be a positive integer
                try:
                    steps = int(kwargs[arg_key])
                except (TypeError, ValueError) as e:
                    # steps is not an integer.
                    raise exceptions.TypeError(
                        '"steps" could not be interpreted as an integer.') from e
                # Check the range outside the try block so that the intended
                # ValueError is not swallowed by the except clause above.
                if steps < 1:
                    raise exceptions.ValueError(
                        'steps to run must be at least 1')
                # The "nsteps" command line flag will be removed in GROMACS 2020
                # and so "steps" is deprecated in gmxapi 0.0.7
                warnings.warn(
                    "`steps` keyword argument is deprecated. Consider `end_time` instead.",
                    DeprecationWarning)
            params['steps'] = steps
        elif arg_key == 'max_hours' or arg_key == 'maxh':
            params['max_hours'] = float(kwargs[arg_key])
        elif arg_key == 'append_output':
            # Try not to encourage confusion with the `mdrun` `-noappend` flag, which would be a confusing double negative if represented as a bool.
            params['append_output'] = bool(kwargs[arg_key])
        elif arg_key == 'end_time':
            params[arg_key] = float(kwargs[arg_key])
        else:
            raise exceptions.UsageError(
                "Invalid key word argument: {}. {}".format(arg_key, usage))

    # Create an empty WorkSpec
    workspec = WorkSpec()

    # Create and add the Element for the tpr file(s)
    inputelement = WorkElement(namespace='gromacs',
                               operation='load_tpr',
                               params={'input': tpr_list})
    inputelement.name = 'tpr_input'
    if inputelement.name not in workspec.elements:
        # Operations such as this need to be replaced with accessors or properties that can check the validity of the WorkSpec
        workspec.elements[inputelement.name] = inputelement.serialize()
        inputelement.workspec = workspec

    # Create and add the simulation element
    # We can add smarter handling of the `depends` argument, but it is only critical to check when adding the element
    # to a WorkSpec.
    mdelement = WorkElement(operation='md',
                            depends=[inputelement.name],
                            params=params)
    mdelement.name = 'md_sim'
    # Check that the element has not already been added, but that its dependency has.
    workspec.add_element(mdelement)

    return mdelement
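
The long elif chain is essentially a synonym table plus a per-parameter conversion; a condensed standalone sketch covering a few of the flags above (normalize and the module-level tables are illustrative names):

_CANONICAL = {'grid': 'grid', 'dd': 'grid',
              'pme_ranks': 'pme_ranks', 'npme': 'pme_ranks',
              'max_hours': 'max_hours', 'maxh': 'max_hours'}
_CONVERT = {'grid': tuple, 'pme_ranks': int, 'max_hours': float}

def normalize(**kwargs):
    params = {}
    for key, value in kwargs.items():
        if key not in _CANONICAL:
            raise ValueError('Invalid keyword argument: {}.'.format(key))
        name = _CANONICAL[key]
        params[name] = _CONVERT[name](value)
    return params

assert normalize(dd=[2, 2, 1], maxh=1) == {'grid': (2, 2, 1), 'max_hours': 1.0}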
Example 14
    def __chase_deps(self, source_set: Set[str], name_list: Iterable[Text]):
        """Helper to recursively generate dependencies before dependents.

        Given a set of WorkElement objects and a list of element names, generate WorkElements for
        the members of name_list plus their dependencies in an order such that dependencies are
        guaranteed to occur before their dependent elements.

        For example, to sequence an entire work specification into a reasonable order for instantiation, use

            >>> workspec.__chase_deps(set(workspec.elements.keys()), list(workspec.elements.keys()))

        Note: as a member function of WorkSpec, we have access to the full WorkSpec elements data at all
        times, giving us extra flexibility in implementation and arguments.

        Args:
            source_set: a (super)set of element names from the current work spec (will be consumed)
            name_list: subset of *source_set* to be sequenced

        Returns:
            Sequence of WorkElement objects drawn from the names in *source_set*

        Requires that WorkElements named in *name_list* and any elements on which
        they depend are all named in *source_set* and available in the current
        work spec.

        Warning: *source_set* is a reference to an object that is modified arbitrarily.
        The caller should not re-use the object after calling __chase_deps().
        (Make a copy first, if needed.)

        TODO: Separate out DAG topology operations from here and Context.__enter__()
        Our needs are simple enough that we probably don't need an external dependency
        like networkx...
        """
        # Recursively (depth-first) generate a topologically valid serialized DAG from source_set.
        assert isinstance(source_set, set)
        if isinstance(name_list, (str, bytes)):
            warnings.warn(
                'name_list appears to be a single name. Disambiguate a string by passing a list or tuple.'
            )
        assert isinstance(name_list, collections.abc.Iterable)

        # Make a copy of name_list in case the input reference is being used elsewhere during
        # iteration, such as for source_set, which is modified during the loop.
        for name in tuple(name_list):
            assert isinstance(name, str)
            if name in source_set:
                source_set.remove(name)
                element = WorkElement.deserialize(self.elements[name],
                                                  name=name,
                                                  workspec=self)
                dependencies = element.depends
                # items in element.depends are either element names or ensembles of element names.
                for item in dependencies:
                    if isinstance(item, (list, tuple, set)):
                        dependency_list = item
                    else:
                        if not isinstance(item, str):
                            raise exceptions.ValueError(
                                'Dependencies should be a string or sequence of strings. Got {}'
                                .format(type(item)))
                        dependency_list = [item]
                    for dependency in dependency_list:
                        yield from self.__chase_deps(source_set, (dependency, ))
                yield element
            else:
                # Note: The user is responsible for ensuring that source_set is complete.
                # Otherwise, we would need to maintain a list of elements previously yielded.
                pass
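
The generator above is a depth-first topological sort; a self-contained sketch over a plain dependency mapping (chase_deps is an illustrative stand-in for the method):

def chase_deps(source_set, name_list, depends):
    # Yield names depth-first so dependencies precede their dependents.
    for name in tuple(name_list):
        if name in source_set:
            source_set.remove(name)
            for dependency in depends.get(name, ()):
                yield from chase_deps(source_set, (dependency, ), depends)
            yield name

depends = {'md_sim': ['tpr_input'], 'analysis': ['md_sim']}
order = list(chase_deps({'tpr_input', 'md_sim', 'analysis'}, ['analysis'], depends))
assert order == ['tpr_input', 'md_sim', 'analysis']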