def F(doc, type, initial_fn=None):
    """
    Shortcut for creating typed, constant :class:`pod.Field` objects.

    :param doc:
        Documentation string for the field.
    :param type:
        Type of the value held by the field (the name shadows the builtin
        ``type`` but is kept for backward compatibility with callers that
        pass it by keyword).
    :param initial_fn:
        Optional callable producing the initial value of the field.
        Previously this argument was accepted but silently ignored; it is
        now honoured.
    :returns:
        A ``pod.Field`` with the ``afn_typed_const`` filter list.
        List-typed fields default to a fresh empty list (so instances never
        share one mutable default); other fields are mandatory unless
        ``initial_fn`` is supplied.
    """
    if type is list:
        return pod.Field(
            doc, type, initial_fn=initial_fn or type,
            assign_filter_list=afn_typed_const)
    elif initial_fn is not None:
        return pod.Field(
            doc, type, initial_fn=initial_fn,
            assign_filter_list=afn_typed_const)
    else:
        return pod.Field(
            doc, type, pod.MANDATORY,
            assign_filter_list=afn_typed_const)
# --- Example #2 (section marker) ---
class UnitValidationContext(pod.POD):
    """
    Helper class for validating units in a bigger context

    This class has two purposes:

    1) to allow the validated object to see "everything" (other units)
    2) to allow validators to share temporary data structures
       and to prevent O(N**2) complexity of some checks.
    """

    # Every provider participating in the validated context.
    provider_list = pod.Field(
        "list of all the providers",
        list,
        pod.MANDATORY,
        assign_filter_list=[pod.typed,
                            pod.typed.sequence(IProvider1)])

    # Scratch space shared by all validators operating on this context.
    shared_cache = pod.Field(
        "cached computations",
        dict,
        initial_fn=dict,
        assign_filter_list=[pod.typed])

    def compute_shared(self, cache_key, func, *args, **kwargs):
        """
        Compute (or fetch) a helper value shared across validators.

        :param cache_key:
            Key under which the helper value is stored in ``shared_cache``
        :param func:
            Function that computes the helper value; invoked as
            ``func(*args, **kwargs)`` only on a cache miss
        :returns:
            The cached value, computing and storing it first if absent.

        This lets validators compute expensive 'global' transformations of
        the context at most once per context.

        .. note::
            The caller is responsible for ensuring that ``args`` and ``kwargs``
            match the `cache_key` each time this function is called.
        """
        try:
            return self.shared_cache[cache_key]
        except KeyError:
            value = func(*args, **kwargs)
            self.shared_cache[cache_key] = value
            return value
class WordList(Node):
    """ node representing a list of words"""

    entries = pod.Field("a list of words", list, initial_fn=list,
                        assign_filter_list=[pod.typed,
                                            pod.typed.sequence(Node),
                                            pod.const])

    @staticmethod
    def parse(
        text: str, lineno: int=1, col_offset: int=0
    ) -> "WordList":
        """
        Parse a list of words.

        Words are separated by whitespace and may be quoted with double
        quotes. Commas between words are accepted but discarded; they are
        entirely optional.

        Some basic examples:

            >>> WordList.parse("foo, bar")
            WordList(entries=[Text(text='foo'), Text(text='bar')])
            >>> WordList.parse("foo,bar")
            WordList(entries=[Text(text='foo'), Text(text='bar')])
            >>> WordList.parse("foo,,,,bar")
            WordList(entries=[Text(text='foo'), Text(text='bar')])
            >>> WordList.parse("foo,,,,bar,,")
            WordList(entries=[Text(text='foo'), Text(text='bar')])

        Words can be quoted, this allows us to include all kinds of characters
        inside:

            >>> WordList.parse('"foo bar"')
            WordList(entries=[Text(text='foo bar')])

        One word of caution, since we use one (and not a very smart one at
        that) scanner, the equals sign is recognized and rejected as incorrect
        input.

            >>> WordList.parse("=")
            WordList(entries=[Error(msg="Unexpected input: '='")])

        """
        items = []
        scanner = WordScanner(text)
        while True:
            token, lexeme = scanner.get_token()
            if token == scanner.TOKEN_EOF:
                break
            if token == scanner.TokenEnum.COMMA:
                # Commas are purely decorative separators.
                continue
            if token == scanner.TokenEnum.WORD:
                node = Text(lineno, col_offset, lexeme)
            else:
                # Anything the scanner cannot classify becomes an Error node.
                node = Error(lineno, col_offset,
                             "Unexpected input: {!r}".format(lexeme))
            items.append(node)
        return WordList(lineno, col_offset, items)
class Node(pod.POD):
    """ base node type """
    # Source position of the node; both fields are constant after assignment
    # and validated to be non-negative.
    lineno = pod.Field("Line number (1-based)",
                       int,
                       0,
                       assign_filter_list=[pod.typed, not_negative, pod.const])
    col_offset = pod.Field(
        "Column offset (0-based)",
        int,
        0,
        assign_filter_list=[pod.typed, not_negative, pod.const])

    def __repr__(self):
        # Show every field except the source-location pair, which would only
        # add noise to doctests and debug output.
        return "{}({})".format(
            self.__class__.__name__, ', '.join([
                '{}={!r}'.format(field.name, getattr(self, field.name))
                for field in self.__class__.field_list
                if field.name not in ('lineno', 'col_offset')
            ]))

    def visit(self, visitor: 'Visitor'):
        """
        Visit all of the sub-nodes reachable from this node

        :param visitor:
            Visitor object that gets to explore this and all the other nodes
        :returns:
            The return value of the visitor's :meth:`Visitor.visit()` method,
            if any.  The default visitor doesn't return anything.

        """
        return visitor.visit(self)

    def enumerate_entries(self) -> "Iterator[Node]":
        """
        Iterate over direct child nodes.

        Fields holding a :class:`Node` are yielded directly; fields holding a
        list yield each ``Node`` item they contain.  Values of other types are
        silently skipped.
        """
        for field in self.__class__.field_list:
            obj = field.__get__(self, self.__class__)
            if isinstance(obj, Node):
                yield obj
            elif isinstance(obj, list):
                for list_item in obj:
                    if isinstance(list_item, Node):
                        yield list_item
class OverrideFieldList(Node):
    """ node representing a whole plainbox field override list"""

    entries = pod.Field("a list of comments and patterns", list,
                        initial_fn=list, assign_filter_list=[
                            pod.typed, pod.typed.sequence(Node), pod.const])

    @staticmethod
    def parse(
        text: str, lineno: int=1, col_offset: int=0
    ) -> "OverrideFieldList":
        """
        Parse a multi-line field override list, one override per line.
        """
        # Each input line is numbered starting at the caller-supplied lineno.
        entries = [
            FieldOverride.parse(line, line_no, col_offset)
            for line_no, line in enumerate(text.splitlines(), lineno)
        ]
        return OverrideFieldList(lineno, col_offset, entries)
# --- Example #6 (section marker) ---
class JobResultBuilder(pod.POD):

    """A builder for job result objects."""

    outcome = pod.Field(
        'outcome of a test', str, pod.UNSET,
        assign_filter_list=[pod.unset_or_typed])
    execution_duration = pod.Field(
        'time of test execution', float, pod.UNSET,
        assign_filter_list=[pod.unset_or_typed])
    comments = pod.Field(
        'comments from the test operator', str, pod.UNSET,
        assign_filter_list=[pod.unset_or_typed])
    return_code = pod.Field(
        'return code from the (optional) test process', int, pod.UNSET,
        assign_filter_list=[pod.unset_or_typed])
    io_log = pod.Field(
        'history of the I/O log of the (optional) test process',
        list, pod.UNSET,
        assign_filter_list=[pod.unset_or_typed,
                            pod.unset_or_typed.sequence(tuple)])
    io_log_filename = pod.Field(
        'path to a structured I/O log file of the (optional) test process',
        str, pod.UNSET, assign_filter_list=[pod.unset_or_typed])

    def add_comment(self, comment):
        """
        Add a new comment.

        The first comment is stored verbatim; later comments are joined to
        the existing text with a newline so nothing is lost.
        """
        current = self.comments
        if current is pod.UNSET:
            self.comments = comment
        else:
            self.comments = current + '\n' + comment

    @raises(ValueError)
    def get_result(self):
        """
        Use the current state of the builder to create a new result.

        :returns:
            A new MemoryJobResult or DiskJobResult with all the data
        :raises ValueError:
            If both io_log and io_log_filename were used.
        """
        have_log = self.io_log is not pod.UNSET
        have_file = self.io_log_filename is not pod.UNSET
        if have_log and have_file:
            raise ValueError(
                "you can use only io_log or io_log_filename at a time")
        # A file-backed log selects the disk-based result implementation.
        result_cls = DiskJobResult if have_file else MemoryJobResult
        return result_cls(self.as_dict())
class IncludeStmtList(Node):
    """ node representing a list of include statements"""

    entries = pod.Field(
        "a list of include statements",
        list,
        initial_fn=list,
        assign_filter_list=[pod.typed,
                            pod.typed.sequence(Node), pod.const])

    @staticmethod
    def parse(text: str,
              lineno: int = 1,
              col_offset: int = 0) -> "IncludeStmtList":
        """
        Parse a multi-line ``include`` field.

        This field is a simple list of :class:`IncludeStmt` with the added
        twist that empty lines (including lines containing just irrelevant
        white-space or comments) are silently ignored.


        Example:
            >>> IncludeStmtList.parse('''
            ...                       foo
            ...                       # comment
            ...                       bar''')
            ... # doctest: +NORMALIZE_WHITESPACE
            IncludeStmtList(entries=[IncludeStmt(pattern=ReFixed(text='foo'),
                                                 overrides=[]),
                                     IncludeStmt(pattern=ReFixed(text='bar'),
                                                 overrides=[])])
        """
        def has_content(line):
            # A line whose very first token is EOF is blank or just a
            # comment; since each line is scanned separately such lines
            # have to be filtered out here.
            return WordScanner(line).get_token()[0] != WordScanner.TOKEN_EOF

        entries = [
            IncludeStmt.parse(line, line_no, col_offset)
            for line_no, line in enumerate(text.splitlines(), lineno)
            if has_content(line)
        ]
        return IncludeStmtList(lineno, col_offset, entries)
class CompositeQualifier(pod.POD):
    """
    A job qualifier that selects jobs matched by any of its inclusive
    qualifiers while rejecting jobs matched by any exclusive qualifier.
    """

    qualifier_list = pod.Field("qualifier_list", list, pod.MANDATORY)

    @property
    def is_primitive(self):
        """Composite qualifiers are never primitive."""
        return False

    def designates(self, job):
        """Check whether this qualifier votes to include *job*."""
        return self.get_vote(job) == IJobQualifier.VOTE_INCLUDE

    def get_vote(self, job):
        """
        Get one of the ``VOTE_IGNORE``, ``VOTE_INCLUDE``, ``VOTE_EXCLUDE``
        votes that this qualifier associated with the specified job.

        :param job:
            A IJobDefinition instance that is to be visited
        :returns:
            * ``VOTE_INCLUDE`` if the job matches at least one qualifier voted
              to select it and no qualifiers voted to deselect it.
            * ``VOTE_EXCLUDE`` if at least one qualifier voted to deselect it
            * ``VOTE_IGNORE`` otherwise or if the list of qualifiers is empty.

        .. versionadded: 0.5
        """
        if not self.qualifier_list:
            return IJobQualifier.VOTE_IGNORE
        # Votes are ordered so that the smallest value is the strongest
        # opinion; taking the minimum implements the rules listed above.
        return min(
            qualifier.get_vote(job) for qualifier in self.qualifier_list)

    def get_primitive_qualifiers(self):
        """Return the flattened list of primitive qualifiers inside."""
        return get_flat_primitive_qualifier_list(self.qualifier_list)

    @property
    def origin(self):
        # Composite qualifiers have no single origin to report.
        raise NonPrimitiveQualifierOrigin
class WellKnownDirsHelper(pod.POD):
    """
    Helper class that knows about well known directories for SessionStorage.

    This class simply gets rid of various magic directory names that we
    associate with session storage. It also provides a convenience utility
    method :meth:`populate()` to create all of those directories, if needed.
    """

    # The storage object whose location the well-known paths hang off of.
    storage = pod.Field(
        doc="SessionStorage associated with this helper",
        type=SessionStorage,
        initial=pod.MANDATORY,
        assign_filter_list=[pod.const, pod.typed])

    def populate(self):
        """
        Create all of the well known directories that are expected to exist
        inside a freshly created session storage directory
        """
        for dirname in self.all_directories:
            # exist_ok=True avoids the race between checking for the
            # directory and creating it; the old check-then-create pattern
            # could raise FileExistsError if another process won the race.
            os.makedirs(dirname, exist_ok=True)

    @property
    def all_directories(self):
        """
        a list of all well-known directories
        """
        return [self.io_log_pathname]

    @property
    def io_log_pathname(self):
        """
        full path of the directory where per-job IO logs are stored
        """
        return os.path.join(self.storage.location, "io-logs")
# --- Example #10 (section marker) ---
class SessionManager(pod.POD):
    """
    Manager class for coupling SessionStorage with SessionState.

    This class allows application code to manage disk state of sessions. Using
    the :meth:`checkpoint()` method applications can create persistent
    snapshots of the :class:`~plainbox.impl.session.state.SessionState`
    associated with each :class:`SessionManager`.
    """

    device_context_list = pod.Field(
        doc="""
        A list of session device context objects

        .. note::
            You must not modify this field directly.

            This is not enforced but please use the
            :meth:`add_device_context()` or :meth:`remove_device_context()` if
            you want to manipulate the list.  Currently you cannot reorder the
            list of context objects.
        """,
        type=list,
        initial=pod.MANDATORY,
        assign_filter_list=[
            pod.typed,
            pod.typed.sequence(SessionDeviceContext), pod.const,
            at_most_one_context_filter
        ])

    storage = pod.Field(doc="A SessionStorage instance",
                        type=SessionStorage,
                        initial=pod.MANDATORY,
                        assign_filter_list=[pod.typed, pod.const])

    # NOTE: must be defined before the test_plans field below because it is
    # referenced (by value) as that field's notify_fn.
    def _on_test_plans_changed(self, old: "Any", new: "Any") -> None:
        self._propagate_test_plans()

    test_plans = pod.Field(doc="""
        Test plans that this session is processing.

        This field contains a tuple of test plans that are active in the
        session. Any changes here are propagated to each device context
        participating in the session. This in turn makes all of the overrides
        defined by those test plans effective.

        .. note::
            Currently there is no facility that would allow to use this field
            to drive test execution. Such facility is likely to be added
            later.
        """,
                           type=tuple,
                           initial=(),
                           notify=True,
                           notify_fn=_on_test_plans_changed,
                           assign_filter_list=[
                               pod.typed,
                               pod.typed.sequence(TestPlanUnit), pod.unique
                           ])

    @property
    def default_device_context(self):
        """
        The default (first) session device context if available

        In single-device sessions this is the context that is used to execute
        every single job definition. Applications that use multiple devices
        must access and use the context list directly.

        .. note::
            The default context may be None if there are no context objects
            present in the session. This is never the case for applications
            using the single-device APIs.
        """
        return (self.device_context_list[0]
                if len(self.device_context_list) > 0 else None)

    @property
    def state(self):
        """
        :class:`~plainbox.impl.session.state.SessionState` associated with this
        manager
        """
        if self.default_device_context is not None:
            return self.default_device_context.state

    @classmethod
    def create(cls, repo=None, legacy_mode=False, prefix='pbox-'):
        """
        Create an empty session manager.

        This method creates an empty session manager. This is the most generic
        API that allows applications to freely work with any set of devices.

        Typically applications will use the :meth:`add_device_context()` method
        to add additional context objects at a later time. This method creates
        and populates the session storage with all of the well known
        directories (using :meth:`WellKnownDirsHelper.populate()`).

        :param repo:
            If specified then this particular repository will be used to create
            the storage for this session. If left out, a new repository is
            constructed with the default location.
        :ptype repo:
            :class:`~plainbox.impl.session.storage.SessionStorageRepository`.
        :param legacy_mode:
            Propagated to
            :meth:`~plainbox.impl.session.storage.SessionStorage.create()` to
            ensure that legacy (single session) mode is used.
        :ptype legacy_mode:
            bool
        :return:
            fresh :class:`SessionManager` instance
        """
        logger.debug("SessionManager.create()")
        if repo is None:
            repo = SessionStorageRepository()
        storage = SessionStorage.create(repo.location, legacy_mode, prefix)
        WellKnownDirsHelper(storage).populate()
        return cls([], storage)

    @classmethod
    def create_with_state(cls, state, repo=None, legacy_mode=False):
        """
        Create a session manager by wrapping existing session state.

        This method populates the session storage with all of the well known
        directories (using :meth:`WellKnownDirsHelper.populate()`)

        :param state:
            A pre-existing SessionState object.
        :param repo:
            If specified then this particular repository will be used to create
            the storage for this session. If left out, a new repository is
            constructed with the default location.
        :ptype repo:
            :class:`~plainbox.impl.session.storage.SessionStorageRepository`.
        :param legacy_mode:
            Propagated to
            :meth:`~plainbox.impl.session.storage.SessionStorage.create()`
            to ensure that legacy (single session) mode is used.
        :ptype legacy_mode:
            bool
        :return:
            fresh :class:`SessionManager` instance
        """
        logger.debug("SessionManager.create_with_state()")
        if repo is None:
            repo = SessionStorageRepository()
        storage = SessionStorage.create(repo.location, legacy_mode)
        WellKnownDirsHelper(storage).populate()
        context = SessionDeviceContext(state)
        return cls([context], storage)

    @classmethod
    def create_with_unit_list(cls,
                              unit_list=None,
                              repo=None,
                              legacy_mode=False):
        """
        Create a session manager with a fresh session.

        This method populates the session storage with all of the well known
        directories (using :meth:`WellKnownDirsHelper.populate()`)

        :param unit_list:
            If specified then this will be the initial list of units known by
            the session state object.
        :param repo:
            If specified then this particular repository will be used to create
            the storage for this session. If left out, a new repository is
            constructed with the default location.
        :ptype repo:
            :class:`~plainbox.impl.session.storage.SessionStorageRepository`.
        :param legacy_mode:
            Propagated to
            :meth:`~plainbox.impl.session.storage.SessionStorage.create()`
            to ensure that legacy (single session) mode is used.
        :ptype legacy_mode:
            bool
        :return:
            fresh :class:`SessionManager` instance
        """
        logger.debug("SessionManager.create_with_unit_list()")
        if unit_list is None:
            unit_list = []
        state = SessionState(unit_list)
        if repo is None:
            repo = SessionStorageRepository()
        storage = SessionStorage.create(repo.location, legacy_mode)
        context = SessionDeviceContext(state)
        WellKnownDirsHelper(storage).populate()
        return cls([context], storage)

    @classmethod
    def load_session(cls, unit_list, storage, early_cb=None, flags=None):
        """
        Load a previously checkpointed session.

        This method allows one to re-open a session that was previously
        created by :meth:`SessionManager.checkpoint()`

        :param unit_list:
            List of all known units. This argument is used to reconstruct the
            session from a dormant state. Since the suspended data cannot
            capture implementation details of each unit reliably, actual units
            need to be provided externally. Unlike in :meth:`create_session()`
            this list really needs to be complete, it must also include any
            generated units.
        :param storage:
            The storage that should be used for this particular session.
            The storage object holds references to existing directories
            in the file system. When restoring an existing dormant session
            it is important to use the correct storage object, the one that
            corresponds to the file system location used be the session
            before it was saved.
        :ptype storage:
            :class:`~plainbox.impl.session.storage.SessionStorage`
        :param early_cb:
            A callback that allows the caller to "see" the session object
            early, before the bulk of resume operation happens. This method can
            be used to register callbacks on the new session before this method
            call returns. The callback accepts one argument, session, which is
            being resumed. This is being passed directly to
            :meth:`plainbox.impl.session.resume.SessionResumeHelper.resume()`
        :param flags:
            An optional set of flags that may influence the resume process.
            Currently this is an internal implementation detail and no "public"
            flags are provided. Passing None here is a safe equivalent of using
            this API before it was introduced.
        :raises:
            Anything that can be raised by
            :meth:`~plainbox.impl.session.storage.SessionStorage.
            load_checkpoint()` and :meth:`~plainbox.impl.session.suspend.
            SessionResumeHelper.resume()`
        :returns:
            Fresh instance of :class:`SessionManager`
        """
        logger.debug("SessionManager.load_session()")
        try:
            data = storage.load_checkpoint()
        except IOError as exc:
            # A missing checkpoint simply means a fresh state; any other
            # I/O problem is propagated to the caller.
            if exc.errno == errno.ENOENT:
                state = SessionState(unit_list)
            else:
                raise
        else:
            state = SessionResumeHelper(unit_list, flags,
                                        storage.location).resume(
                                            data, early_cb)
        context = SessionDeviceContext(state)
        return cls([context], storage)

    def checkpoint(self):
        """
        Create a checkpoint of the session.

        After calling this method you can later reopen the same session with
        :meth:`SessionManager.load_session()`.
        """
        logger.debug("SessionManager.checkpoint()")
        data = SessionSuspendHelper().suspend(self.state,
                                              self.storage.location)
        logger.debug(
            ngettext("Saving %d byte of checkpoint data to %r",
                     "Saving %d bytes of checkpoint data to %r", len(data)),
            len(data), self.storage.location)
        try:
            self.storage.save_checkpoint(data)
        except LockedStorageError:
            # Break a stale lock and retry once.
            self.storage.break_lock()
            self.storage.save_checkpoint(data)

    def destroy(self):
        """
        Destroy all of the filesystem artifacts of the session.

        This basically calls
        :meth:`~plainbox.impl.session.storage.SessionStorage.remove()`
        """
        logger.debug("SessionManager.destroy()")
        self.storage.remove()

    def add_device_context(self, context):
        """
        Add a device context to the session manager

        :param context:
            The :class:`SessionDeviceContext` to add.
        :raises ValueError:
            If the context is already in the session manager or the device
            represented by that context is already present in the session
            manager.

        This method fires the :meth:`on_device_context_added()` signal
        """
        if any(other_context.device == context.device
               for other_context in self.device_context_list):
            raise ValueError(
                _("attempting to add a context for device {} which is"
                  " already represented in this session"
                  " manager").format(context.device))
        if len(self.device_context_list) > 0:
            self._too_many_device_context_objects()
        self.device_context_list.append(context)
        self.on_device_context_added(context)
        return context

    def add_local_device_context(self):
        """
        Create and add a SessionDeviceContext that describes the local device.

        The local device is always the device executing plainbox. Other devices
        may execute jobs or parts of plainbox but they don't need to store or
        run the full plainbox code.
        """
        return self.add_device_context(SessionDeviceContext())

    def remove_device_context(self, context):
        """
        Remove an device context from the session manager

        :param context:
            The :class:`SessionDeviceContext` to remove.

        This method fires the :meth:`on_device_context_removed()` signal
        """
        if context not in self.device_context_list:
            raise ValueError(
                _("attempting to remove a device context not present in this"
                  " session manager"))
        self.device_context_list.remove(context)
        self.on_device_context_removed(context)

    @morris.signal
    def on_device_context_added(self, context):
        """
        Signal fired when a session device context object is added
        """
        logger.debug(_("Device context %s added to session manager %s"),
                     context, self)
        self._propagate_test_plans()

    @morris.signal
    def on_device_context_removed(self, context):
        """
        Signal fired when a session device context object is removed
        """
        logger.debug(_("Device context %s removed from session manager %s"),
                     context, self)
        self._propagate_test_plans()

    def _too_many_device_context_objects(self):
        raise ValueError(
            _("session manager currently doesn't support sessions"
              " involving multiple devices (a.k.a multi-node testing)"))

    def _propagate_test_plans(self):
        logger.debug(_("Propagating test plans to all devices"))
        test_plans = self.test_plans
        for context in self.device_context_list:
            context.set_test_plan_list(test_plans)

    @property
    def exporter_map(self):
        """ Map from exporter id to the corresponding exporter unit. """
        exporter_map = OrderedDict()
        for unit in self.state.unit_list:
            if unit.Meta.name == 'exporter':
                support = unit.support
                if support:
                    exporter_map[unit.id] = support
        # Patch exporter map to expose short names
        legacy_mapping = {
            '2013.com.canonical.plainbox::global': 'global',
            '2013.com.canonical.plainbox::hexr': 'xml',
            '2013.com.canonical.plainbox::html': 'html',
            '2013.com.canonical.plainbox::json': 'json',
            '2013.com.canonical.plainbox::junit': 'junit',
            '2013.com.canonical.plainbox::rfc822': 'rfc822',
            '2013.com.canonical.plainbox::tar': 'tar',
            '2013.com.canonical.plainbox::text': 'text',
            '2013.com.canonical.plainbox::xlsx': 'xlsx'
        }
        for new_id, legacy_id in legacy_mapping.items():
            if new_id in exporter_map:
                exporter_map[legacy_id] = exporter_map[new_id]
        return exporter_map

    def create_exporter(self, exporter_id, option_list=(), strict=True):
        """
        Create an exporter object with the specified name and options.

        :param exporter_id:
            Identifier of the exporter unit (which must have been loaded
            into the session device context of the first device). For
            backwards compatibility this can also be any of the legacy
            identifiers ``xml``, ``html``, ``json``, ``rfc822``, ``text`` or
            ``xlsx``.
        :param option_list:
            (optional) A list of options to pass to the exporter. Each option
            is a string. Some strings may be of form 'key=value' but those are
            handled by each exporter separately. By default an empty tuple is
            used so no special options are enabled.
        :param strict:
            (optional) Strict mode, in this mode ``option_list`` must not
            contain any options that are unrecognized by the exporter. Since
            many options (but not all) are shared among various exporters,
            using non-strict mode might make it easier to use a single superset
            of options to all exporters and let them silently ignore those that
            they don't understand.
        :raises LookupError:
            If the exporter identifier cannot be found. Note that this might
            indicate that appropriate provider has not been loaded yet.
        :returns:
            A ISessionStateExporter instance with appropriate configuration.
        """
        exporter_support = self.exporter_map[exporter_id]
        if not strict:
            # In non-strict mode silently discard unsupported options.
            supported_options = frozenset(
                exporter_support.exporter_cls.supported_option_list)
            option_list = [
                item for item in option_list if item in supported_options
            ]
        return exporter_support.exporter_cls(option_list,
                                             exporter_unit=exporter_support)

    @classmethod
    @contextlib.contextmanager
    def get_throwaway_manager(cls, provider_list=None):
        """
        Create a temporary session manager.

        :param provider_list:
            (optional) A list of providers to put into the session manager. By
            default all known providers are added. You can use this argument to
            customize the behaviour beyond defaults.
        :returns:
            A new SessionManager object that will be destroyed when the context
            manager is left.

        This method can be used to create a throw-away session manager which is
        not really meant for running jobs but can be useful to access exporters
        and other objects stored in providers.
        """
        with tempfile.TemporaryDirectory() as tmp:
            repo = SessionStorageRepository(tmp)
            if provider_list is None:
                provider_list = get_providers()
            # Create the manager before entering the try block so that the
            # finally clause never sees an unbound name if create() raises.
            manager = cls.create(repo=repo)
            try:
                manager.add_local_device_context()
                device_context = manager.default_device_context
                for provider in provider_list:
                    device_context.add_provider(provider)
                yield manager
            finally:
                manager.destroy()
class IncludeStmt(Node):
    """ node representing a single include statement """

    pattern = F("the pattern used for selecting jobs", Re)
    overrides = pod.Field("list of overrides to apply", list, initial_fn=list,
                          assign_filter_list=[
                              pod.typed,
                              pod.typed.sequence(OverrideExpression),
                              pod.const])

    @staticmethod
    def parse(
        text: str, lineno: int=1, col_offset: int=0
    ) -> "Union[IncludeStmt, Error]":
        """
        Parse a single test plan include line

        Using correct syntax will result in a IncludeStmt node with
        appropriate data in the ``pattern`` and ``overrides`` fields. Note that
        ``pattern`` may be either a :class:`RePattern` or a :class:`ReFixed` or
        :class:`ReErr` which is not a valid pattern and cannot be used.
        Overrides are a list of :class:`OverrideExpression`. The list may
        contain incorrect, or duplicate values but that's up to higher-level
        analysis to check for.

        The whole overrides section is optional so a single pattern is a good
        include statement:

            >>> IncludeStmt.parse("usb.*")
            ... # doctest: +NORMALIZE_WHITESPACE
            IncludeStmt(pattern=RePattern(text='usb.*',
                                          re=re.compile('usb.*')),
                        overrides=[])

        Any number of key=value override pairs can be used using commas in
        between each pair:

            >>> IncludeStmt.parse("usb.* f1=o1")
            ... # doctest: +NORMALIZE_WHITESPACE
            IncludeStmt(pattern=RePattern(text='usb.*',
                                          re=re.compile('usb.*')),
                        overrides=[OverrideExpression(field=Text(text='f1'),
                                                      value=Text(text='o1'))])
            >>> IncludeStmt.parse("usb.* f1=o1, f2=o2")
            ... # doctest: +NORMALIZE_WHITESPACE
            IncludeStmt(pattern=RePattern(text='usb.*',
                                          re=re.compile('usb.*')),
                        overrides=[OverrideExpression(field=Text(text='f1'),
                                                      value=Text(text='o1')),
                                   OverrideExpression(field=Text(text='f2'),
                                                      value=Text(text='o2'))])
            >>> IncludeStmt.parse("usb.* f1=o1, f2=o2, f3=o3")
            ... # doctest: +NORMALIZE_WHITESPACE
            IncludeStmt(pattern=RePattern(text='usb.*',
                                          re=re.compile('usb.*')),
                        overrides=[OverrideExpression(field=Text(text='f1'),
                                                      value=Text(text='o1')),
                                   OverrideExpression(field=Text(text='f2'),
                                                      value=Text(text='o2')),
                                   OverrideExpression(field=Text(text='f3'),
                                                      value=Text(text='o3'))])

        Obviously some things can fail, the following examples show various
        error states that are possible. In each state an Error node is returned
        instead of the whole statement.

            >>> IncludeStmt.parse("")
            Error(msg='expected pattern')
            >>> IncludeStmt.parse("pattern field")
            Error(msg="expected '='")
            >>> IncludeStmt.parse("pattern field=")
            Error(msg='expected override value')
            >>> IncludeStmt.parse("pattern field=override junk")
            Error(msg="expected ','")
            >>> IncludeStmt.parse("pattern field=override, ")
            Error(msg='expected override field')
        """
        scanner = WordScanner(text)
        # The line must open with the pattern word.
        token, lexeme = scanner.get_token()
        if token != scanner.TokenEnum.WORD:
            return Error(lineno, col_offset, _("expected pattern"))
        pattern = Re.parse(lexeme, lineno, col_offset)
        overrides = []
        first_pair = True
        while True:
            # Each iteration consumes one FIELD=VALUE pair (plus a trailing
            # comma, if any more pairs follow).
            token, lexeme = scanner.get_token()
            if first_pair and token == scanner.TokenEnum.EOF:
                # The override section is optional, so EOF right after the
                # pattern is fine -- but only before the first pair.
                break
            first_pair = False
            if token != scanner.TokenEnum.WORD:
                return Error(lineno, col_offset, _("expected override field"))
            field = Text(lineno, col_offset, lexeme)
            token, lexeme = scanner.get_token()
            if token != scanner.TokenEnum.EQUALS:
                return Error(lineno, col_offset, _("expected '='"))
            token, lexeme = scanner.get_token()
            if token != scanner.TokenEnum.WORD:
                return Error(lineno, col_offset, _("expected override value"))
            value = Text(lineno, col_offset, lexeme)
            overrides.append(
                OverrideExpression(lineno, col_offset, field, value))
            # A comma means another pair follows; EOF ends the statement.
            token, lexeme = scanner.get_token()
            if token == scanner.TokenEnum.EOF:
                break
            if token != scanner.TokenEnum.COMMA:
                return Error(lineno, col_offset, _("expected ','"))
        return IncludeStmt(lineno, col_offset, pattern, overrides)
# Example #12
class JobReadinessInhibitor(pod.POD):
    """
    Class representing the cause of a job not being ready to execute.

    It is intended to be consumed by UI layers and to provide them with enough
    information to render informative error messages or other visual feedback
    that will aid the user in understanding why a job cannot be started.

    There are four possible not ready causes:

        UNDESIRED:
            This job was not selected to run in this session

        PENDING_DEP:
           This job depends on another job which was not started yet

        FAILED_DEP:
            This job depends on another job which was started and failed

        PENDING_RESOURCE:
            This job has a resource requirement expression that uses a resource
            produced by another job which was not started yet

        FAILED_RESOURCE:
            This job has a resource requirement that evaluated to a false value

    All causes apart from UNDESIRED use the related_job property to encode a
    job that is related to the problem. The PENDING_RESOURCE and
    FAILED_RESOURCE causes also store related_expression that describes the
    relevant requirement expression.

    There are three attributes that can be accessed:

        cause:
            Encodes the reason why a job is not ready, see
            :class:`InhibitionCause`.

        related_job:
            Provides additional context for the problem. This is not the job
            that is affected, rather, the job that is causing the problem.

        related_expression:
            Provides additional context for the problem caused by a failing
            resource expression.
    """

    # XXX: PENDING_RESOURCE is not strict, there are multiple states that are
    # clumped here which is something I don't like. A resource may be still
    # "pending" as in PENDING_DEP (it has not ran yet) or it could have ran but
    # failed to produce any data, it could also be prevented from running
    # because it has unmet dependencies. In essence it tells us nothing about
    # if related_job.can_start() is true or not.
    #
    # XXX: FAILED_RESOURCE is "correct" but somehow misleading, FAILED_RESOURCE
    # is used to represent a resource expression that evaluated to a non-True
    # value

    cause = pod.Field(doc="cause (constant) of the inhibitor",
                      type=InhibitionCause,
                      initial=pod.MANDATORY,
                      assign_filter_list=[
                          cause_convert_assign_filter,
                          pod.read_only_assign_filter
                      ])

    related_job = pod.Field(doc="an (optional) job reference",
                            type=JobDefinition,
                            assign_filter_list=[pod.read_only_assign_filter])

    related_expression = pod.Field(
        doc="an (optional) resource expression reference",
        type=ResourceExpression,
        assign_filter_list=[pod.read_only_assign_filter])

    def __init__(self, cause, related_job=None, related_expression=None):
        """
        Initialize a new inhibitor with the specified cause.

        If cause is other than UNDESIRED a related_job is necessary. If cause
        is either PENDING_RESOURCE or FAILED_RESOURCE related_expression is
        necessary as well. A ValueError is raised when this is violated.
        """
        super().__init__(cause, related_job, related_expression)
        # Every cause except UNDESIRED must name the job at fault.
        job_required = self.cause != InhibitionCause.UNDESIRED
        if job_required and self.related_job is None:
            raise ValueError(
                # TRANSLATORS: please don't translate related_job, None and
                # cause
                _("related_job must not be None when cause is {}").format(
                    self.cause.name))
        # Resource-related causes must also name the offending expression.
        expression_required = self.cause in (
            InhibitionCause.PENDING_RESOURCE,
            InhibitionCause.FAILED_RESOURCE)
        if expression_required and self.related_expression is None:
            raise ValueError(
                _(
                    # TRANSLATORS: please don't translate related_expression, None
                    # and cause.
                    "related_expression must not be None when cause is {}").
                format(self.cause.name))

    def __repr__(self):
        """Get a custom debugging representation of an inhibitor."""
        template = "<{} cause:{} related_job:{!r} related_expression:{!r}>"
        return template.format(
            self.__class__.__name__, self.cause.name, self.related_job,
            self.related_expression)

    def __str__(self):
        """Get a human-readable text representation of an inhibitor."""
        cause = self.cause
        if cause == InhibitionCause.UNDESIRED:
            # TRANSLATORS: as in undesired job
            return _("undesired")
        if cause == InhibitionCause.PENDING_DEP:
            return _("required dependency {!r} did not run yet").format(
                self.related_job.id)
        if cause == InhibitionCause.FAILED_DEP:
            return _("required dependency {!r} has failed").format(
                self.related_job.id)
        if cause == InhibitionCause.PENDING_RESOURCE:
            return _("resource expression {!r} could not be evaluated because"
                     " the resource it depends on did not run yet").format(
                         self.related_expression.text)
        assert cause == InhibitionCause.FAILED_RESOURCE
        return _("resource expression {!r} evaluates to false").format(
            self.related_expression.text)
# Example #13
class JobState(pod.POD):
    """
    Class representing the state of a job in a session.

    Contains the following basic properties of each job:

        * the readiness_inhibitor_list that prevent the job from starting
        * the result (outcome) of the run (IJobResult)
        * the effective category identifier
        * the effective certification status
        * the job that was used to create it (via_job)

    For convenience (to SessionState implementation) it also has a reference to
    the job itself.  This class is a pure state holder and will typically
    collaborate with the SessionState class and the UI layer.
    """

    job = pod.Field(doc="the job associated with this state",
                    type=JobDefinition,
                    initial=pod.MANDATORY,
                    assign_filter_list=[job_assign_filter])

    # Starts out with a single UNDESIRED inhibitor; a job with an empty list
    # is ready to run (see can_start() below).
    readiness_inhibitor_list = pod.Field(
        doc="the list of readiness inhibitors of the associated job",
        type="List[JobReadinessInhibitor]",
        initial_fn=lambda: [UndesiredJobReadinessInhibitor])

    result = pod.Field(doc="the result of running the associated job",
                       type=IJobResult,
                       initial_fn=lambda: MemoryJobResult({}),
                       notify=True)

    result_history = pod.Field(
        doc="a tuple of result_history of the associated job",
        type=tuple,
        initial=(),
        notify=True,
        assign_filter_list=[pod.typed,
                            pod.typed.sequence(IJobResult)])

    via_job = pod.Field(doc="the parent job definition",
                        type=JobDefinition,
                        assign_filter_list=[job_via_assign_filter])

    effective_category_id = OverridableJobField(
        job_field="category_id",
        doc="the effective categorization of this test in a session",
        type=str)

    effective_certification_status = OverridableJobField(
        job_field="certification_status",
        doc="the effective certification status of this job",
        type=str)

    # NOTE: the `result` property just exposes the last result from the
    # `result_history` tuple above. The API is used everywhere so it should not
    # be broken in any way but the way forward is the sequence stored in
    # `result_history`.
    #
    # The one particularly annoying part of this implementation is that each
    # job state always has at least one result. Even if there was no testing
    # done yet. This OUTCOME_NONE result needs to be filtered out at various
    # times. I think it would be better if we could not have it in the
    # sequence-based API anymore. Otherwise each test will have two
    # result_history (more if you count things like resuming a session).

    @result.change_notifier
    def _result_changed(self, old, new):
        # Mirror each meaningful `result` assignment into `result_history`.
        # Don't track the initial assignment over UNSET
        if old is pod.UNSET:
            return
        assert new != old
        assert isinstance(new, IJobResult)
        # Hollow results (e.g. the placeholder OUTCOME_NONE result) carry no
        # testing data, so they are not worth recording.
        if new.is_hollow:
            return
        logger.debug("Appending result %r to history: %r", new,
                     self.result_history)
        self.result_history += (new, )

    def can_start(self) -> bool:
        """Quickly check if the associated job can run right now."""
        return len(self.readiness_inhibitor_list) == 0

    def get_readiness_description(self) -> str:
        """Get a human readable description of the current readiness state."""
        if self.readiness_inhibitor_list:
            return _("job cannot be started: {}").format(", ".join(
                (str(inhibitor)
                 for inhibitor in self.readiness_inhibitor_list)))
        else:
            return _("job can be started")

    def apply_overrides(self, override_list: "List[Tuple[str, Any]]"):
        """
        Apply overrides to effective job values.

        This method is automatically called by :class:`SessionDeviceContext`
        to implement effective overrides originating from test plan data.

        :param override_list:
            A list, as exposed by values of
            :attr:`TestPlanUnitSupport.override_list`, composed of a sequence
            of pairs ``(field, value)``, where ``field`` is the name of the
            field to override (without the prefix ``effective_``) and value is
            any valid value of that field.
        :raises AttributeError:
            If any of the ``field``s refer to an unknown field.
        :raises ValueError:
            If any of the ``field``s refer to fields that are not designated
            as overridable.
        :raises ValueError:
            If the ``value`` supplied is incorrect for the given field.
        :raises TypeError:
            If the type of the ``value`` supplied is incorrect for the given
            field.

        .. note::
            Consult field specification for details on what types and values
            are valid for that field.

        Example:

            >>> from plainbox.vendor.mock import Mock
            >>> job = Mock(spec=JobDefinition)
            >>> job_state = JobState(job)
            >>> job_state.apply_overrides([
            ...     ('category_id', 'new-category-id'),
            ...     ('certification_status', 'blocker')])
            >>> job_state.effective_category_id
            'new-category-id'
            >>> job_state.effective_certification_status
            'blocker'
        """
        for field, value in override_list:
            effective_field = 'effective_{}'.format(field)
            # Look the descriptor up on the class so we can verify it is an
            # overridable field before assigning through it.
            effective_field_obj = getattr(self.__class__, effective_field)
            if not isinstance(effective_field_obj, OverridableJobField):
                raise ValueError(_('{!r} is not overridable').format(field))
            setattr(self, effective_field, value)
        logger.debug("Applied overrides %r to job %r", override_list, self.job)
# Example #14
class WhiteList(Node):
    """ node representing a whole plainbox whitelist """

    entries = pod.Field(
        "a list of comments and patterns",
        list,
        initial_fn=list,
        assign_filter_list=[pod.typed,
                            pod.typed.sequence(Node), pod.const])

    @staticmethod
    def parse(text: str, lineno: int = 1, col_offset: int = 0) -> "WhiteList":
        """
        Parse a plainbox *whitelist*

        Empty string is still a valid (though empty) whitelist

        >>> WhiteList.parse("")
        WhiteList(entries=[])

        White space is irrelevant and gets ignored if it's not of any
        semantic value. Since whitespace was never a part of the de-facto
        allowed pattern syntax one cannot create a job with " ".

        >>> WhiteList.parse("   ")
        WhiteList(entries=[])

        As soon as there's something interesting though, it starts to have
        meaning. Note that we differentiate the raw text ' a ' from the
        pattern object is represents '^namespace::a$' but at this time,
        when we parse the text this contextual, semantic information is not
        available and is not a part of the AST.

        >>> WhiteList.parse(" data ")
        WhiteList(entries=[ReFixed(text=' data ')])

        Data gets separated into line-based records.  Any number of lines
        may exist in a single whitelist.

        >>> WhiteList.parse("line")
        WhiteList(entries=[ReFixed(text='line')])

        >>> WhiteList.parse("line 1\\nline 2\\n")
        WhiteList(entries=[ReFixed(text='line 1'), ReFixed(text='line 2')])

        Empty lines are just ignored. You can re-create them by observing lack
        of continuity in the values of the ``lineno`` field.

        >>> WhiteList.parse("line 1\\n\\nline 3\\n")
        WhiteList(entries=[ReFixed(text='line 1'), ReFixed(text='line 3')])

        Data can be mixed with comments. Note that col_offset is finally
        non-zero here as the comments starts on the fourth character into the
        line:

        >>> WhiteList.parse("foo # pick foo")
        ... # doctest: +NORMALIZE_WHITESPACE
        WhiteList(entries=[ReFixed(text='foo '),
                           Comment(comment='# pick foo')])

        Comments can also exist without any data:

        >>> WhiteList.parse("# this is a comment")
        WhiteList(entries=[Comment(comment='# this is a comment')])

        Lastly, there are no *exceptions* at this stage, broken patterns are
        represented as such but no exceptions are ever raised:

        >>> WhiteList.parse("[]")
        ... # doctest: +ELLIPSIS
        WhiteList(entries=[ReErr(text='[]', exc=error('un...',))])
        """
        entries = []
        # Walk line by line; `cur_lineno` tracks the whitelist-relative line
        # number while `lineno` keeps the position of the whole node.
        for cur_lineno, line in enumerate(text.splitlines(), lineno):
            hash_pos = line.find('#')
            if hash_pos >= 0:
                # Split the line into the pattern part and the trailing
                # comment (which starts at the '#' character).
                data_part = line[:hash_pos]
                comment_part = line[hash_pos:]
            else:
                data_part = line
                comment_part = None
            # Whitespace-only data carries no pattern.
            if data_part.strip():
                entries.append(Re.parse(data_part, cur_lineno, col_offset))
            if comment_part:
                entries.append(
                    Comment(cur_lineno, col_offset + hash_pos, comment_part))
        return WhiteList(lineno, col_offset, entries)