Exemplo n.º 1
0
def _hr(signum):
    name = None
    for key, value in six.iteritems(signal.__dict__):
        if value == signum:
            name = key
            break
    return name or "signal {0}".format(signum)
Exemplo n.º 2
0
 def _yield_result(self, attname, command):
     """
     Return the next stored `.Result` for ``command`` from attribute
     ``attname``, stamping its ``command`` string when unset.

     Any lookup or exhaustion failure is funneled into a
     ``NotImplementedError`` naming the command.
     """
     try:
         source = getattr(self, attname)
         # Dict storage means per-command lookup: exact key first, then any
         # regex-like key (anything sporting a .match()) tried in order.
         if isinstance(source, dict):
             try:
                 source = source[command]
             except KeyError:
                 # TODO: could optimize by skipping this if not any regex
                 # objects in keys()?
                 matches = (
                     value
                     for key, value in iteritems(source)
                     if hasattr(key, "match") and key.match(command)
                 )
                 # No match raises StopIteration, handled below.
                 source = next(matches)
         # By now the value is an iterator of Result objects (normalized by
         # __init__), so simply advance it.
         result = next(source)
         # Record which command string matched, unless explicitly given.
         if not result.command:
             result.command = command
         return result
     except (AttributeError, IndexError, KeyError, StopIteration):
         raise_from(NotImplementedError(command), None)
Exemplo n.º 3
0
def _hr(signum):
    name = None
    for key, value in six.iteritems(signal.__dict__):
        if value == signum:
            name = key
            break
    return name or "signal {0}".format(signum)
Exemplo n.º 4
0
 def teardown(self):
     """Purge test-support task modules from ``sys.modules`` after a test."""
     import sys
     from invoke.vendor.six import iteritems
     from _util import support
     # Strip any test-support task collections from sys.modules to prevent
     # state bleed between tests; otherwise tests can incorrectly pass
     # despite not explicitly loading/cd'ing to get the tasks they call
     # loaded.
     for name, module in iteritems(sys.modules.copy()):
         # Coerce __file__ to '' - some modules set it to None (seen on
         # Python 3.7+), which would make the 'in' test raise TypeError.
         if module and support in (getattr(module, '__file__', '') or ''):
             del sys.modules[name]
Exemplo n.º 5
0
 def teardown(self):
     """Purge test-support task modules from ``sys.modules`` after a test."""
     import sys
     from invoke.vendor.six import iteritems
     from _util import support
     # Strip any test-support task collections from sys.modules to prevent
     # state bleed between tests; otherwise tests can incorrectly pass
     # despite not explicitly loading/cd'ing to get the tasks they call
     # loaded.
     for name, module in iteritems(sys.modules.copy()):
         # Coerce __file__ to '' - some modules set it to None (seen on
         # Python 3.7+), which would make the 'in' test raise TypeError.
         if module and support in (getattr(module, '__file__', '') or ''):
             del sys.modules[name]
Exemplo n.º 6
0
def clean_sys_modules():
    """Fixture: purge test-support task modules from ``sys.modules``."""
    # TODO: _arguably_ it might be cleaner to register this as a 'finalizer'?
    # it's not like the yield isn't readable here - it's a fixture that only
    # performs teardown.
    yield
    # Strip any test-support task collections from sys.modules to prevent
    # state bleed between tests; otherwise tests can incorrectly pass
    # despite not explicitly loading/cd'ing to get the tasks they call
    # loaded.
    for name, module in iteritems(sys.modules.copy()):
        # Coerce __file__ to '' - some modules set it to None (seen on
        # Python 3.7+), which would make the 'in' test raise TypeError.
        if module and support in (getattr(module, '__file__', '') or ''):
            del sys.modules[name]
Exemplo n.º 7
0
def clean_sys_modules():
    """Fixture: purge test-support task modules from ``sys.modules``."""
    # TODO: _arguably_ it might be cleaner to register this as a 'finalizer'?
    # it's not like the yield isn't readable here - it's a fixture that only
    # performs teardown.
    yield
    # Strip any test-support task collections from sys.modules to prevent
    # state bleed between tests; otherwise tests can incorrectly pass
    # despite not explicitly loading/cd'ing to get the tasks they call
    # loaded.
    for name, module in iteritems(sys.modules.copy()):
        # Coerce __file__ to '' - some modules set it to None (seen on
        # Python 3.7+), which would make the 'in' test raise TypeError.
        if module and support in (getattr(module, '__file__', '') or ''):
            del sys.modules[name]
Exemplo n.º 8
0
 def teardown(self):
     """Undo per-test cwd/environ changes and purge support-task modules."""
     # Chdir back to project root to avoid problems
     os.chdir(os.path.join(os.path.dirname(__file__), '..'))
     # Nuke changes to environ
     os.environ.clear()
     os.environ.update(self.old_environ)
     # Strip any test-support task collections from sys.modules to prevent
     # state bleed between tests; otherwise tests can incorrectly pass
     # despite not explicitly loading/cd'ing to get the tasks they call
     # loaded.
     for name, module in iteritems(sys.modules.copy()):
         # Coerce __file__ to '' - some modules set it to None (seen on
         # Python 3.7+), which would make the 'in' test raise TypeError.
         if module and support in (getattr(module, '__file__', '') or ''):
             del sys.modules[name]
Exemplo n.º 9
0
Arquivo: _util.py Projeto: yws/invoke
 def teardown(self):
     """Undo per-test cwd/environ changes and purge support-task modules."""
     # Chdir back to project root to avoid problems
     os.chdir(os.path.join(os.path.dirname(__file__), '..'))
     # Nuke changes to environ
     os.environ.clear()
     os.environ.update(self.old_environ)
     # Strip any test-support task collections from sys.modules to prevent
     # state bleed between tests; otherwise tests can incorrectly pass
     # despite not explicitly loading/cd'ing to get the tasks they call
     # loaded.
     for name, module in iteritems(sys.modules.copy()):
         # Coerce __file__ to '' - some modules set it to None (seen on
         # Python 3.7+), which would make the 'in' test raise TypeError.
         if module and support in (getattr(module, '__file__', '') or ''):
             del sys.modules[name]
Exemplo n.º 10
0
def clean_sys_modules():
    """
    Teardown-only fixture purging test-support modules from ``sys.modules``.
    """
    # TODO: _arguably_ it might be cleaner to register this as a 'finalizer'?
    # The yield reads fine here though - there is simply no setup phase.
    yield
    # Drop anything imported from the test-support tree so module state
    # can't bleed between tests (which could otherwise let tests pass
    # without explicitly loading/cd'ing to get their tasks loaded).
    for mod_name, mod in list(sys.modules.items()):
        # __file__ may be absent, or present-but-None (seems new in Python
        # 3.7?), so normalize to an empty string before the substring test.
        mod_path = getattr(mod, "__file__", "") or ""
        if mod and support in mod_path:
            del sys.modules[mod_name]
Exemplo n.º 11
0
        def _expect_response(self, **kwargs):
            """
            Run a command with ``watchers`` built from the ``responses`` kwarg.

            Any remaining ``**kwargs`` pass straight through to ``_runner()``.

            :returns: The mocked ``write_proc_stdin`` method of the runner.
            """
            responses = kwargs.pop('responses')
            watchers = []
            for pattern, reply in iteritems(responses):
                watchers.append(Responder(pattern=pattern, response=reply))
            klass = self._mock_stdin_writer()
            kwargs['klass'] = klass
            self._runner(**kwargs).run(_, watchers=watchers, hide=True)
            return klass.write_proc_stdin
Exemplo n.º 12
0
        def _expect_response(self, **kwargs):
            """
            Run a command with ``watchers`` built from the ``responses`` kwarg.

            Any remaining ``**kwargs`` pass straight through to ``_runner()``.

            :returns: The mocked ``write_proc_stdin`` method of the runner.
            """
            responses = kwargs.pop('responses')
            watchers = []
            for pattern, reply in iteritems(responses):
                watchers.append(Responder(pattern=pattern, response=reply))
            klass = self._mock_stdin_writer()
            kwargs['klass'] = klass
            self._runner(**kwargs).run(_, watchers=watchers, hide=True)
            return klass.write_proc_stdin
Exemplo n.º 13
0
 def supports_readonly_dict_protocols(self):
     """Config objects should honor the read-only dict protocols."""
     # A single-key dict sidesteps ordering concerns in the assertions.
     conf = Config({'foo': 'bar'})
     other = Config({'foo': 'bar'})
     ok_('foo' in conf)
     ok_('foo' in other)  # mostly just to trigger loading :x
     eq_(conf, other)
     eq_(len(conf), 1)
     eq_(conf.get('foo'), 'bar')
     if six.PY2:
         # Python-2-only dict API surface
         eq_(conf.has_key('foo'), True)  # noqa
         eq_(list(conf.iterkeys()), ['foo'])
         eq_(list(conf.itervalues()), ['bar'])
     eq_(list(conf.items()), [('foo', 'bar')])
     eq_(list(six.iteritems(conf)), [('foo', 'bar')])
     eq_(list(conf.keys()), ['foo'])
     eq_(list(conf.values()), ['bar'])
Exemplo n.º 14
0
 def supports_readonly_dict_protocols(self):
     """Config objects should honor the read-only dict protocols."""
     # A single-key dict sidesteps ordering concerns in the assertions.
     conf = Config({'foo': 'bar'})
     other = Config({'foo': 'bar'})
     ok_('foo' in conf)
     ok_('foo' in other)  # mostly just to trigger loading :x
     eq_(conf, other)
     eq_(len(conf), 1)
     eq_(conf.get('foo'), 'bar')
     if six.PY2:
         # Python-2-only dict API surface
         eq_(conf.has_key('foo'), True)  # noqa
         eq_(list(conf.iterkeys()), ['foo'])
         eq_(list(conf.itervalues()), ['bar'])
     eq_(list(conf.items()), [('foo', 'bar')])
     eq_(list(six.iteritems(conf)), [('foo', 'bar')])
     eq_(list(conf.keys()), ['foo'])
     eq_(list(conf.values()), ['bar'])
Exemplo n.º 15
0
def clean_sys_modules():
    """
    Attempt to nix any imports incurred by the test, to prevent state bleed.

    In some cases this prevents outright errors (eg a test accidentally relying
    on another's import of a task tree in the support folder) and in others
    it's required because we're literally testing runtime imports.
    """
    snapshot = sys.modules.copy()
    yield
    # Iterate over another copy to avoid ye olde mutate-during-iterate problem
    # NOTE: cannot simply 'sys.modules = snapshot' as that is warned against
    for name, module in iteritems(sys.modules.copy()):
        # Delete anything newly added (imported)
        if name not in snapshot:
            del sys.modules[name]
    # Overwrite anything that was modified (the easy version...)
    # NOTE: moved out of the loop above - it previously re-applied the entire
    # snapshot once per module (O(n^2)); doing it once afterwards is enough
    # and yields the same final state.
    sys.modules.update(snapshot)
Exemplo n.º 16
0
    def __init__(self, config=None, **kwargs):
        """
        Create a ``Context``-like object whose methods yield `.Result` objects.

        :param config:
            A Configuration object to use. Identical in behavior to `.Context`.

        :param run:
            A data structure of `Results <.Result>` to return from calls to
            the instantiated object's `~.Context.run` method (instead of
            actually executing the requested shell command). Accepts:

            - a single `.Result`, returned once;
            - an iterable of `Results <.Result>`, returned one per
              subsequent ``.run`` call;
            - a map of command strings to either of the above, for specific
              call-and-response semantics instead of an assumed call order.

        :param sudo:
            Identical to ``run``, but whose values are yielded from calls to
            `~.Context.sudo`.

        :raises:
            ``TypeError``, if the values given to ``run`` or other kwargs
            aren't individual `.Result` objects or iterables.
        """
        # TODO: would be nice to allow regexen instead of exact string matches
        super(MockContext, self).__init__(config)
        for method, results in iteritems(kwargs):
            # Anything iterable works as-is (dicts included, via __iter__);
            # a lone Result is the one allowed non-iterable convenience case.
            acceptable = isinstance(results, Result) or hasattr(
                results, '__iter__'
            )
            if not acceptable:
                raise TypeError(
                    "Not sure how to yield results from a {0!r}".format(
                        type(results)
                    )
                )
            self._set("_{0}".format(method), results)
Exemplo n.º 17
0
    def __init__(self, config=None, **kwargs):
        """
        Create a ``Context``-like object whose methods yield `.Result` objects.

        :param config:
            A Configuration object to use. Identical in behavior to `.Context`.

        :param run:
            A data structure of `Results <.Result>` to return from calls to
            the instantiated object's `~.Context.run` method (instead of
            actually executing the requested shell command). Accepts:

            - a single `.Result`, returned once;
            - an iterable of `Results <.Result>`, returned one per
              subsequent ``.run`` call;
            - a map of command strings to either of the above, for specific
              call-and-response semantics instead of an assumed call order.

        :param sudo:
            Identical to ``run``, but whose values are yielded from calls to
            `~.Context.sudo`.

        :raises:
            ``TypeError``, if the values given to ``run`` or other kwargs
            aren't individual `.Result` objects or iterables.
        """
        # TODO: would be nice to allow regexen instead of exact string matches
        super(MockContext, self).__init__(config)
        for method, results in iteritems(kwargs):
            # Anything iterable works as-is (dicts included, via __iter__);
            # a lone Result is the one allowed non-iterable convenience case.
            acceptable = isinstance(results, Result) or hasattr(
                results, "__iter__"
            )
            if not acceptable:
                raise TypeError(
                    "Not sure how to yield results from a {0!r}".format(
                        type(results)
                    )
                )
            # Sidestep our own __setattr__ protections when storing.
            object.__setattr__(self, "_{0}".format(method), results)
Exemplo n.º 18
0
def _expect(where, type_, **kwargs):
    """Load config from ``where``/``type_``; assert each kwarg key/value."""
    config = _load(where, type_)
    for expected_key, expected_value in six.iteritems(kwargs):
        eq_(config[expected_key], expected_value)
Exemplo n.º 19
0
def _expect(where, type_, **kwargs):
    """Load config from ``where``/``type_``; assert each kwarg key/value."""
    config = _load(where, type_)
    for expected_key, expected_value in six.iteritems(kwargs):
        eq_(config[expected_key], expected_value)
Exemplo n.º 20
0
def count_errors(c, command, trials=10, verbose=False, fail_fast=False):
    """
    Run ``command`` multiple times and tally statistics about failures.

    Use Ctrl-C or other SIGINT to abort early (also see ``fail_fast``.)

    :param str command:
        The command to execute. Make sure to escape special shell characters!

    :param int trials:
        Number of trials to execute (default 10.)

    :param bool verbose:
        Whether to emit stdout/err from failed runs at end of execution.
        Default: ``False``.

    :param bool fail_fast:
        Whether to exit after the first error (i.e. "count runs til error is
        exhibited" mode.) Default: ``False``.

    Say ``verbose=True`` to see stderr from failed runs at the end.

    Say ``--fail-fast`` to error out, with error output, on the first error.
    """
    # TODO: allow defining failure as something besides "exited 1", e.g.
    # "stdout contained <sentinel>" or whatnot
    goods, bads = [], []
    prev_error = time.time()
    # Pre-seed so trials=0 doesn't NameError on the post-loop increment.
    num_runs = -1
    for num_runs in tqdm(range(trials), unit='trial'):
        result = c.run(command, hide=True, warn=True)
        if result.failed:
            now = time.time()
            # Seconds elapsed since the previous failure, for the stats below.
            result.since_prev_error = int(now - prev_error)
            prev_error = now
            bads.append(result)
            # -2 is typically indicative of SIGINT in most shells
            if fail_fast or result.exited == -2:
                break
        else:
            goods.append(result)
    num_runs += 1 # for count starting at 1, not 0
    if verbose or fail_fast:
        # TODO: would be nice to show interwoven stdout/err but I don't believe
        # we track that at present...
        for result in bads:
            print("")
            print(result.stdout)
            print(result.stderr)
    # Stats! TODO: errors only jeez
    successes = len(goods)
    failures = len(bads)
    overall = "{0}/{1} trials failed".format(failures, num_runs)
    # Short-circuit if no errors
    if not bads:
        print(overall)
        return
    periods = [x.since_prev_error for x in bads]
    # Period mean
    mean = int(sum(periods) / float(len(periods)))
    # Period mode
    # TODO: use collections.Counter now that we've dropped 2.6
    counts = defaultdict(int)
    for period in periods:
        counts[period] += 1
    # max() over (count, period) tuples == sorted(...)[-1] without the sort;
    # count ties resolve to the larger period, exactly as before.
    mode = max((value, key) for key, value in iteritems(counts))[1]
    # Emission of stats!
    if fail_fast:
        print("First failure occurred after {0} successes".format(successes))
    else:
        print(overall)
    print("Stats: min={0}s, mean={1}s, mode={2}s, max={3}s".format(
        min(periods), mean, mode, max(periods)))
Exemplo n.º 21
0
def count_errors(c, command, trials=10, verbose=False, fail_fast=False):
    """
    Run ``command`` multiple times and tally statistics about failures.

    Use Ctrl-C or other SIGINT to abort early (also see ``fail_fast``.)

    :param str command:
        The command to execute. Make sure to escape special shell characters!

    :param int trials:
        Number of trials to execute (default 10.)

    :param bool verbose:
        Whether to emit stdout/err from failed runs at end of execution.
        Default: ``False``.

    :param bool fail_fast:
        Whether to exit after the first error (i.e. "count runs til error is
        exhibited" mode.) Default: ``False``.

    Say ``verbose=True`` to see stderr from failed runs at the end.

    Say ``--fail-fast`` to error out, with error output, on the first error.
    """
    # TODO: allow defining failure as something besides "exited 1", e.g.
    # "stdout contained <sentinel>" or whatnot
    goods, bads = [], []
    prev_error = time.time()
    # Pre-seed so trials=0 doesn't NameError on the post-loop increment.
    num_runs = -1
    for num_runs in tqdm(range(trials), unit="trial"):
        result = c.run(command, hide=True, warn=True)
        if result.failed:
            now = time.time()
            # Seconds elapsed since the previous failure, for the stats below.
            result.since_prev_error = int(now - prev_error)
            prev_error = now
            bads.append(result)
            # -2 is typically indicative of SIGINT in most shells
            if fail_fast or result.exited == -2:
                break
        else:
            goods.append(result)
    num_runs += 1  # for count starting at 1, not 0
    if verbose or fail_fast:
        # TODO: would be nice to show interwoven stdout/err but I don't believe
        # we track that at present...
        for result in bads:
            print("")
            print(result.stdout)
            print(result.stderr)
    # Stats! TODO: errors only jeez
    successes = len(goods)
    failures = len(bads)
    overall = "{0}/{1} trials failed".format(failures, num_runs)
    # Short-circuit if no errors
    if not bads:
        print(overall)
        return
    periods = [x.since_prev_error for x in bads]
    # Period mean
    mean = int(sum(periods) / float(len(periods)))
    # Period mode
    # TODO: use collections.Counter now that we've dropped 2.6
    counts = defaultdict(int)
    for period in periods:
        counts[period] += 1
    # max() over (count, period) tuples == sorted(...)[-1] without the sort;
    # count ties resolve to the larger period, exactly as before.
    mode = max((value, key) for key, value in iteritems(counts))[1]
    # Emission of stats!
    if fail_fast:
        print("First failure occurred after {0} successes".format(successes))
    else:
        print(overall)
    print(
        "Stats: min={0}s, mean={1}s, mode={2}s, max={3}s".format(
            min(periods), mean, mode, max(periods)
        )
    )
Exemplo n.º 22
0
    def __init__(self, config=None, **kwargs):
        """
        Create a ``Context``-like object whose methods yield `.Result` objects.

        :param config:
            A Configuration object to use. Identical in behavior to `.Context`.

        :param run:
            Describes the `.Result` objects that calls to the instantiated
            object's `~.Context.run` should yield (instead of actually
            executing a shell command). May be any of:

            - a single `.Result` (recall that class's first positional
              argument is its stdout, so e.g. ``Result("output here!")`` is
              a handy shorthand);
            - a boolean: ``True`` yields a `.Result` whose ``exited`` is
              ``0``, ``False`` one whose ``exited`` is ``1``;
            - an iterable of the above, yielded one per call to ``.run``
              (first item on the first call, and so on);
            - a dict mapping command strings or compiled regexen to any of
              the above (iterables included), for call-and-response
              semantics independent of call order.

        :param sudo:
            Identical to ``run``, but whose values are yielded from calls to
            `~.Context.sudo`.

        :param bool repeat:
            Whether results yielded by this class' methods repeat forever
            (``True``) or are consumed (``False``).

            A single result is normally returned only once, triggering
            `NotImplementedError` afterwards; under ``repeat=True`` it comes
            back on every call. Likewise, iterables are normally exhausted
            once, but get wrapped in `itertools.cycle` when this flag is on.

            Default: ``False`` (for backwards compatibility reasons).

        :raises:
            ``TypeError``, if the values given to ``run`` or other kwargs
            aren't of the expected types.

        .. versionchanged:: 1.5
            Added support for boolean and string result values.
        .. versionchanged:: 1.5
            Added support for regex dict keys.
        .. versionchanged:: 1.5
            Added the ``repeat`` keyword argument.
        """
        # Mock support is optional; prefer the standalone 'mock' package,
        # then the stdlib's unittest.mock, then do without.
        Mock = None
        try:
            from mock import Mock
        except ImportError:
            try:
                from unittest.mock import Mock
            except ImportError:
                pass
        # Regular Context setup, driven by the config.
        super(MockContext, self).__init__(config)
        # Behavioral kwargs come out first...
        self._set("__repeat", kwargs.pop("repeat", False))
        # ...leaving only run/sudo-style mocked-method definitions behind.
        # These "leaf" value types get normalized directly; dicts recurse one
        # level; true iterables are also normalizable.
        leaf_types = tuple([Result, bool] + list(string_types))
        for method, results in iteritems(kwargs):
            if isinstance(results, dict):
                for command, value in iteritems(results):
                    results[command] = self._normalize(value)
            elif isinstance(results, leaf_types) or hasattr(
                results, "__iter__"
            ):
                results = self._normalize(results)
            else:
                # Unknown input value: cry
                raise TypeError(
                    "Not sure how to yield results from a {!r}".format(
                        type(results)
                    )
                )
            # Stash the normalized results for the mocked method's use.
            self._set("__{}".format(method), results)
            # Wrap the method in a Mock, if applicable
            if Mock is not None:
                self._set(method, Mock(wraps=getattr(self, method)))