Example #1
0
def format_timespan(num_seconds, detailed=False, max_units=3):
    """
    Format a timespan in seconds as a human readable string.

    :param num_seconds: Number of seconds (integer or float).
    :param detailed: If :data:`True` milliseconds are represented separately
                     instead of being represented as fractional seconds
                     (defaults to :data:`False`).
    :param max_units: The maximum number of units to show in the formatted time
                      span (an integer, defaults to three).
    :returns: The formatted timespan as a string.

    Some examples:

    >>> from humanfriendly import format_timespan
    >>> format_timespan(0)
    '0 seconds'
    >>> format_timespan(1)
    '1 second'
    >>> import math
    >>> format_timespan(math.pi)
    '3.14 seconds'
    >>> hour = 60 * 60
    >>> day = hour * 24
    >>> week = day * 7
    >>> format_timespan(week * 52 + day * 2 + hour * 3)
    '1 year, 2 days and 3 hours'
    """
    if not detailed and num_seconds < 60:
        # Sub-minute timespans take the fast path.
        return pluralize(round_number(num_seconds), 'second')
    # Decompose the timespan into units, largest first. Decimal arithmetic
    # avoids the rounding surprises of binary floating point.
    remaining = decimal.Decimal(str(num_seconds))
    units = list(reversed(time_units[0 if detailed else 1:]))
    smallest = units[-1]
    parts = []
    for unit in units:
        divider = decimal.Decimal(str(unit['divider']))
        amount = remaining / divider
        remaining %= divider
        if unit is smallest:
            # The smallest unit keeps its fractional part.
            amount = round_number(amount)
        else:
            # All larger units are truncated to whole numbers.
            amount = int(amount)
        # Units that round to zero are omitted from the result.
        if amount not in (0, '0'):
            parts.append(pluralize(amount, unit['singular'], unit['plural']))
    if len(parts) == 1:
        # A single count/unit combination.
        return parts[0]
    if not detailed:
        # Trim the least significant units.
        parts = parts[:max_units]
    return concatenate(parts)
def format_size(num_bytes, keep_width=False):
    """
    Format a byte count as a human readable file size.

    :param num_bytes: The size to format in bytes (an integer).
    :param keep_width: ``True`` if trailing zeros should not be stripped,
                       ``False`` if they can be stripped.
    :returns: The corresponding human readable file size (a string).

    This function supports ranges from kilobytes to terabytes.

    Some examples:

    >>> from humanfriendly import format_size
    >>> format_size(0)
    '0 bytes'
    >>> format_size(1)
    '1 byte'
    >>> format_size(5)
    '5 bytes'
    >>> format_size(1024 ** 2)
    '1 MB'
    >>> format_size(1024 ** 3 * 4)
    '4 GB'
    """
    # Pick the largest unit whose divider fits into the byte count.
    unit = next((u for u in reversed(disk_size_units)
                 if num_bytes >= u['divider']), None)
    if unit is None:
        # Byte counts below the smallest divider are reported as bytes.
        return pluralize(num_bytes, 'byte')
    amount = round_number(float(num_bytes) / unit['divider'], keep_width=keep_width)
    return pluralize(amount, unit['singular'], unit['plural'])
Example #3
0
    def simple_search(self, *keywords):
        """
        Perform a simple search for case insensitive substring matches.

        :param keywords: The string(s) to search for.
        :returns: The matched entries (a list of entry objects, not a
                  generator of name strings).

        Only passwords whose names match *all* of the given keywords are
        returned.
        """
        matches = []
        # Lowercase the keywords once so the containment test below is
        # case insensitive (entry names are lowercased per iteration).
        keywords = [kw.lower() for kw in keywords]
        logger.verbose(
            "Performing simple search on %s (%s) ..",
            pluralize(len(keywords), "keyword"),
            concatenate(map(repr, keywords)),
        )
        for entry in self.filtered_entries:
            normalized = entry.name.lower()
            # An entry matches only when every keyword occurs in its name.
            if all(kw in normalized for kw in keywords):
                matches.append(entry)
        # Log at INFO when something matched, VERBOSE otherwise.
        logger.log(
            logging.INFO if matches else logging.VERBOSE,
            "Matched %s using simple search.",
            pluralize(len(matches), "password"),
        )
        return matches
Example #4
0
def format_length(num_metres, keep_width=False):
    """
    Format a metre count as a human readable length.

    :param num_metres: The length to format in metres (float / integer).
    :param keep_width: ``True`` if trailing zeros should not be stripped,
                       ``False`` if they can be stripped.
    :returns: The corresponding human readable length (a string).

    This function supports ranges from nanometres to kilometres.

    Some examples:

    >>> from humanfriendly import format_length
    >>> format_length(0)
    '0 metres'
    >>> format_length(1)
    '1 metre'
    >>> format_length(5)
    '5 metres'
    >>> format_length(1000)
    '1 km'
    >>> format_length(0.004)
    '4 mm'
    """
    # Find the largest unit whose divider fits into the length.
    chosen = None
    for candidate in reversed(length_size_units):
        if num_metres >= candidate['divider']:
            chosen = candidate
            break
    if chosen is not None:
        count = round_number(float(num_metres) / chosen['divider'], keep_width=keep_width)
        return pluralize(count, chosen['singular'], chosen['plural'])
    # Lengths below the smallest divider are reported in metres.
    return pluralize(num_metres, 'metre')
def format_length(num_metres, keep_width=False):
    """
    Format a metre count as a human readable length.

    :param num_metres: The length to format in metres (float / integer).
    :param keep_width: ``True`` if trailing zeros should not be stripped,
                       ``False`` if they can be stripped.
    :returns: The corresponding human readable length (a string).

    This function supports ranges from nanometres to kilometres.

    Some examples:

    >>> from humanfriendly import format_length
    >>> format_length(0)
    '0 metres'
    >>> format_length(1)
    '1 metre'
    >>> format_length(5)
    '5 metres'
    >>> format_length(1000)
    '1 km'
    >>> format_length(0.004)
    '4 mm'
    """
    # Select the largest unit that doesn't exceed the given length.
    unit = next((u for u in reversed(length_size_units)
                 if num_metres >= u['divider']), None)
    if unit is None:
        # Fall back to plain metres for very small lengths.
        return pluralize(num_metres, 'metre')
    amount = round_number(float(num_metres) / unit['divider'], keep_width=keep_width)
    return pluralize(amount, unit['singular'], unit['plural'])
Example #6
0
def format_size(num_bytes, keep_width=False):
    """
    Format a byte count as a human readable file size.

    :param num_bytes: The size to format in bytes (an integer).
    :param keep_width: ``True`` if trailing zeros should not be stripped,
                       ``False`` if they can be stripped.
    :returns: The corresponding human readable file size (a string).

    This function supports ranges from kilobytes to terabytes.

    Some examples:

    >>> from humanfriendly import format_size
    >>> format_size(0)
    '0 bytes'
    >>> format_size(1)
    '1 byte'
    >>> format_size(5)
    '5 bytes'
    >>> format_size(1024 ** 2)
    '1 MB'
    >>> format_size(1024 ** 3 * 4)
    '4 GB'
    """
    # Walk the units from largest to smallest and stop at the first
    # unit whose divider fits into the byte count.
    chosen = None
    for candidate in reversed(disk_size_units):
        if num_bytes >= candidate['divider']:
            chosen = candidate
            break
    if chosen is not None:
        amount = round_number(float(num_bytes) / chosen['divider'], keep_width=keep_width)
        return pluralize(amount, chosen['singular'], chosen['plural'])
    # Small sizes are reported as plain bytes.
    return pluralize(num_bytes, 'byte')
Example #7
0
def format_timespan(num_seconds, detailed=False, max_units=3):
    """
    Format a timespan in seconds as a human readable string.

    :param num_seconds: Number of seconds (integer or float).
    :param detailed: If :data:`True` milliseconds are represented separately
                     instead of being represented as fractional seconds
                     (defaults to :data:`False`).
    :param max_units: The maximum number of units to show in the formatted time
                      span (an integer, defaults to three).
    :returns: The formatted timespan as a string.

    Some examples:

    >>> from humanfriendly import format_timespan
    >>> format_timespan(0)
    '0 seconds'
    >>> format_timespan(1)
    '1 second'
    >>> import math
    >>> format_timespan(math.pi)
    '3.14 seconds'
    >>> hour = 60 * 60
    >>> day = hour * 24
    >>> week = day * 7
    >>> format_timespan(week * 52 + day * 2 + hour * 3)
    '1 year, 2 days and 3 hours'
    """
    if num_seconds < 60 and not detailed:
        # Sub-minute values can be formatted without decomposition.
        return pluralize(round_number(num_seconds), "second")
    # Break the timespan down one unit at a time, largest unit first,
    # using Decimal arithmetic to avoid float rounding artifacts.
    pieces = []
    remaining = decimal.Decimal(str(num_seconds))
    units = list(reversed(time_units[0 if detailed else 1:]))
    last_index = len(units) - 1
    for index, unit in enumerate(units):
        divider = decimal.Decimal(str(unit["divider"]))
        amount = remaining / divider
        remaining %= divider
        # Whole numbers for all units except the smallest, which keeps
        # its fractional part (rounded for presentation).
        amount = round_number(amount) if index == last_index else int(amount)
        if amount not in (0, "0"):
            pieces.append(pluralize(amount, unit["singular"], unit["plural"]))
    if len(pieces) == 1:
        # A single count/unit combination.
        return pieces[0]
    if not detailed:
        # Drop the least significant units.
        pieces = pieces[:max_units]
    return concatenate(pieces)
Example #8
0
    def rotate_concurrent(self, *locations, **kw):
        """
        Rotate the backups in the given locations concurrently.

        :param locations: One or more values accepted by :func:`coerce_location()`.
        :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.

        This function uses :func:`rotate_backups()` to prepare rotation
        commands for the given locations and then it removes backups in
        parallel, one backup per mount point at a time.

        The idea behind this approach is that parallel rotation is most useful
        when the files to be removed are on different disks and so multiple
        devices can be utilized at the same time.

        Because mount points are per system :func:`rotate_concurrent()` will
        also parallelize over backups located on multiple remote systems.
        """
        timer = Timer()
        # At most ten rotation commands run simultaneously.
        pool = CommandPool(concurrency=10)
        logger.info("Scanning %s ..",
                    pluralize(len(locations), "backup location"))
        for location in locations:
            # prepare=True requests the commands without executing them,
            # so they can all be handed to the pool and run concurrently.
            for cmd in self.rotate_backups(location, prepare=True, **kw):
                pool.add(cmd)
        if pool.num_commands > 0:
            # NOTE(review): when no commands were prepared, nothing is
            # executed or logged beyond the initial scanning message.
            backups = pluralize(pool.num_commands, "backup")
            logger.info("Preparing to rotate %s (in parallel) ..", backups)
            pool.run()
            logger.info("Successfully rotated %s in %s.", backups, timer)
Example #9
0
    def install(self, directory, silent=False):
        """
        Install Node.js package(s) listed in a ``package.json`` file.

        :param directory: The pathname of a directory with a ``package.json`` file (a string).
        :param silent: Used to set :attr:`~executor.ExternalCommand.silent`.
        :returns: The result of :func:`extract_dependencies()`.
        """
        timer = Timer()
        package_file = os.path.join(directory, 'package.json')
        modules_directory = os.path.join(directory, 'node_modules')
        dependencies = self.extract_dependencies(package_file)
        logger.info("Installing Node.js package(s) in %s ..",
                    format_path(directory))
        if dependencies:
            # Compute the human readable summary once instead of duplicating
            # the pluralize() call in both logging branches below.
            summary = pluralize(len(dependencies), "dependency", "dependencies")
            file_in_cache = self.get_cache_file(dependencies)
            logger.verbose("Checking the cache (%s) ..", file_in_cache)
            if self.read_from_cache and self.context.is_file(file_in_cache):
                # Cache hit: unpack the previously cached node_modules tree.
                self.install_from_cache(file_in_cache, modules_directory)
                logger.info("Done! Took %s to install %s from cache.",
                            timer, summary)
            else:
                # Cache miss: run the real installer and optionally cache
                # the resulting node_modules tree for next time.
                self.installer_method(directory, silent=silent)
                self.prune_dependencies(directory)
                if self.write_to_cache:
                    self.add_to_cache(modules_directory, file_in_cache)
                logger.info("Done! Took %s to install %s using npm.",
                            timer, summary)
            self.clean_cache()
        else:
            logger.info("Nothing to do! (no dependencies to install)")
        return dependencies
Example #10
0
    def run(self):
        """
        Keep spawning commands and collecting results until all commands have run.

        :returns: The value of :attr:`results`.
        :raises: Any exceptions raised by :func:`collect()`.

        This method calls :func:`spawn()` and :func:`collect()` in a loop until
        all commands registered using :func:`add()` have run and finished. If
        :func:`collect()` raises an exception any running commands are
        terminated before the exception is propagated to the caller.

        If you're writing code where you want to own the main loop then
        consider calling :func:`spawn()` and :func:`collect()` directly instead
        of using :func:`run()`.

        When :attr:`concurrency` is set to one, specific care is taken to make
        sure that the callbacks configured by :attr:`.start_event` and
        :attr:`.finish_event` are called in the expected (intuitive) order.
        """
        # Start spawning processes to execute the commands.
        timer = Timer()
        logger.debug("Preparing to run %s with a concurrency of %i ..",
                     pluralize(self.num_commands, "command"), self.concurrency)
        try:
            with self.get_spinner(timer) as spinner:
                num_started = 0
                num_collected = 0
                while not self.is_finished:
                    # When concurrency is set to one (I know, initially it
                    # sounds like a silly use case, bear with me) I want the
                    # start_event and finish_event callbacks of external
                    # commands to fire in the right order. The following
                    # conditional is intended to accomplish this goal.
                    if self.concurrency > (num_started - num_collected):
                        num_started += self.spawn()
                    num_collected += self.collect()
                    # Update the interactive spinner with progress information
                    # (how many commands are still pending).
                    spinner.step(label=format(
                        "Waiting for %i/%i %s",
                        self.num_commands - self.num_finished,
                        self.num_commands,
                        "command" if self.num_commands == 1 else "commands",
                    ))
                    spinner.sleep()
        except Exception:
            if self.num_running > 0:
                logger.warning(
                    "Command pool raised exception, terminating running commands!"
                )
            # Terminate commands that are still running.
            self.terminate()
            # Re-raise the exception to the caller.
            raise
        # Collect the output and return code of any commands not yet collected.
        self.collect()
        logger.debug("Finished running %s in %s.",
                     pluralize(self.num_commands, "command"), timer)
        # Report the results to the caller.
        return self.results
Example #11
0
    def ranked_mirrors(self):
        """
        A list of :class:`CandidateMirror` objects (ordered from best to worst).

        The value of this property is computed by concurrently testing the
        mirrors in :attr:`available_mirrors` for the following details:

        - availability (:attr:`~CandidateMirror.is_available`)
        - connection speed (:attr:`~CandidateMirror.bandwidth`)
        - update status (:attr:`~CandidateMirror.is_updating`)

        The number of mirrors to test is limited to :attr:`max_mirrors` and you
        can change the number of simultaneous HTTP connections allowed by
        setting :attr:`concurrency`.
        """
        timer = Timer()
        # Sort the candidates based on the currently available information
        # (and transform the input argument into a list in the process).
        mirrors = sorted(self.available_mirrors,
                         key=lambda c: c.sort_key,
                         reverse=True)
        # Limit the number of candidates to a reasonable number?
        if self.max_mirrors and len(mirrors) > self.max_mirrors:
            mirrors = mirrors[:self.max_mirrors]
        # Prepare the Release.gpg URLs to fetch (mapping each URL back to
        # the candidate mirror it belongs to).
        mapping = dict((c.release_gpg_url, c) for c in mirrors)
        num_mirrors = pluralize(len(mapping), "mirror")
        logger.info("Checking %s for availability and performance ..",
                    num_mirrors)
        # Concurrently fetch the Release.gpg files.
        with AutomaticSpinner(label="Checking mirrors"):
            for url, data, elapsed_time in fetch_concurrent(
                    mapping.keys(), concurrency=self.concurrency):
                candidate = mapping[url]
                candidate.release_gpg_contents = data
                candidate.release_gpg_latency = elapsed_time
        # Concurrently check for Archive-Update-in-Progress markers, but only
        # for the mirrors that proved to be available above.
        update_mapping = dict((c.archive_update_in_progress_url, c)
                              for c in mirrors if c.is_available)
        logger.info("Checking %s for Archive-Update-in-Progress marker ..",
                    pluralize(len(update_mapping), "mirror"))
        with AutomaticSpinner(label="Checking mirrors"):
            for url, data, elapsed_time in fetch_concurrent(
                    update_mapping.keys(), concurrency=self.concurrency):
                # A successful fetch (data is not None) means the marker file
                # exists, i.e. the mirror is currently being updated.
                update_mapping[url].is_updating = data is not None
        # Sanity check our results.
        mirrors = list(mapping.values())
        logger.info("Finished checking %s (took %s).", num_mirrors, timer)
        if not any(c.is_available for c in mirrors):
            raise Exception("It looks like all %s are unavailable!" %
                            num_mirrors)
        if all(c.is_updating for c in mirrors):
            logger.warning("It looks like all %s are being updated?!",
                           num_mirrors)
        return sorted(mirrors, key=lambda c: c.sort_key, reverse=True)
Example #12
0
    def extract_dependencies(self, package_file):
        """
        Extract the relevant dependencies from a ``package.json`` file.

        :param package_file: The pathname of the file (a string).
        :returns: A dictionary with the relevant dependencies.
        :raises: :exc:`.MissingPackageFileError` when the given directory
                 doesn't contain a ``package.json`` file.

        If no dependencies are extracted from the ``package.json`` file
        a warning message is logged but it's not considered an error.
        """
        logger.verbose("Extracting dependencies (%s) ..", package_file)
        if not self.context.is_file(package_file):
            raise MissingPackageFileError("Missing package.json file! (%s)" % package_file)
        # Read, decode and parse the package metadata in one go.
        metadata = json.loads(auto_decode(self.context.read_file(package_file)))
        dependencies = metadata.get('dependencies', {})
        # Development dependencies are only relevant outside production mode.
        if not self.production:
            dependencies.update(metadata.get('devDependencies', {}))
        if dependencies:
            logger.verbose(
                "Extracted %s from package.json file.",
                pluralize(len(dependencies), "dependency", "dependencies"))
        else:
            logger.warning("No dependencies extracted from %s file?!",
                           package_file)
        return dependencies
Example #13
0
    def find_egg_info_file(self, pattern=''):
        """
        Find :pypi:`pip` metadata files in unpacked source distributions.

        :param pattern: The :mod:`glob` pattern to search for (a string).
        :returns: The matched filename (a string) or :data:`None` when
                  nothing matched (despite the name, a single filename is
                  returned, not a list).
        :raises: :exc:`Exception` when the pattern matches files in more
                 than one ``*.egg-info`` directory.

        When pip unpacks a source distribution archive it creates a directory
        ``pip-egg-info`` which contains the package metadata in a declarative
        and easy to parse format. This method finds such metadata files.
        """
        full_pattern = os.path.join(self.requirement.source_directory,
                                    'pip-egg-info', '*.egg-info', pattern)
        logger.debug("Looking for %r file(s) using pattern %r ..", pattern,
                     full_pattern)
        matches = glob.glob(full_pattern)
        if len(matches) > 1:
            # More than one match is ambiguous, so refuse to guess.
            msg = "Source distribution directory of %s (%s) contains multiple *.egg-info directories: %s"
            raise Exception(
                msg % (self.requirement.project_name, self.requirement.version,
                       concatenate(matches)))
        elif matches:
            logger.debug("Matched %s: %s.",
                         pluralize(len(matches), "file", "files"),
                         concatenate(matches))
            # Exactly one match at this point; return it.
            return matches[0]
        else:
            # Implicitly returns None when nothing matched.
            logger.debug("No matching %r files found.", pattern)
Example #14
0
def wait_for_processes(processes):
    """
    Wait for the given processes to end.

    Prints an overview of running processes to the terminal once a second so
    the user knows what they are waiting for.

    This function is not specific to :mod:`proc.cron` at all (it doesn't
    even need to know what cron jobs are), it just waits until all of the given
    processes have ended.

    :param processes: A list of :class:`~proc.tree.ProcessNode` objects.
    """
    timer = Timer()
    pending = list(processes)
    # Announce each process we are about to wait for.
    for proc in pending:
        logger.info("Waiting for process %i: %s (runtime is %s)", proc.pid,
                    quote(proc.cmdline),
                    format_timespan(round(proc.runtime)))
    with Spinner(timer=timer) as spinner:
        while pending:
            # Drop processes that have ended since the last iteration.
            pending = [p for p in pending if p.is_alive]
            if not pending:
                break
            summary = pluralize(len(pending), "process", "processes")
            pids = concatenate(str(p.pid) for p in pending)
            spinner.step(label="Waiting for %s: %s" % (summary, pids))
            spinner.sleep()
    logger.info("All processes have finished, we're done waiting (took %s).",
                timer.rounded)
Example #15
0
    def lsb_release_variables(self):
        """
        The contents of ``/etc/lsb-release`` as a dictionary.

        The values of :attr:`distributor_id` and :attr:`distribution_codename`
        are based on the information provided by :attr:`lsb_release_variables`.
        If ``/etc/lsb-release`` doesn't exist or can't be parsed a debug
        message is logged and an empty dictionary is returned. Here's an
        example:

        >>> from executor.contexts import LocalContext
        >>> context = LocalContext()
        >>> context.lsb_release_variables
        {'DISTRIB_CODENAME': 'bionic',
         'DISTRIB_DESCRIPTION': 'Ubuntu 18.04.1 LTS',
         'DISTRIB_ID': 'Ubuntu',
         'DISTRIB_RELEASE': '18.04'}

        The :attr:`lsb_release_variables` property was added in response to
        `issue #10`_ where it was reported that the :man:`lsb_release` program
        wasn't available in vanilla Ubuntu 18.04 Docker images.

        .. _issue #10: https://github.com/xolox/python-executor/issues/10
        """
        variables = dict()
        # We proceed under the assumption that the file exists, but avoid
        # raising an exception when it doesn't and we don't leak error messages
        # to the standard error stream. We could have used is_file() and
        # is_readable() to "ask for permission instead of forgiveness" (so to
        # speak) but that requires the execution of three external commands
        # instead of one to accomplish the exact same thing :-P.
        logger.debug("Trying to read /etc/lsb-release ..")
        # check=False avoids raising on a nonzero exit status and silent=True
        # suppresses the error output of `cat' (see the comment above).
        contents = self.capture('cat', '/etc/lsb-release', check=False, silent=True)
        logger.debug("Parsing /etc/lsb-release contents: %r", contents)
        for lnum, line in enumerate(contents.splitlines()):
            # Split each KEY=VALUE line on the first equals sign.
            name, delimiter, value = line.partition(u'=')
            # The following encode/decode trick works around shlex.split() not
            # properly supporting Unicode strings on Python 2.7, for details
            # refer to https://stackoverflow.com/a/14219159/788200.
            if PY2:
                tokens = shlex.split(value.encode('UTF-8'))
                parsed_value = [t.decode('UTF-8') for t in tokens]
            else:
                parsed_value = shlex.split(value)
            # The null byte check below guards against a weird edge case
            # that has so far only manifested in the Python 2.6 environment
            # of Travis CI: The parsing of /etc/lsb-release results in the
            # expected variable names but values containing binary
            # data including nul bytes, for details refer to
            # https://github.com/xolox/python-executor/issues/15.
            if len(parsed_value) == 1 and u'\0' not in parsed_value[0]:
                variables[name.strip()] = parsed_value[0]
            else:
                logger.debug("Failed to parse line %i: %r", lnum + 1, line)
        if variables:
            logger.debug("Extracted %s from /etc/lsb-release: %r", pluralize(len(variables), "variable"), variables)
        else:
            logger.debug("Failed to read /etc/lsb-release ..")
        return variables
Example #16
0
 def error_message(self):
     """An error message that explains which commands *failed unexpectedly* (a string)."""
     # One-line summary followed by a bulleted list of failed commands.
     header = format("%i out of %s failed unexpectedly:",
                     self.pool.num_failed,
                     pluralize(self.pool.num_commands, "command"))
     bullets = "\n".join(" - %s" % cmd.error_message
                         for cmd in self.commands)
     return "\n\n".join([header, bullets])
Example #17
0
def format_timespan(num_seconds, detailed=False):
    """
    Format a timespan in seconds as a human readable string.

    :param num_seconds: Number of seconds (integer or float).
    :param detailed: If :data:`True` milliseconds are represented separately
                     instead of being represented as fractional seconds
                     (defaults to :data:`False`).
    :returns: The formatted timespan as a string.

    Some examples:

    >>> from humanfriendly import format_timespan
    >>> format_timespan(0)
    '0 seconds'
    >>> format_timespan(1)
    '1 second'
    >>> import math
    >>> format_timespan(math.pi)
    '3.14 seconds'
    >>> hour = 60 * 60
    >>> day = hour * 24
    >>> week = day * 7
    >>> format_timespan(week * 52 + day * 2 + hour * 3)
    '1 year, 2 days and 3 hours'
    """
    if num_seconds < 60 and not detailed:
        # Sub-minute timespans are formatted directly.
        return pluralize(round_number(num_seconds), 'second')
    # Peel off one unit at a time, starting with the largest.
    parts = []
    remainder = num_seconds
    for unit in reversed(time_units):
        divider = unit['divider']
        if remainder >= divider:
            count = int(remainder / divider)
            remainder %= divider
            parts.append(pluralize(count, unit['singular'], unit['plural']))
    if len(parts) == 1:
        # A single count/unit combination.
        return parts[0]
    if not detailed:
        # Keep only the three most significant units.
        parts = parts[:3]
    return concatenate(parts)
def format_timespan(num_seconds, detailed=False):
    """
    Format a timespan in seconds as a human readable string.

    :param num_seconds: Number of seconds (integer or float).
    :param detailed: If :data:`True` milliseconds are represented separately
                     instead of being represented as fractional seconds
                     (defaults to :data:`False`).
    :returns: The formatted timespan as a string.

    Some examples:

    >>> from humanfriendly import format_timespan
    >>> format_timespan(0)
    '0 seconds'
    >>> format_timespan(1)
    '1 second'
    >>> import math
    >>> format_timespan(math.pi)
    '3.14 seconds'
    >>> hour = 60 * 60
    >>> day = hour * 24
    >>> week = day * 7
    >>> format_timespan(week * 52 + day * 2 + hour * 3)
    '1 year, 2 days and 3 hours'
    """
    if detailed or num_seconds >= 60:
        # Decompose the timespan into units, largest first.
        chunks = []
        for unit in reversed(time_units):
            divider = unit['divider']
            if num_seconds < divider:
                continue
            count = int(num_seconds / divider)
            num_seconds %= divider
            chunks.append(pluralize(count, unit['singular'], unit['plural']))
        if len(chunks) == 1:
            # A single count/unit combination.
            return chunks[0]
        # Outside of detailed mode only the three most significant
        # units are included in the result.
        return concatenate(chunks if detailed else chunks[:3])
    # Sub-minute timespans take the fast path.
    return pluralize(round_number(num_seconds), 'second')
Example #19
0
    def __init__(self, browser, job_group_url, root_url, args):
        """
        Construct a product report object with options.

        :param browser: Page fetching helper (must provide ``get_soup()``).
        :param job_group_url: URL of the job group overview page (a string).
        :param root_url: Base URL of the instance (passed on to
                         :class:`ArchReport`).
        :param args: Parsed command line options; reads ``builds``,
                     ``against_reviewed``, ``running_threshold``, ``arch``,
                     ``output_state_results`` and ``query_issue_status``.
        """
        self.args = args
        self.job_group_url = job_group_url
        # The group name is the last path component of the URL.
        self.group = job_group_url.split('/')[-1]
        current_url, previous_url = get_build_urls_to_compare(
            browser, job_group_url, args.builds, args.against_reviewed,
            args.running_threshold)
        # read last finished
        current_details = browser.get_soup(current_url)
        previous_details = browser.get_soup(previous_url)
        # Both pages must show at least one test result badge, otherwise the
        # requested builds are assumed to be invalid.
        for details in current_details, previous_details:
            assert sum(int(badge.text) for badge in details.find_all(class_='badge')) > 0, \
                "invalid page with no test results found, make sure you specified valid builds (leading zero missing?)"
        current_summary = parse_summary(current_details)
        previous_summary = parse_summary(previous_details)

        # Delta of the summary counts between the two builds, ignoring the
        # 'none' and 'incomplete' states.
        changes = {
            k: v - previous_summary.get(k, 0)
            for k, v in iteritems(current_summary)
            if k != 'none' and k != 'incomplete'
        }
        log.info("Changes since last build:\n\t%s" %
                 '\n\t'.join("%s: %s" % (k, v) for k, v in iteritems(changes)))

        self.build = get_build_nr(current_url)
        self.ref_build = get_build_nr(previous_url)

        # for each architecture iterate over all
        cur_archs, prev_archs = (
            set(arch.text
                for arch in details.find_all('th', id=re.compile('flavor_')))
            for details in [current_details, previous_details])
        archs = cur_archs
        if args.arch:
            # Restrict the report to the explicitly selected architecture.
            assert args.arch in cur_archs, "Selected arch {} was not found in test results {}".format(
                args.arch, cur_archs)
            archs = [args.arch]
        # Architectures present in the previous build but absent from the
        # current one are reported as missing.
        self.missing_archs = sorted(prev_archs - cur_archs)
        if self.missing_archs:
            log.info("%s missing completely from current run: %s" % (pluralize(
                len(self.missing_archs), "architecture is",
                "architectures are"), ', '.join(self.missing_archs)))

        # create arch reports
        self.reports = SortedDict()
        # Issue tracker browsers are only constructed when explicitly
        # requested via --query-issue-status.
        progress_browser = progress_browser_factory(
            args) if args.query_issue_status else None
        bugzilla_browser = bugzilla_browser_factory(
            args) if args.query_issue_status else None
        for arch in sorted(archs):
            results = get_arch_state_results(arch, current_details,
                                             previous_details,
                                             args.output_state_results)
            self.reports[arch] = ArchReport(arch, results, args, root_url,
                                            progress_browser, bugzilla_browser,
                                            browser)
Example #20
0
    def fuzzy_search(self, *filters):
        """
        Perform a "fuzzy" search that matches the given characters in the given order.

        :param filters: The pattern(s) to search for.
        :returns: The matched entries (a list of entry objects, not name
                  strings).
        """
        matches = []
        logger.verbose("Performing fuzzy search on %s (%s) ..",
                       pluralize(len(filters), "pattern"),
                       concatenate(map(repr, filters)))
        # Compile one pattern per filter (see create_fuzzy_pattern()).
        patterns = list(map(create_fuzzy_pattern, filters))
        for entry in self.entries:
            # Every pattern must match somewhere in the entry name.
            if all(p.search(entry.name) for p in patterns):
                matches.append(entry)
        # Log at INFO when something matched, VERBOSE otherwise.
        logger.log(logging.INFO if matches else logging.VERBOSE,
                   "Matched %s using fuzzy search.",
                   pluralize(len(matches), "password"))
        return matches
Example #21
0
 def port_number(self):
     """A dynamically selected free ephemeral port number (an integer between 49152 and 65535)."""
     timer = Timer()
     logger.debug("Looking for free ephemeral port number ..")
     attempt = 0
     while True:
         attempt += 1
         candidate = self.ephemeral_port_number
         # Store the candidate so is_connected checks this port.
         set_property(self, 'port_number', candidate)
         if not self.is_connected:
             logger.debug("Found free ephemeral port number %s after %s (took %s).",
                          candidate, pluralize(attempt, "attempt"), timer)
             return candidate
Example #22
0
def generate_product_report(browser, job_group_url, root_url, args=None):
    """Read overview page of one job group and generate a report for the product.

    @param browser: Browser/scraper object providing a ``get_soup(url)`` method.
    @param job_group_url: URL of the job group overview page to review.
    @param root_url: Base URL of the openQA instance.
    @param args: Parsed command line options controlling the report.
    @returns review report for product in Markdown format
    @raises NotEnoughBuildsError when no two comparable builds are found

    Example:
    >>> browser = BrowserPlus() # doctest: +SKIP
    >>> report = generate_product_report(browser, 'https://openqa.opensuse.org/group_overview/25', 'https://openqa.opensuse.org') # doctest: +SKIP
    """
    # NOTE(review): `args` defaults to None but is dereferenced unconditionally
    # below, so callers apparently always pass it -- confirm before relying on
    # the default value.
    output_state_results = args.output_state_results if args.output_state_results else False
    verbose_test = args.verbose_test if args.verbose_test else False
    try:
        current_url, previous_url = get_build_urls_to_compare(browser, job_group_url, args.builds, args.against_reviewed)
    except ValueError:
        # get_build_urls_to_compare() signals "nothing to compare" with ValueError.
        raise NotEnoughBuildsError()

    # read last finished
    current_details = browser.get_soup(current_url)
    previous_details = browser.get_soup(previous_url)
    for details in current_details, previous_details:
        # Sanity check: a valid overview page has at least one non-zero result badge.
        assert sum(int(badge.text) for badge in details.find_all(class_='badge')) > 0, \
            "invalid page with no test results found, make sure you specified valid builds (leading zero missing?)"
    current_summary = parse_summary(current_details)
    previous_summary = parse_summary(previous_details)

    # Delta of result counts between the two builds; 'none' and 'incomplete'
    # states are excluded from the comparison.
    changes = {k: v - previous_summary.get(k, 0) for k, v in iteritems(current_summary) if k != 'none' and k != 'incomplete'}
    log.info("Changes since last build:\n\t%s" % '\n\t'.join("%s: %s" % (k, v) for k, v in iteritems(changes)))

    def get_build_nr(url):
        # Extract the (URL-decoded) build number from the 'build=...' query parameter.
        return unquote(re.search('build=([^&]*)', url).groups()[0])
    build = get_build_nr(current_url) + '**'
    if args.verbose_test and args.verbose_test > 1:
        build += ' (reference %s)' % get_build_nr(previous_url)
    # for each architecture iterate over all
    cur_archs, prev_archs = (set(arch.text for arch in details.find_all('th', id=re.compile('flavor_'))) for details in [current_details, previous_details])
    archs = cur_archs
    if args.arch:
        assert args.arch in cur_archs, "Selected arch {} was not found in test results {}".format(args.arch, cur_archs)
        archs = [args.arch]
    # Architectures present in the previous build but absent from the current one.
    missing_archs = prev_archs - cur_archs
    if missing_archs:
        log.info("%s missing completely from current run: %s" %
                 (pluralize(len(missing_archs), "architecture is", "architectures are"), ', '.join(missing_archs)))
    arch_state_results = {arch: get_arch_state_results(arch, current_details, previous_details, output_state_results) for arch in archs}
    now_str = datetime.datetime.now().strftime('%Y-%m-%d - %H:%M')
    openqa_review_report_product = openqa_review_report_product_template.substitute({
        'now': now_str,
        'build': build,
        # TODO Missing architectures should probably be moved into the arch report, not as "common issue", e.g. by adding missing archs to arch_state_results
        'common_issues': ' * **Missing architectures**: %s' % ', '.join(missing_archs) if missing_archs else 'None',  # reserved for manual entries for now
        'arch_report': generate_arch_reports(arch_state_results, root_url, verbose_test),
    })
    return openqa_review_report_product
Пример #23
0
    def __init__(self, **kw):
        """
        Initialize a :class:`PropertyManager` object.

        :param kw: Any keyword arguments are passed on to :func:`set_properties()`.
        :raises: :exc:`TypeError` when any required properties are missing.
        """
        self.set_properties(**kw)
        # Required properties that the caller didn't supply trigger a
        # TypeError, mimicking Python's own "missing argument" errors.
        missing = self.missing_properties
        if missing:
            raise TypeError("%s (%s)" % (
                "missing %s" % pluralize(len(missing), "required argument"),
                concatenate(missing),
            ))
def format_timespan(num_seconds):
    """
    Format a timespan in seconds as a human readable string.

    :param num_seconds: Number of seconds (integer or float).
    :returns: The formatted timespan as a string.

    Some examples:

    >>> from humanfriendly import format_timespan
    >>> format_timespan(0)
    '0 seconds'
    >>> format_timespan(1)
    '1 second'
    >>> import math
    >>> format_timespan(math.pi)
    '3.14 seconds'
    >>> hour = 60 * 60
    >>> day = hour * 24
    >>> week = day * 7
    >>> format_timespan(week * 52 + day * 2 + hour * 3)
    '1 year, 2 days and 3 hours'
    """
    if num_seconds < 60:
        # Fast path: sub-minute timespans are rendered as (fractional) seconds.
        return pluralize(round_number(num_seconds), 'second')
    else:
        # Slow path: decompose the timespan starting from the largest unit.
        result = []
        for unit in reversed(time_units):
            if num_seconds >= unit['divider']:
                # NOTE: int() truncates, so any fractional remainder of the
                # smallest matched unit is dropped for timespans >= 60 seconds.
                count = int(num_seconds / unit['divider'])
                num_seconds %= unit['divider']
                result.append(pluralize(count, unit['singular'], unit['plural']))
        if len(result) == 1:
            # A single count/unit combination.
            return result[0]
        else:
            # Limit the result to the three most significant units and
            # join them in a human readable way ("a, b and c").
            return concatenate(result[:3])
Пример #25
0
def format_size(num_bytes, keep_width=False, binary=False):
    """
    Format a byte count as a human readable file size.

    :param num_bytes: The size to format in bytes (an integer).
    :param keep_width: :data:`True` if trailing zeros should not be stripped,
                       :data:`False` if they can be stripped.
    :param binary: :data:`True` to use binary multiples of bytes (base-2),
                   :data:`False` to use decimal multiples of bytes (base-10).
    :returns: The corresponding human readable file size (a string).

    This function knows how to format sizes in bytes, kilobytes, megabytes,
    gigabytes, terabytes and petabytes. Some examples:

    >>> from humanfriendly import format_size
    >>> format_size(0)
    '0 bytes'
    >>> format_size(1)
    '1 byte'
    >>> format_size(5)
    '5 bytes'
    >>> format_size(1000)
    '1 KB'
    >>> format_size(1024, binary=True)
    '1 KiB'
    >>> format_size(1000 ** 3 * 4)
    '4 GB'
    """
    # Walk the units from largest to smallest and use the first one that fits.
    for unit in reversed(disk_size_units):
        if num_bytes >= unit.binary.divider and binary:
            number = round_number(float(num_bytes) / unit.binary.divider,
                                  keep_width=keep_width)
            return pluralize(number, unit.binary.symbol, unit.binary.symbol)
        elif num_bytes >= unit.decimal.divider and not binary:
            number = round_number(float(num_bytes) / unit.decimal.divider,
                                  keep_width=keep_width)
            return pluralize(number, unit.decimal.symbol, unit.decimal.symbol)
    # Fall through for sizes smaller than the smallest unit.
    return pluralize(num_bytes, 'byte')
Пример #26
0
 def entries(self):
     """A list of :class:`PasswordEntry` objects."""
     timer = Timer()
     logger.info("Scanning %s ..", format_path(self.directory))
     listing = self.context.capture("find", "-type", "f", "-name", "*.gpg", "-print0")
     passwords = [
         # os.path.normpath() strips the leading `./' prefixes that `find'
         # adds because it searches the working directory.
         PasswordEntry(name=os.path.normpath(base), store=self)
         for base, ext in (os.path.splitext(fn) for fn in split(listing, "\0"))
         if ext == ".gpg"
     ]
     logger.verbose("Found %s in %s.", pluralize(len(passwords), "password"), timer)
     return natsort(passwords, key=lambda e: e.name)
Пример #27
0
def discover_mirror_selection():
    """Discover "geographically suitable" Ubuntu mirrors."""
    timer = Timer()
    logger.info("Identifying fast Ubuntu mirrors using %s ..",
                MIRROR_SELECTION_URL)
    # The service responds with one mirror URL per line; the payload's
    # encoding is detected by UnicodeDammit.
    response = fetch_url(MIRROR_SELECTION_URL, retry=False)
    markup = UnicodeDammit(response).unicode_markup
    mirrors = set()
    for line in markup.splitlines():
        if line and not line.isspace():
            mirrors.add(CandidateMirror(mirror_url=line.strip()))
    logger.debug("Found %s in %s.",
                 pluralize(len(mirrors), "fast Ubuntu mirror"), timer)
    return mirrors
Пример #28
0
    def lsb_release_variables(self):
        """
        The contents of ``/etc/lsb-release`` as a dictionary.

        The values of :attr:`distributor_id` and :attr:`distribution_codename`
        are based on the information provided by :attr:`lsb_release_variables`.
        If ``/etc/lsb-release`` doesn't exist or can't be parsed a debug
        message is logged and an empty dictionary is returned. Here's an
        example:

        >>> from executor.contexts import LocalContext
        >>> context = LocalContext()
        >>> context.lsb_release_variables
        {'DISTRIB_CODENAME': 'bionic',
         'DISTRIB_DESCRIPTION': 'Ubuntu 18.04.1 LTS',
         'DISTRIB_ID': 'Ubuntu',
         'DISTRIB_RELEASE': '18.04'}
        """
        variables = dict()
        # We proceed under the assumption that the file exists, but avoid
        # raising an exception when it doesn't and we don't leak error messages
        # to the standard error stream. We could have used is_file() and
        # is_readable() to "ask for permission instead of forgiveness" (so to
        # speak) but that requires the execution of three external commands
        # instead of one to accomplish the exact same thing :-P.
        logger.debug("Trying to read /etc/lsb-release ..")
        contents = self.capture('cat',
                                '/etc/lsb-release',
                                check=False,
                                silent=True)
        logger.debug("Parsing /etc/lsb-release contents: %r", contents)
        for lnum, line in enumerate(contents.splitlines()):
            # `delimiter' is intentionally unused; str.partition() always
            # returns three values.
            name, delimiter, value = line.partition('=')
            # shlex.split() takes care of stripping optional shell style
            # quoting around the value.
            parsed_value = shlex.split(value)
            # The zero byte check below guards against a weird edge that has so
            # far only manifested in the Python 2.6 environment of Travis CI:
            # The parsing of /etc/lsb-release results in the expected variable
            # names but values containing binary data including nul bytes.
            # https://github.com/xolox/python-executor/issues/15.
            if len(parsed_value) == 1 and '\0' not in parsed_value[0]:
                variables[name.strip()] = parsed_value[0]
            else:
                logger.debug("Failed to parse line %i: %r", lnum + 1, line)
        if variables:
            logger.debug("Extracted %s from /etc/lsb-release: %r",
                         pluralize(len(variables), "variable"), variables)
        else:
            logger.debug("Failed to read /etc/lsb-release ..")
        return variables
Пример #29
0
def format_size(num_bytes, keep_width=False, binary=False):
    """
    Format a byte count as a human readable file size.

    :param num_bytes: The size to format in bytes (an integer).
    :param keep_width: :data:`True` if trailing zeros should not be stripped,
                       :data:`False` if they can be stripped.
    :param binary: :data:`True` to use binary multiples of bytes (base-2),
                   :data:`False` to use decimal multiples of bytes (base-10).
    :returns: The corresponding human readable file size (a string).

    This function knows how to format sizes in bytes, kilobytes, megabytes,
    gigabytes, terabytes and petabytes. Some examples:

    >>> from humanfriendly import format_size
    >>> format_size(0)
    '0 bytes'
    >>> format_size(1)
    '1 byte'
    >>> format_size(5)
    '5 bytes'
    >>> format_size(1000)
    '1 KB'
    >>> format_size(1024, binary=True)
    '1 KiB'
    >>> format_size(1000 ** 3 * 4)
    '4 GB'
    """
    # Walk the units from largest to smallest and use the first one that fits.
    for unit in reversed(disk_size_units):
        if num_bytes >= unit.binary.divider and binary:
            number = round_number(float(num_bytes) / unit.binary.divider, keep_width=keep_width)
            return pluralize(number, unit.binary.symbol, unit.binary.symbol)
        elif num_bytes >= unit.decimal.divider and not binary:
            number = round_number(float(num_bytes) / unit.decimal.divider, keep_width=keep_width)
            return pluralize(number, unit.decimal.symbol, unit.decimal.symbol)
    # Fall through for sizes smaller than the smallest unit.
    return pluralize(num_bytes, 'byte')
Пример #30
0
    def collect(self):
        """
        Collect the exit codes and output of finished commands.

        :returns: The number of external commands that were collected by this
                  invocation of :func:`collect()` (an integer).
        :raises: If :attr:`delay_checks` is :data:`True`:
                  After all external commands have started and finished, if any
                  commands that have :attr:`~.ExternalCommand.check` set to
                  :data:`True` failed :exc:`CommandPoolFailed` is raised.
                 If :attr:`delay_checks` is :data:`False`:
                  The exceptions :exc:`.ExternalCommandFailed`,
                  :exc:`.RemoteCommandFailed` and :exc:`.RemoteConnectFailed`
                  can be raised if a command in the pool that has
                  :attr:`~.ExternalCommand.check` set to :data:`True` fails.
                  The :attr:`~.ExternalCommandFailed.pool` attribute of the
                  exception will be set to the pool.

        .. warning:: If an exception is raised, commands that are still running
                     will not be terminated! If this concerns you then consider
                     calling :func:`terminate()` from a :keyword:`finally`
                     block (this is what :func:`run()` does).
        """
        num_collected = 0
        for identifier, command in self.commands:
            # Only finished commands that haven't been collected yet are eligible.
            if identifier not in self.collected and command.is_finished:
                try:
                    # When delayed checking is enabled wait() is told not to
                    # check, so individual failures don't raise here; they are
                    # reported collectively at the end (CommandPoolFailed).
                    command.wait(check=False if self.delay_checks else None)
                except ExternalCommandFailed as e:
                    if not command.retry_allowed:
                        # Propagate exceptions that can't be retried.
                        e.pool = self
                        raise
                finally:
                    # Update our bookkeeping even if wait() raised an exception.
                    if not command.retry_allowed:
                        self.collected.add(identifier)
                # We count retries as collected commands in order to
                # preserve the symmetry between the return values of
                # spawn() and collect() because run() depends on it.
                num_collected += 1
        if num_collected > 0:
            logger.debug("Collected %s ..",
                         pluralize(num_collected, "external command"))
        # Check if delayed error checking was requested and is applicable.
        if self.delay_checks and self.is_finished and self.unexpected_failures:
            raise CommandPoolFailed(pool=self)
        return num_collected
Пример #31
0
def check_mandatory_fields(control_fields):
    """
    Make sure mandatory binary control fields are defined.

    :param control_fields: A dictionary with control file fields.
    :raises: :exc:`~exceptions.ValueError` when a mandatory binary control
             field is not present in the provided control fields (see also
             :data:`MANDATORY_BINARY_CONTROL_FIELDS`).
    """
    # A field counts as missing when it's absent or has a falsy value.
    missing = []
    for name in MANDATORY_BINARY_CONTROL_FIELDS:
        if not control_fields.get(name):
            missing.append(name)
    if missing:
        raise ValueError(compact(
            "Missing {fields}! ({details})",
            fields=pluralize(len(missing), "mandatory binary package control field"),
            details=concatenate(sorted(missing)),
        ))
Пример #32
0
    def lsb_release_variables(self):
        """
        The contents of ``/etc/lsb-release`` as a dictionary.

        The values of :attr:`distributor_id` and :attr:`distribution_codename`
        are based on the information provided by :attr:`lsb_release_variables`.
        If ``/etc/lsb-release`` doesn't exist or can't be parsed a debug
        message is logged and an empty dictionary is returned. Here's an
        example:

        >>> from executor.contexts import LocalContext
        >>> context = LocalContext()
        >>> context.lsb_release_variables
        {'DISTRIB_CODENAME': 'bionic',
         'DISTRIB_DESCRIPTION': 'Ubuntu 18.04.1 LTS',
         'DISTRIB_ID': 'Ubuntu',
         'DISTRIB_RELEASE': '18.04'}
        """
        # Assume the file exists and simply try to read it: this avoids
        # raising an exception when it doesn't, avoids leaking error output
        # to the standard error stream and needs only a single external
        # command instead of three (is_file() + is_readable() + cat).
        logger.debug("Trying to read /etc/lsb-release ..")
        contents = self.capture('cat', '/etc/lsb-release', check=False, silent=True)
        logger.debug("Parsing /etc/lsb-release contents: %r", contents)
        variables = {}
        for index, line in enumerate(contents.splitlines()):
            name, _, value = line.partition('=')
            # shlex.split() strips optional shell style quoting.
            words = shlex.split(value)
            # The nul byte check guards against a weird edge case that has so
            # far only manifested in the Python 2.6 environment of Travis CI,
            # where parsing yielded the expected variable names but values
            # containing binary data including nul bytes. Refer to
            # https://github.com/xolox/python-executor/issues/15.
            if len(words) == 1 and '\0' not in words[0]:
                variables[name.strip()] = words[0]
            else:
                logger.debug("Failed to parse line %i: %r", index + 1, line)
        if variables:
            logger.debug("Extracted %s from /etc/lsb-release: %r", pluralize(len(variables), "variable"), variables)
        else:
            logger.debug("Failed to read /etc/lsb-release ..")
        return variables
Пример #33
0
    def __init__(self, browser, job_group_url, root_url, args):
        """Construct a product report object with options.

        :param browser: Browser/scraper object providing a ``get_soup(url)`` method.
        :param job_group_url: URL of the job group overview page to review.
        :param root_url: Base URL of the openQA instance.
        :param args: Parsed command line options controlling the report.
        :raises NotEnoughBuildsError: when no two comparable builds are found.
        """
        self.args = args
        self.job_group_url = job_group_url
        # The job group identifier is the last path component of the URL.
        self.group = job_group_url.split('/')[-1]

        try:
            current_url, previous_url = get_build_urls_to_compare(browser, job_group_url, args.builds, args.against_reviewed, args.running_threshold)
        except ValueError:
            # get_build_urls_to_compare() signals "nothing to compare" with ValueError.
            raise NotEnoughBuildsError()

        # read last finished
        current_details = browser.get_soup(current_url)
        previous_details = browser.get_soup(previous_url)
        for details in current_details, previous_details:
            # Sanity check: a valid overview page has at least one non-zero result badge.
            assert sum(int(badge.text) for badge in details.find_all(class_='badge')) > 0, \
                "invalid page with no test results found, make sure you specified valid builds (leading zero missing?)"
        current_summary = parse_summary(current_details)
        previous_summary = parse_summary(previous_details)

        # Delta of result counts between the two builds; 'none' and 'incomplete'
        # states are excluded from the comparison.
        changes = {k: v - previous_summary.get(k, 0) for k, v in iteritems(current_summary) if k != 'none' and k != 'incomplete'}
        log.info("Changes since last build:\n\t%s" % '\n\t'.join("%s: %s" % (k, v) for k, v in iteritems(changes)))

        self.build = get_build_nr(current_url)
        self.ref_build = get_build_nr(previous_url)

        # for each architecture iterate over all
        cur_archs, prev_archs = (set(arch.text for arch in details.find_all('th', id=re.compile('flavor_'))) for details in [current_details, previous_details])
        archs = cur_archs
        if args.arch:
            assert args.arch in cur_archs, "Selected arch {} was not found in test results {}".format(args.arch, cur_archs)
            archs = [args.arch]
        # Architectures present in the previous build but absent from the current one.
        self.missing_archs = sorted(prev_archs - cur_archs)
        if self.missing_archs:
            log.info("%s missing completely from current run: %s" %
                     (pluralize(len(self.missing_archs), "architecture is", "architectures are"), ', '.join(self.missing_archs)))

        # create arch reports
        self.reports = SortedDict()
        # Issue tracker browsers are only needed when issue status is queried.
        progress_browser = progress_browser_factory(args) if args.query_issue_status else None
        bugzilla_browser = bugzilla_browser_factory(args) if args.query_issue_status else None
        for arch in sorted(archs):
            results = get_arch_state_results(arch, current_details, previous_details, args.output_state_results)
            self.reports[arch] = ArchReport(arch, results, args, root_url, progress_browser, bugzilla_browser, browser)
Пример #34
0
    def parse_host_keys(self, cmd):
        """
        Find the SSH host keys in the output of ssh-keyscan_.

        :param cmd: A :class:`~executor.ssh.client.RemoteCommand` object.
        :returns: A :class:`set` of strings.
        """
        host_keys = set()
        for line in cmd.stdout.splitlines():
            fields = line.split()
            # Expected field layout: hostname, key type, base64 encoded key.
            if len(fields) >= 3:
                host_keys.add(fields[2].decode('ascii'))
        if host_keys:
            logger.verbose("Found %s in ssh-keyscan output.",
                           pluralize(len(host_keys), "host key"))
        else:
            logger.verbose("Didn't find any host keys in ssh-keyscan output!")
        return host_keys
Пример #35
0
    def spawn(self):
        """
        Spawn additional external commands up to the :attr:`concurrency` level.

        :returns: The number of external commands that were spawned by this
                  invocation of :func:`spawn()` (an integer).

        The commands to start are picked according to three criteria:

        1. The command's :attr:`~.ExternalCommand.was_started` property is
           :data:`False`.
        2. The command's :attr:`~.ExternalCommand.group_by` value is not
           present in :attr:`running_groups`.
        3. The :attr:`~.ExternalCommand.is_finished_with_retries`
           properties of all of the command's :attr:`~.ExternalCommand.dependencies`
           are :data:`True`.
        """
        num_started = 0
        # Number of free slots below the configured concurrency level.
        limit = self.concurrency - self.num_running
        if limit > 0:
            running_groups = self.running_groups
            for id, cmd in self.commands:
                # Skip commands that have already been started and cannot be retried.
                if (not cmd.was_started) or (cmd.retry_allowed
                                             and not cmd.is_running):
                    # If command groups are being used we'll only
                    # allow one running command per command group.
                    if cmd.group_by not in running_groups:
                        # If a command has any dependencies we won't allow it
                        # to start until all of its dependencies have finished.
                        if all(d.is_finished_with_retries
                               for d in cmd.dependencies):
                            cmd.start()
                            num_started += 1
                            # Record the group so no sibling of the same group
                            # is started during this invocation.
                            if cmd.group_by is not None:
                                running_groups.add(cmd.group_by)
                            # Stop once all free slots have been filled.
                            if num_started == limit:
                                break
        if num_started > 0:
            logger.debug("Spawned %s ..",
                         pluralize(num_started, "external command"))
        return num_started
Пример #36
0
def cron_graceful(arguments):
    """Command line interface for the ``cron-graceful`` program.

    :param arguments: The command line arguments (a list of strings).

    Suspends the cron daemon, waits for running cron jobs to finish and then
    terminates the daemon. In dry run mode the daemon is left untouched and
    the actions are only reported.
    """
    runtime_timer = Timer()
    # Initialize logging to the terminal.
    dry_run = parse_arguments(arguments)
    if not dry_run:
        # Manipulating the cron daemon requires superuser privileges.
        ensure_root_privileges()
    try:
        cron_daemon = find_cron_daemon()
    except CronDaemonNotRunning:
        # An already stopped daemon is treated as success (nothing to do).
        logger.info(
            "No running cron daemon found, assuming it was previously stopped .."
        )
    else:
        if not dry_run:
            # Prevent the cron daemon from starting new cron jobs.
            cron_daemon.suspend()
            # Enable user defined additional logic.
            run_additions()
        # Identify the running cron jobs based on the process tree _after_ the
        # cron daemon has been paused (assuming we're not performing a dry run)
        # so we know for sure that we see all running cron jobs (also we're not
        # interested in any processes that have already been stopped by
        # cron-graceful-additions).
        cron_daemon = find_cron_daemon()
        cron_jobs = sorted_by_pid(cron_daemon.grandchildren)
        if cron_jobs:
            logger.info("Found %s: %s",
                        pluralize(len(cron_jobs), "running cron job"),
                        concatenate(str(j.pid) for j in cron_jobs))
            # Wait for the running cron jobs to finish.
            wait_for_processes(cron_jobs)
        else:
            logger.info("No running cron jobs found.")
        # Terminate the cron daemon (or just report that we would, in dry run mode).
        if dry_run:
            logger.info("Stopping cron daemon with process id %i ..",
                        cron_daemon.pid)
        else:
            terminate_cron_daemon(cron_daemon)
        logger.info("Done! Took %s to gracefully terminate cron.",
                    runtime_timer.rounded)
Пример #37
0
    def terminate(self):
        """
        Terminate any commands that are currently running.

        :returns: The number of commands that were terminated (an integer).

        If :func:`terminate()` successfully terminates commands, you then call
        :func:`collect()` and the :attr:`.check` property of a terminated
        command is :data:`True` you will get an exception because terminated
        commands (by definition) report a nonzero
        :attr:`~executor.ExternalCommand.returncode`.
        """
        # Count the commands whose terminate() reports success.
        num_terminated = sum(
            1 for identifier, command in self.commands if command.terminate()
        )
        if num_terminated > 0:
            logger.warning("Terminated %s ..",
                           pluralize(num_terminated, "external command"))
        return num_terminated
Пример #38
0
 def clean_cache(self):
     """Remove old and unused archives from the cache directory."""
     timer = Timer()
     # Pair every cached archive with its last access time so the least
     # recently used archives sort first.
     entries = [
         (self.read_metadata(fn).get('last-accessed', 0), fn)
         for fn in self.find_archives()
     ]
     # Everything beyond the cache limit (oldest first) is removed.
     to_remove = sorted(entries)[:-self.cache_limit]
     if to_remove:
         for last_used, file_in_cache in to_remove:
             logger.debug("Removing archive from cache: %s", file_in_cache)
             metadata_file = self.get_metadata_file(file_in_cache)
             self.context.execute('rm', '-f', file_in_cache, metadata_file)
         logger.verbose("Took %s to remove %s from cache.", timer,
                        pluralize(len(to_remove), "archive"))
     else:
         logger.verbose(
             "Wasted %s checking whether cache needs to be cleaned (it doesn't).",
             timer)
Пример #39
0
def check_mandatory_fields(control_fields):
    """
    Make sure mandatory binary control fields are defined.

    :param control_fields: A dictionary with control file fields.
    :raises: :exc:`~exceptions.ValueError` when a mandatory binary control
             field is not present in the provided control fields (see also
             :data:`MANDATORY_BINARY_CONTROL_FIELDS`).
    """
    # A field counts as missing when it's absent or has a falsy value.
    missing_fields = sorted(
        name for name in MANDATORY_BINARY_CONTROL_FIELDS
        if not control_fields.get(name)
    )
    if not missing_fields:
        return
    raise ValueError(
        compact(
            "Missing {fields}! ({details})",
            fields=pluralize(len(missing_fields),
                             "mandatory binary package control field"),
            details=concatenate(missing_fields),
        ))
Пример #40
0
def collect_packages(archives, directory, prompt=True, cache=None, concurrency=None):
    """
    Interactively copy packages and their dependencies.

    :param archives: An iterable of strings with the filenames of one or more
                     ``*.deb`` files.
    :param directory: The pathname of a directory where the package archives
                      and dependencies should be copied to (a string).
    :param prompt: :data:`True` (the default) to ask confirmation from the
                   operator (using a confirmation prompt rendered on the
                   terminal), :data:`False` to skip the prompt.
    :param cache: The :class:`.PackageCache` to use (defaults to :data:`None`).
    :param concurrency: Override the number of concurrent processes (defaults
                        to the number of `archives` given or to the value of
                        :func:`multiprocessing.cpu_count()`, whichever is
                        smaller).
    :raises: :exc:`~exceptions.ValueError` when no archives are given.

    When more than one archive is given a :mod:`multiprocessing` pool is used
    to collect related archives concurrently, in order to speed up the process
    of collecting large dependency sets.
    """
    archives = list(archives)
    related_archives = set(map(parse_filename, archives))
    if not archives:
        raise ValueError("At least one package archive is required!")
    elif len(archives) == 1:
        # Find the related packages of a single archive.
        related_archives.update(collect_related_packages(archives[0], cache=cache))
    else:
        # Find the related packages of multiple archives (concurrently).
        with AutomaticSpinner(label="Collecting related packages"):
            concurrency = min(len(archives), concurrency or multiprocessing.cpu_count())
            pool = multiprocessing.Pool(concurrency)
            try:
                arguments = [(archive, cache) for archive in archives]
                for result in pool.map(collect_packages_worker, arguments, chunksize=1):
                    related_archives.update(result)
            finally:
                # Make sure the worker processes don't linger after an exception.
                pool.terminate()
    # Ignore package archives that are already in the target directory.
    relevant_archives = set()
    for archive in related_archives:
        basename = os.path.basename(archive.filename)
        if not os.path.isfile(os.path.join(directory, basename)):
            relevant_archives.add(archive)
    # Interactively move the package archives.
    if relevant_archives:
        relevant_archives = sorted(relevant_archives)
        pluralized = pluralize(len(relevant_archives), "package archive", "package archives")
        say("Found %s:", pluralized)
        for file_to_collect in relevant_archives:
            say(" - %s", format_path(file_to_collect.filename))
        prompt_text = "Copy %s to %s?" % (pluralized, format_path(directory))
        # The prompt defaults to "yes"; declining leaves the files untouched.
        if prompt and not prompt_for_confirmation(prompt_text, default=True, padding=False):
            logger.warning("Not copying archive(s) to %s! (aborted by user)", format_path(directory))
        else:
            # Link or copy the file(s).
            for file_to_collect in relevant_archives:
                src = file_to_collect.filename
                dst = os.path.join(directory, os.path.basename(src))
                smart_copy(src, dst)
            logger.info("Done! Copied %s to %s.", pluralized, format_path(directory))
    else:
        logger.info("Nothing to do! (%s previously copied)",
                    pluralize(len(related_archives), "package archive"))