Ejemplo n.º 1
0
    def find_egg_info_file(self, pattern=''):
        """
        Find a pip metadata file in an unpacked source distribution.

        When pip unpacks a source distribution archive it creates a directory
        ``pip-egg-info`` which contains the package metadata in a declarative
        and easy to parse format. This method finds such metadata files.

        :param pattern: The :mod:`glob` pattern to search for (a string).
        :returns: The matched filename (a string) or :data:`None` when no
                  file matched the given pattern.
        :raises: :exc:`Exception` when the source distribution contains
                 multiple ``*.egg-info`` directories (the match would be
                 ambiguous).
        """
        full_pattern = os.path.join(self.requirement.source_directory,
                                    'pip-egg-info', '*.egg-info', pattern)
        logger.debug("Looking for %r file(s) using pattern %r ..", pattern,
                     full_pattern)
        matches = glob.glob(full_pattern)
        if len(matches) > 1:
            msg = "Source distribution directory of %s (%s) contains multiple *.egg-info directories: %s"
            raise Exception(
                msg % (self.requirement.project_name, self.requirement.version,
                       concatenate(matches)))
        elif matches:
            logger.debug("Matched %s: %s.",
                         pluralize(len(matches), "file", "files"),
                         concatenate(matches))
            return matches[0]
        else:
            logger.debug("No matching %r files found.", pattern)
Ejemplo n.º 2
0
 def test_concatenate(self):
     """Test :func:`humanfriendly.concatenate()`."""
     # Table-driven: map each input word list to its expected rendering.
     cases = [
         ([], ''),
         (['one'], 'one'),
         (['one', 'two'], 'one and two'),
         (['one', 'two', 'three'], 'one, two and three'),
     ]
     for words, expected in cases:
         self.assertEqual(humanfriendly.concatenate(words), expected)
Ejemplo n.º 3
0
def load_config():
    """
    Load settings from the configuration files listed in :data:`CONFIG_FILES`.

    :returns: A dictionary with the configured location and display
              brightness controllers.
    :raises: :py:exc:`ConfigurationError` when no configuration files could
             be loaded or when the parsing or validation of a configuration
             file fails.
    """
    parser = ConfigParser.ConfigParser()
    config = {'location': {}, 'controllers': []}
    loaded_files = parser.read(map(os.path.expanduser, CONFIG_FILES))
    if not loaded_files:
        msg = "No configuration files loaded! Please review the documentation on how to get started!"
        raise ConfigurationError(msg)
    logger.debug("Loading configuration file(s): %s",
                 concatenate(loaded_files))
    for section in parser.sections():
        options = dict(parser.items(section))
        if section == 'location':
            config['location'].update(options)
        else:
            # Sections other than [location] must be of the form
            # [display:friendly-name].
            tag, _, friendly_name = section.partition(':')
            if tag != 'display':
                msg = "Unsupported section %r in configuration file!"
                raise ConfigurationError(msg % section)
            if 'output-name' in options:
                config['controllers'].append(
                    SoftwareBrightnessController(
                        friendly_name=friendly_name,
                        minimum_percentage=int(options['min-brightness']),
                        maximum_percentage=int(options['max-brightness']),
                        output_name=options['output-name'],
                    ))
            elif 'sys-directory' in options:
                config['controllers'].append(
                    BacklightBrightnessController(
                        friendly_name=friendly_name,
                        minimum_percentage=int(options['min-brightness']),
                        maximum_percentage=int(options['max-brightness']),
                        sys_directory=options['sys-directory'],
                    ))
            else:
                msg = "Don't know how to control brightness of %r display defined in configuration file!"
                raise ConfigurationError(msg % friendly_name)
    # Make sure the configuration file defines the essential settings.
    expected_location_keys = ('latitude', 'longitude', 'elevation')
    if not all(k in config['location'] for k in expected_location_keys):
        msg = "You need to define the %s options in the [location] section of the configuration file!"
        raise ConfigurationError(
            msg % concatenate(map(repr, expected_location_keys)))
    if not config['controllers']:
        msg = "You need to define one or more displays in the configuration file!"
        raise ConfigurationError(msg)
    return config
Ejemplo n.º 4
0
def wait_for_processes(processes):
    """
    Wait for the given processes to end.

    Prints an overview of running processes to the terminal once a second so
    the user knows what they are waiting for.

    This function is not specific to :mod:`proc.cron` at all (it doesn't
    even need to know what cron jobs are), it just waits until all of the given
    processes have ended.

    :param processes: A list of :class:`~proc.tree.ProcessNode` objects.
    """
    wait_timer = Timer()
    pending = list(processes)
    # Announce every process we are about to wait for.
    for proc in pending:
        logger.info("Waiting for process %i: %s (runtime is %s)", proc.pid,
                    quote(proc.cmdline),
                    format_timespan(round(proc.runtime)))
    with Spinner(timer=wait_timer) as spinner:
        while True:
            # Drop the processes that have ended since the last check.
            pending = [p for p in pending if p.is_alive]
            if not pending:
                break
            label = "Waiting for %s: %s" % (
                pluralize(len(pending), "process", "processes"),
                concatenate(str(p.pid) for p in pending))
            spinner.step(label=label)
            spinner.sleep()
    logger.info("All processes have finished, we're done waiting (took %s).",
                wait_timer.rounded)
Ejemplo n.º 5
0
def create_control_file(control_file, control_fields):
    """
    Create a Debian control file.

    :param control_file: The filename of the control file to create (a string).
    :param control_fields: A dictionary with control file fields. This
                           dictionary is merged with the values in
                           :data:`DEFAULT_CONTROL_FIELDS`.
    :raises: :exc:`~exceptions.ValueError` when a mandatory binary control
             field is not present in the provided control fields (see also
             :data:`MANDATORY_BINARY_CONTROL_FIELDS`).
    """
    logger.debug("Creating control file: %s", format_path(control_file))
    # Merge the defaults with the fields defined by the caller.
    merged_fields = merge_control_fields(DEFAULT_CONTROL_FIELDS, control_fields)
    # Refuse to create a control file that lacks mandatory fields.
    missing_fields = sorted(f for f in MANDATORY_BINARY_CONTROL_FIELDS
                            if f not in merged_fields)
    if missing_fields:
        raise ValueError("Missing %s! (%s)" % (
            pluralize(len(missing_fields), "mandatory binary package control field"),
            concatenate(missing_fields)))
    # Make sure the parent directory of the control file exists.
    makedirs(os.path.dirname(control_file))
    # Remove the control file if it already exists in case it's a hard link to
    # an inode with multiple hard links that should _not_ be changed by us.
    if os.path.exists(control_file):
        os.unlink(control_file)
    # Write the control file.
    with open(control_file, 'wb') as handle:
        merged_fields.dump(handle)
Ejemplo n.º 6
0
    def __init__(self, config):
        """
        Initialize a cache manager.

        Automatically initializes instances of all registered cache backends
        based on setuptools' support for entry points which makes it possible
        for external Python packages to register additional cache backends
        without any modifications to pip-accel.

        :param config: The pip-accel configuration (a :py:class:`.Config`
                       object).
        """
        self.config = config
        # Importing a backend module triggers its registration.
        entry_points = get_entry_map('pip-accel', 'pip_accel.cache_backends')
        for entry_point in entry_points.values():
            logger.debug("Importing cache backend: %s",
                         entry_point.module_name)
            __import__(entry_point.module_name)
        # Instantiate every registered backend (skipping the abstract base
        # class) and sort by priority so that e.g. the local file system is
        # checked before S3.
        instances = [backend_class(self.config)
                     for backend_class in registered_backends
                     if backend_class != AbstractCacheBackend]
        self.backends = sorted(instances, key=lambda backend: backend.PRIORITY)
        logger.debug("Initialized %s: %s",
                     pluralize(len(self.backends), "cache backend"),
                     concatenate(map(repr, self.backends)))
Ejemplo n.º 7
0
 def __str__(self):
     """The name, version and extras of the package encoded in a human readable string."""
     details = [self.python_version]
     extras = self.requirement.pip_requirement.extras
     if extras:
         details.append("extras: %s" % concatenate(sorted(extras)))
     return "%s (%s)" % (self.python_name, ', '.join(details))
 def execute_helper(self):
     """Helper for :func:`execute()`."""
     timer = Timer()
     actions = []
     if self.backup_enabled:
         self.notify_starting()
     self.unlock_device()
     try:
         self.mount_filesystem()
         # Run each enabled step in order and record what was done.
         steps = (
             (self.backup_enabled, self.transfer_changes, 'create backup'),
             (self.snapshot_enabled, self.create_snapshot, 'create snapshot'),
             (self.rotate_enabled, self.rotate_snapshots, 'rotate old snapshots'),
         )
         for enabled, step, description in steps:
             if enabled:
                 step()
                 actions.append(description)
     except Exception:
         self.notify_failed(timer)
         raise
     else:
         if self.backup_enabled:
             self.notify_finished(timer)
         if actions:
             logger.info("Took %s to %s.", timer, concatenate(actions))
Ejemplo n.º 9
0
 async def download_all_conversations(self, conversation_list):
     """Download conversations from Google Hangouts."""
     timer = Timer()
     for conversation in conversation_list.get_all(include_archived=True):
         try:
             await self.download_conversation(conversation)
         except Exception:
             logger.warning("Skipping conversation due to synchronization error ..", exc_info=True)
             self.stats.failed_conversations += 1
         self.stats.show()
     # Summarize what was added (only nonzero counters are mentioned).
     summary = [pluralize(count, noun) for count, noun in (
         (self.stats.conversations_added, "conversation"),
         (self.stats.messages_added, "message"),
     ) if count > 0]
     if summary:
         logger.info("Added %s in %s.", concatenate(summary), timer)
     else:
         logger.info("No new conversations or messages found (took %s to check).", timer)
     failed = self.stats.failed_conversations
     if failed > 0:
         logger.warning(
             "Skipped %s due to synchronization %s!",
             pluralize(failed, "conversation"),
             "errors" if failed > 1 else "error",
         )
     skipped = self.stats.skipped_conversations
     if skipped > 0:
         logger.notice(
             "Skipped %s due to previous synchronization %s! (use --force to retry %s)",
             pluralize(skipped, "conversation"),
             "errors" if skipped > 1 else "error",
             "them" if skipped > 1 else "it",
         )
Ejemplo n.º 10
0
 def render_conversation_summary(self, conversation):
     """Render a summary of which conversation a message is part of."""
     # Collect participant names; the operator's own name is excluded from
     # private conversations (we can safely assume they know who they are 😇).
     is_group = conversation.is_group_conversation
     names = set()
     for contact in conversation.participants:
         if is_group:
             names.add(contact.unambiguous_name)
         elif not self.is_operator(contact):
             names.add(contact.full_name or UNKNOWN_CONTACT_LABEL)
     participants = sorted(names)
     parts = [
         self.get_backend_name(conversation.account.backend),
         "group" if is_group else "private",
         "chat",
     ]
     if conversation.name:
         parts.append(
             self.generate_html("conversation_name",
                                html.escape(conversation.name)))
     parts.append("with")
     participants_html = concatenate(html.escape(name) for name in participants)
     if is_group:
         parts.append(pluralize(len(participants), "participant"))
         parts.append("(%s)" % participants_html)
     else:
         parts.append(
             self.generate_html("conversation_name", participants_html))
     if conversation.account.name_is_significant:
         parts.append("in %s account" % conversation.account.name)
     return " ".join(parts)
Ejemplo n.º 11
0
 def execute_helper(self):
     """Helper for :func:`execute()`."""
     timer = Timer()
     actions = []
     # Refuse to continue when the encrypted device is not available.
     if self.crypto_device and not self.crypto_device_available:
         msg = "Encrypted filesystem %s isn't available! (the device file %s doesn't exist)"
         raise MissingBackupDiskError(
             msg % (self.crypto_device, self.crypttab_entry.source_device))
     if self.backup_enabled:
         self.notify_starting()
     self.unlock_device()
     try:
         self.mount_filesystem()
         # Run each enabled step in order and record what was done.
         steps = (
             (self.backup_enabled, self.transfer_changes, 'create backup'),
             (self.snapshot_enabled, self.create_snapshot, 'create snapshot'),
             (self.rotate_enabled, self.rotate_snapshots, 'rotate old snapshots'),
         )
         for enabled, step, description in steps:
             if enabled:
                 step()
                 actions.append(description)
     except Exception:
         self.notify_failed(timer)
         raise
     else:
         if self.backup_enabled:
             self.notify_finished(timer)
         if actions:
             logger.info("Took %s to %s.", timer, concatenate(actions))
Ejemplo n.º 12
0
def format_timespan(num_seconds):
    """
    Format a number of seconds as a human readable timespan.

    Taken from the humanfriendly library and changed the time units to
    their abbreviations.
    """
    if num_seconds < 60:
        # Fast path: less than a minute.
        return pluralize(round_number(num_seconds, False), 's', 's')
    # Slow path: decompose into the largest applicable units.
    parts = []
    for unit in reversed(time_units):
        divider = unit['divider']
        if num_seconds >= divider:
            count = int(num_seconds / divider)
            num_seconds %= divider
            parts.append(pluralize(count, unit['singular'], unit['plural']))
    if len(parts) == 1:
        # A single count/unit combination.
        return parts[0]
    # Keep only the two most significant units so the result stays readable.
    return concatenate(parts[:2])
Ejemplo n.º 13
0
    def install_requirements(self, requirements, **kw):
        """
        Manually install a requirement set from binary and/or wheel distributions.

        :param requirements: A list of :class:`pip_accel.req.Requirement` objects.
        :param kw: Any keyword arguments are passed on to
                   :func:`~pip_accel.bdist.BinaryDistributionManager.install_binary_dist()`.
        :returns: The number of packages that were just installed (an integer).
        """
        install_timer = Timer()
        # Figure out which kinds of distributions we're installing from, just
        # for the log message below.
        install_types = []
        if any(not req.is_wheel for req in requirements):
            install_types.append('binary')
        if any(req.is_wheel for req in requirements):
            install_types.append('wheel')
        logger.info("Installing from %s distributions ..",
                    concatenate(install_types))
        # Track installed files by default (unless the caller specifically opted out).
        kw.setdefault('track_installed_files', True)
        num_installed = 0
        for requirement in requirements:
            # If we're upgrading over an older version, first remove the
            # old version to make sure we don't leave files from old
            # versions around.
            if is_installed(requirement.name):
                uninstall(requirement.name)
            # When installing setuptools we need to uninstall distribute,
            # otherwise distribute will shadow setuptools and all sorts of
            # strange issues can occur (e.g. upgrading to the latest
            # setuptools to gain wheel support and then having everything
            # blow up because distribute doesn't know about wheels).
            if requirement.name == 'setuptools' and is_installed('distribute'):
                uninstall('distribute')
            if requirement.is_editable:
                # Editable ("pip install -e") requirements are delegated to
                # pip itself, without dependency resolution.
                logger.debug("Installing %s in editable form using pip.",
                             requirement)
                command = InstallCommand()
                opts, args = command.parse_args(
                    ['--no-deps', '--editable', requirement.source_directory])
                command.run(opts, args)
            elif requirement.is_wheel:
                # Wheels are unpacked with pip's wheel module after checking
                # that the wheel's metadata version is compatible.
                logger.info("Installing %s wheel distribution using pip ..",
                            requirement)
                wheel_version = pip_wheel_module.wheel_version(
                    requirement.source_directory)
                pip_wheel_module.check_compatibility(wheel_version,
                                                     requirement.name)
                requirement.pip_requirement.move_wheel_files(
                    requirement.source_directory)
            else:
                # Everything else goes through pip-accel's cached binary
                # distribution mechanism.
                binary_distribution = self.bdists.get_binary_dist(requirement)
                self.bdists.install_binary_dist(binary_distribution, **kw)
            num_installed += 1
        logger.info("Finished installing %s in %s.",
                    pluralize(num_installed, "requirement"), install_timer)
        return num_installed
Ejemplo n.º 14
0
    def __init__(self, **kw):
        """
        Initialize a :class:`PropertyManager` object.

        :param kw: Any keyword arguments are passed on to :func:`set_properties()`.
        :raises: :exc:`TypeError` when a required property wasn't given.
        """
        self.set_properties(**kw)
        missing = self.missing_properties
        if missing:
            raise TypeError("missing %s (%s)" % (
                pluralize(len(missing), "required argument"),
                concatenate(missing)))
Ejemplo n.º 15
0
 def overview(self):
     """Render an overview with related members grouped together."""
     bases = self.type.__bases__
     superclass_links = concatenate(
         format(":class:`~%s.%s`", b.__module__, b.__name__)
         for b in bases)
     return (
         ("Superclass" if len(bases) == 1 else "Superclasses", superclass_links),
         ("Special methods", self.format_methods(self.special_methods)),
         ("Public methods", self.format_methods(self.public_methods)),
         ("Properties",
          self.format_properties(name for name, value in self.properties)),
     )
Ejemplo n.º 16
0
    def __init__(self, config):
        """
        Initialize the system package dependency manager.

        :param config: The pip-accel configuration (a :class:`.Config`
                       object).
        """
        # Defaults for unsupported systems. The 'true' command always
        # succeeds and does nothing, so unsupported systems degrade to
        # harmless no-ops instead of crashing.
        self.list_command = 'true'
        self.install_command = 'true'
        self.dependencies = {}
        # Keep a reference to the pip-accel configuration.
        self.config = config
        # Initialize the platform specific package manager interface: every
        # *.ini file next to this module describes one package manager.
        directory = os.path.dirname(os.path.abspath(__file__))
        for filename in sorted(os.listdir(directory)):
            pathname = os.path.join(directory, filename)
            if filename.endswith('.ini') and os.path.isfile(pathname):
                logger.debug("Loading configuration from %s ..", pathname)
                parser = configparser.RawConfigParser()
                parser.read(pathname)
                # Check if the package manager is supported: the exit status
                # of the 'supported' shell command decides whether this
                # configuration applies (its output is discarded).
                supported_command = parser.get('commands', 'supported')
                logger.debug("Checking if configuration is supported: %s",
                             supported_command)
                with open(os.devnull, 'wb') as null_device:
                    if subprocess.call(supported_command,
                                       shell=True,
                                       stdout=null_device,
                                       stderr=subprocess.STDOUT) == 0:
                        logger.debug(
                            "System package manager configuration is supported!"
                        )
                        # Get the commands to list and install system packages.
                        self.list_command = parser.get('commands', 'list')
                        self.install_command = parser.get(
                            'commands', 'install')
                        # Get the known dependencies: a mapping of lowercased
                        # Python package names to lists of system package names.
                        self.dependencies = dict(
                            (n.lower(), v.split())
                            for n, v in parser.items('dependencies'))
                        logger.debug(
                            "Loaded dependencies of %s: %s",
                            pluralize(len(self.dependencies),
                                      "Python package"),
                            concatenate(sorted(self.dependencies)))
                    else:
                        logger.debug(
                            "Command failed, assuming configuration doesn't apply .."
                        )
Ejemplo n.º 17
0
    def installation_refused(self, requirement, missing_dependencies, reason):
        """
        Raise :py:exc:`.DependencyInstallationRefused` with a user friendly message.

        :param requirement: A :py:class:`.Requirement` object.
        :param missing_dependencies: A list of strings with missing dependencies.
        :param reason: The reason why installation was refused (a string).
        """
        summary = pluralize(len(missing_dependencies), "system package",
                            "system packages")
        details = concatenate(missing_dependencies)
        msg = "Missing %s (%s) required by Python package %s (%s) but %s!"
        raise DependencyInstallationRefused(
            msg % (summary, details, requirement.name,
                   requirement.version, reason))
Ejemplo n.º 18
0
 def show(self):
     """Show statistics about imported conversations, messages, contacts, etc."""
     additions = []
     if self.conversations_added > 0:
         additions.append(pluralize(self.conversations_added, "conversation"))
     if self.messages_added > 0:
         additions.append(pluralize(self.messages_added, "message"))
     if self.contacts_added > 0:
         additions.append(pluralize(self.contacts_added, "contact"))
     if self.email_addresses_added > 0:
         # Bug fix: this previously reported self.contacts_added instead of
         # the email address counter that the condition checks.
         additions.append(pluralize(self.email_addresses_added, "email address", "email addresses"))
     if self.telephone_numbers_added > 0:
         additions.append(pluralize(self.telephone_numbers_added, "telephone number"))
     if additions:
         logger.info("Imported %s.", concatenate(additions))
Ejemplo n.º 19
0
def find_latest_version(packages):
    """
    Find the package archive with the highest version number. Uses
    :py:class:`.Version` objects for version comparison. Raises
    :py:exc:`ValueError` when not all of the given package archives share the
    same package name.

    :param packages: A list of filenames (strings) and/or
                     :py:class:`PackageFile` objects.
    :returns: The :py:class:`PackageFile` with
              the highest version number.
    """
    # Parse and sort by version; the newest archive ends up last.
    parsed = sorted(map(parse_filename, packages))
    distinct_names = set(archive.name for archive in parsed)
    if len(distinct_names) > 1:
        msg = "Refusing to compare unrelated packages! (%s)"
        raise ValueError(msg % concatenate(sorted(distinct_names)))
    return parsed[-1]
Ejemplo n.º 20
0
def cron_graceful(arguments):
    """Command line interface for the ``cron-graceful`` program."""
    runtime_timer = Timer()
    # Initialize logging to the terminal.
    dry_run = parse_arguments(arguments)
    if not dry_run:
        ensure_root_privileges()
    try:
        cron_daemon = find_cron_daemon()
    except CronDaemonNotRunning:
        logger.info(
            "No running cron daemon found, assuming it was previously stopped .."
        )
    else:
        if not dry_run:
            # Prevent the cron daemon from starting new cron jobs.
            cron_daemon.suspend()
            # Enable user defined additional logic.
            run_additions()
        # Identify the running cron jobs based on the process tree _after_ the
        # cron daemon has been paused (assuming we're not performing a dry run)
        # so we know for sure that we see all running cron jobs (also we're not
        # interested in any processes that have already been stopped by
        # cron-graceful-additions).
        cron_daemon = find_cron_daemon()
        cron_jobs = sorted_by_pid(cron_daemon.grandchildren)
        if not cron_jobs:
            logger.info("No running cron jobs found.")
        else:
            logger.info("Found %s: %s",
                        pluralize(len(cron_jobs), "running cron job"),
                        concatenate(str(job.pid) for job in cron_jobs))
            # Wait for the running cron jobs to finish.
            wait_for_processes(cron_jobs)
        # Terminate the cron daemon (only report it during a dry run).
        if dry_run:
            logger.info("Stopping cron daemon with process id %i ..",
                        cron_daemon.pid)
        else:
            terminate_cron_daemon(cron_daemon)
        logger.info("Done! Took %s to gracefully terminate cron.",
                    runtime_timer.rounded)
Ejemplo n.º 21
0
    def find_known_dependencies(self, requirement):
        """
        Find the known dependencies of a Python package.

        :param requirement: A :py:class:`.Requirement` object.
        :returns: A list of strings with system package names.
        """
        logger.info("Checking for known dependencies of %s ..",
                    requirement.name)
        # Dependency names are stored lowercased, so normalize the lookup key.
        known_dependencies = sorted(
            self.dependencies.get(requirement.name.lower(), []))
        if not known_dependencies:
            logger.info(
                "No known dependencies... Maybe you have a suggestion?")
        else:
            logger.info(
                "Found %s: %s",
                pluralize(len(known_dependencies),
                          "known dependency", "known dependencies"),
                concatenate(known_dependencies))
        return known_dependencies
Ejemplo n.º 22
0
def load_config(repository):
    """
    Load repository specific configuration options.

    :param repository: The pathname of a repository directory (a string).
    :returns: A dictionary with configuration options: the options from the
              ``default`` section merged with the first section whose
              ``directory`` option matches `repository` (using :mod:`fnmatch`
              patterns). An empty dictionary is returned when no configuration
              file contains a matching section.
    """
    repository = os.path.abspath(repository)
    candidate_dirs = (config.user_config_directory,
                      config.system_config_directory)
    for config_dir in candidate_dirs:
        config_file = os.path.join(config_dir, config.repo_config_file)
        if not os.path.isfile(config_file):
            continue
        logger.debug("Loading configuration from %s ..",
                     format_path(config_file))
        parser = configparser.RawConfigParser()
        parser.read(config_file)
        sections = {section: dict(parser.items(section))
                    for section in parser.sections()}
        # Options in the `default' section provide the base values that a
        # matching section's options are layered on top of.
        defaults = sections.get('default', {})
        logger.debug("Found %i sections: %s", len(sections),
                     concatenate(parser.sections()))
        for name, options in sections.items():
            directory = options.get('directory')
            if directory and fnmatch.fnmatch(repository, directory):
                defaults.update(options)
                return defaults
    return {}
Ejemplo n.º 23
0
 def listen_addresses(self):
     """
     Discover the network address(es) where Apache is listening.

     :returns: A list of :class:`NetworkAddress` objects.
     :raises: :exc:`AddressDiscoveryError` when the ports configuration file
              is missing or doesn't contain any usable ``Listen`` directives.
     """
     logger.debug("Discovering where Apache is listening by parsing %s ..", self.ports_config)
     # Sanity check: without the ports configuration file there's nothing to parse.
     # Bug fix: the format strings below lacked the {filename} placeholder even
     # though the `filename' keyword argument was passed to compact(), so the
     # pathname never showed up in the error messages.
     if not os.path.isfile(self.ports_config):
         raise AddressDiscoveryError(compact("""
             Failed to discover any addresses or ports that Apache is
             listening on! The configuration file {filename} is missing.
         """, filename=self.ports_config))
     matched_addresses = []
     # Matches `address:port' values; the greedy first group tolerates
     # addresses that contain colons themselves (e.g. IPv6).
     pattern = re.compile(r'^(.+):(\d+)$')
     with open(self.ports_config) as handle:
         for lnum, line in enumerate(handle, start=1):
             tokens = line.split()
             # Only `Listen' directives are relevant here.
             if len(tokens) >= 2 and tokens[0] == 'Listen':
                 parsed_value = None
                 if tokens[1].isdigit():
                     # A bare port number (no address given).
                     parsed_value = NetworkAddress(port=int(tokens[1]))
                 else:
                     match = pattern.match(tokens[1])
                     if match:
                         address = match.group(1)
                         port = int(match.group(2))
                         # Translate the wildcard address into the loopback
                         # address so the result is actually connectable.
                         if address == '0.0.0.0':
                             address = '127.0.0.1'
                         parsed_value = NetworkAddress(address=address, port=port)
                 if parsed_value is not None:
                     # An optional third token overrides the protocol.
                     if len(tokens) >= 3:
                         parsed_value.protocol = tokens[2]
                     logger.debug("Parsed listen directive on line %i: %s", lnum, parsed_value)
                     matched_addresses.append(parsed_value)
                 else:
                     logger.warning("Failed to parse listen directive on line %i: %s", lnum, line)
     if not matched_addresses:
         raise AddressDiscoveryError(compact("""
             Failed to discover any addresses or ports that Apache is
             listening on! Maybe I'm parsing the wrong configuration file?
             ({filename})
         """, filename=self.ports_config))
     logger.debug("Discovered %s that Apache is listening on: %s",
                  pluralize(len(matched_addresses), "address", "addresses"),
                  concatenate(map(str, matched_addresses)))
     return matched_addresses
Ejemplo n.º 24
0
    def find_missing_dependencies(self, requirement):
        """
        Find missing dependencies of a Python package.

        :param requirement: A :py:class:`.Requirement` object.
        :returns: A list of strings with system package names (an empty list
                  when there are no known or no missing dependencies).
        """
        known_dependencies = self.find_known_dependencies(requirement)
        if not known_dependencies:
            # Bug fix: this method used to fall off the end and implicitly
            # return None here, contradicting the documented list return
            # value. Returning an empty list is backward compatible because
            # both values are falsy.
            return []
        installed_packages = self.find_installed_packages()
        logger.debug("Checking for missing dependencies of %s ..",
                     requirement.name)
        missing_dependencies = sorted(
            set(known_dependencies).difference(installed_packages))
        if missing_dependencies:
            logger.debug(
                "Found %s: %s",
                pluralize(len(missing_dependencies), "missing dependency",
                          "missing dependencies"),
                concatenate(missing_dependencies))
        else:
            logger.info("All known dependencies are already installed.")
        return missing_dependencies
Ejemplo n.º 25
0
    def install_dependencies(self, requirement):
        """
        If :py:mod:`pip_accel` fails to build a binary distribution, it will
        call this method as a last chance to install missing dependencies. If
        this function does not raise an exception, :py:mod:`pip_accel` will
        retry the build once.

        :param requirement: A :py:class:`.Requirement` object.
        :returns: ``True`` when missing system packages were installed,
                  ``False`` otherwise.
        :raises: :py:exc:`.DependencyInstallationRefused` when automatic
                 installation is disabled or refused by the operator.
        :raises: :py:exc:`.DependencyInstallationFailed` when the installation
                 of missing system packages fails.
        """
        # Measure how long the installation takes (reported on success).
        install_timer = Timer()
        missing_dependencies = self.find_missing_dependencies(requirement)
        if missing_dependencies:
            # Compose the command line for the install command.
            install_command = shlex.split(
                self.install_command) + missing_dependencies
            if os.getuid() != 0:
                # Prepend `sudo' to the command line.
                install_command.insert(0, 'sudo')
            # Always suggest the installation command to the operator.
            logger.info(
                "You seem to be missing %s: %s",
                pluralize(len(missing_dependencies), "dependency",
                          "dependencies"), concatenate(missing_dependencies))
            logger.info("You can install %s with this command: %s",
                        "it" if len(missing_dependencies) == 1 else "them",
                        " ".join(install_command))
            if self.config.auto_install is False:
                # Refuse automatic installation and don't prompt the operator when the configuration says no.
                self.installation_refused(
                    requirement, missing_dependencies,
                    "automatic installation is disabled")
            # Get the operator's permission to install the missing package(s).
            # NOTE: `auto_install' short-circuits the confirmation prompt, so
            # the operator is only asked when automatic installation is unset.
            if self.config.auto_install or self.confirm_installation(
                    requirement, missing_dependencies, install_command):
                logger.info(
                    "Got permission to install %s.",
                    pluralize(len(missing_dependencies), "dependency",
                              "dependencies"))
            else:
                logger.error(
                    "Refused installation of missing %s!", "dependency"
                    if len(missing_dependencies) == 1 else "dependencies")
                self.installation_refused(requirement, missing_dependencies,
                                          "manual installation was refused")
            # Run the install command; a zero exit code signals success.
            if subprocess.call(install_command) == 0:
                logger.info(
                    "Successfully installed %s in %s.",
                    pluralize(len(missing_dependencies), "dependency",
                              "dependencies"), install_timer)
                return True
            else:
                logger.error(
                    "Failed to install %s.",
                    pluralize(len(missing_dependencies), "dependency",
                              "dependencies"))
                msg = "Failed to install %s required by Python package %s! (%s)"
                raise DependencyInstallationFailed(
                    msg %
                    (pluralize(len(missing_dependencies), "system package",
                               "system packages"), requirement.name,
                     concatenate(missing_dependencies)))
        return False
Ejemplo n.º 26
0
    def build_binary_dist_helper(self, requirement, setup_command):
        """
        Convert an unpacked source distribution to a binary distribution.

        :param requirement: A :class:`.Requirement` object.
        :param setup_command: A list of strings with the arguments to
                              ``setup.py``.
        :returns: The pathname of the resulting binary distribution (a string).
        :raises: :exc:`.InvalidSourceDistribution` when the unpacked source
                 distribution doesn't contain a ``setup.py`` script.
        :raises: :exc:`.BuildFailed` when the build reports an error (e.g.
                 because of missing binary dependencies like system
                 libraries).
        :raises: :exc:`.NoBuildOutput` when the build does not produce the
                 expected binary distribution archive.
        """
        # Measure how long the build takes (reported when it finishes).
        build_timer = Timer()
        # Make sure the source distribution contains a setup script.
        setup_script = os.path.join(requirement.source_directory, 'setup.py')
        if not os.path.isfile(setup_script):
            msg = "Directory %s (%s %s) doesn't contain a source distribution!"
            raise InvalidSourceDistribution(
                msg % (requirement.source_directory, requirement.name,
                       requirement.version))
        # Let the user know what's going on.
        build_text = "Building %s binary distribution" % requirement
        logger.info("%s ..", build_text)
        # Cleanup previously generated distributions.
        dist_directory = os.path.join(requirement.source_directory, 'dist')
        if os.path.isdir(dist_directory):
            logger.debug(
                "Cleaning up previously generated distributions in %s ..",
                dist_directory)
            shutil.rmtree(dist_directory)
        # Let the user know (approximately) which command is being executed
        # (I don't think it's necessary to show them the nasty details :-).
        logger.debug(
            "Executing external command: %s", ' '.join(
                map(pipes.quote, [self.config.python_executable, 'setup.py'] +
                    setup_command)))
        # Compose the command line needed to build the binary distribution.
        # This nasty command line forces the use of setuptools (instead of
        # distutils) just like pip does. This will cause the `*.egg-info'
        # metadata to be written to a directory instead of a file, which
        # (amongst other things) enables tracking of installed files.
        command_line = [
            self.config.python_executable, '-c', ';'.join([
                'import setuptools',
                '__file__=%r' % setup_script,
                r"exec(compile(open(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))",
            ])
        ] + setup_command
        # Redirect all output of the build to a temporary file.
        fd, temporary_file = tempfile.mkstemp()
        try:
            # Start the build. Both stdout and stderr go to the temporary
            # file so the full output can be attached to error messages.
            build = subprocess.Popen(command_line,
                                     cwd=requirement.source_directory,
                                     stdout=fd,
                                     stderr=fd)
            # Wait for the build to finish and provide feedback to the user in the mean time.
            spinner = Spinner(label=build_text, timer=build_timer)
            while build.poll() is None:
                spinner.step()
                # Don't tax the CPU too much.
                time.sleep(0.2)
            spinner.clear()
            # Make sure the build succeeded and produced a binary distribution archive.
            try:
                # If the build reported an error we'll try to provide the user with
                # some hints about what went wrong.
                if build.returncode != 0:
                    raise BuildFailed(
                        "Failed to build {name} ({version}) binary distribution!",
                        name=requirement.name,
                        version=requirement.version)
                # Check if the build created the `dist' directory (the os.listdir()
                # call below will raise an exception if we don't check for this).
                if not os.path.isdir(dist_directory):
                    raise NoBuildOutput(
                        "Build of {name} ({version}) did not produce a binary distribution archive!",
                        name=requirement.name,
                        version=requirement.version)
                # Check if we can find the binary distribution archive.
                filenames = os.listdir(dist_directory)
                if len(filenames) != 1:
                    variables = dict(name=requirement.name,
                                     version=requirement.version,
                                     filenames=concatenate(sorted(filenames)))
                    raise NoBuildOutput(
                        """
                        Build of {name} ({version}) produced more than one
                        distribution archive! (matches: {filenames})
                    """, **variables)
            except Exception as e:
                # Decorate the exception with the output of the failed build.
                with open(temporary_file) as handle:
                    build_output = handle.read()
                enhanced_message = compact("""
                    {message}

                    Please check the build output because it will probably
                    provide a hint about what went wrong.

                    Build output:

                    {output}
                """,
                                           message=e.args[0],
                                           output=build_output.strip())
                e.args = (enhanced_message, )
                raise
            logger.info("Finished building %s in %s.", requirement.name,
                        build_timer)
            return os.path.join(dist_directory, filenames[0])
        finally:
            # Close file descriptor before removing the temporary file.
            # Without closing Windows is complaining that the file cannot
            # be removed because it is used by another process.
            os.close(fd)
            os.unlink(temporary_file)
Ejemplo n.º 27
0
    def listen_addresses(self):
        """
        The network address(es) where Apache is listening (a list of :class:`NetworkAddress` objects).

        :raises: :exc:`.AddressDiscoveryError` when discovery fails (e.g. because
                 ``/etc/apache2/ports.conf`` is missing or can't be parsed).

        Here's an example:

        >>> from apache_manager import ApacheManager
        >>> manager = ApacheManager()
        >>> manager.listen_addresses
        [NetworkAddress(protocol='http',
                        address='127.0.0.1',
                        port=81,
                        url='http://127.0.0.1:81')]
        """
        logger.debug("Discovering where Apache is listening by parsing %s ..",
                     self.ports_config)
        # Make sure the configuration file exists.
        # Bug fix: the format strings below lacked the {filename} placeholder
        # even though the `filename' keyword argument was passed to compact(),
        # so the pathname never showed up in the error messages.
        if not os.path.isfile(self.ports_config):
            raise AddressDiscoveryError(
                compact("""
                Failed to discover any addresses or ports that Apache is
                listening on! The configuration file {filename} is missing. Are
                you sure the Apache web server is properly installed? If so
                you'll have to specify the configuration's location.
            """,
                        filename=self.ports_config))
        # Parse the configuration file.
        matched_addresses = []
        pattern = re.compile(r'^(.+):(\d+)$')
        with open(self.ports_config) as handle:
            for lnum, line in enumerate(handle, start=1):
                tokens = line.split()
                # We are looking for `Listen' directives.
                if len(tokens) >= 2 and tokens[0] == 'Listen':
                    parsed_value = None
                    # Check for a port number without an IP address.
                    if tokens[1].isdigit():
                        parsed_value = NetworkAddress(port=int(tokens[1]))
                    else:
                        # Check for an IP address with a port number.
                        match = pattern.match(tokens[1])
                        if match:
                            address = match.group(1)
                            port = int(match.group(2))
                            # Translate the wildcard address into the loopback
                            # address so the result is actually connectable.
                            if address == '0.0.0.0':
                                address = '127.0.0.1'
                            parsed_value = NetworkAddress(address=address,
                                                          port=port)
                    # Check if we have a match.
                    if parsed_value is not None:
                        # Override the protocol if necessary.
                        if len(tokens) >= 3:
                            parsed_value.protocol = tokens[2]
                        logger.debug("Parsed listen directive on line %i: %s",
                                     lnum, parsed_value)
                        matched_addresses.append(parsed_value)
                    else:
                        logger.warning(
                            "Failed to parse listen directive on line %i: %s",
                            lnum, line)
        # Sanity check the results.
        if not matched_addresses:
            raise AddressDiscoveryError(
                compact("""
                Failed to discover any addresses or ports that Apache is
                listening on! Maybe I'm parsing the wrong configuration file?
                ({filename})
            """,
                        filename=self.ports_config))
        # Log and return sorted port numbers.
        logger.debug("Discovered %s that Apache is listening on: %s",
                     pluralize(len(matched_addresses), "address", "addresses"),
                     concatenate(map(str, matched_addresses)))
        return matched_addresses
Ejemplo n.º 28
0
def foreach(hosts, *command, **options):
    """
    Execute a command simultaneously on a group of remote hosts using SSH.

    :param hosts: An iterable of strings with SSH host aliases.
    :param command: Any positional arguments are converted to a list and used
                    to set the :attr:`~.ExternalCommand.command` property of
                    the :class:`RemoteCommand` objects constructed by
                    :func:`foreach()`.
    :param concurrency: The value of :attr:`.concurrency` to use
                        (defaults to :data:`DEFAULT_CONCURRENCY`).
    :param delay_checks: The value of :attr:`.delay_checks` to use
                         (defaults to :data:`True`).
    :param logs_directory: The value of :attr:`.logs_directory` to
                           use (defaults to :data:`None`).
    :param options: Additional keyword arguments can be used to conveniently
                    override the default values of the writable properties of
                    the :class:`RemoteCommand` objects constructed by
                    :func:`foreach()` (see :func:`RemoteCommand.__init__()` for
                    details).
    :returns: The list of :class:`RemoteCommand` objects constructed by
              :func:`foreach()`.
    :raises: Any of the following exceptions can be raised:

             - :exc:`.CommandPoolFailed` if :attr:`.delay_checks` is enabled
               (the default) and a command in the pool that has :attr:`.check`
               enabled (the default) fails.
             - :exc:`RemoteCommandFailed` if :attr:`.delay_checks` is disabled
               (not the default) and an SSH connection was successful but the
               remote command failed (the exit code of the ``ssh`` command was
               neither zero nor 255). Use the keyword argument ``check=False``
               to disable raising of this exception.
             - :exc:`RemoteConnectFailed` if :attr:`.delay_checks` is disabled
               (not the default) and an SSH connection failed (the exit code of
               the ``ssh`` command is 255). Use the keyword argument
               ``check=False`` to disable raising of this exception.

    .. note:: The :func:`foreach()` function enables the :attr:`.check` and
              :attr:`.delay_checks` options by default in an attempt to make it
              easy to do "the right thing". My assumption here is that if you
              are running *the same command* on multiple remote hosts:

              - You definitely want to know when a remote command has failed,
                ideally without manually checking the :attr:`.succeeded`
                property of each command.

              - Regardless of whether some remote commands fail you want to
                know that the command was at least executed on all hosts,
                otherwise your cluster of hosts will end up in a very
                inconsistent state.

              - If remote commands fail and an exception is raised the
                exception message should explain *which* remote commands
                failed.

              If these assumptions are incorrect then you can use the keyword
              arguments ``check=False`` and/or ``delay_checks=False`` to opt
              out of "doing the right thing" ;-)
    """
    hosts = list(hosts)
    # Separate command pool options from command options.
    concurrency = options.pop('concurrency', DEFAULT_CONCURRENCY)
    delay_checks = options.pop('delay_checks', True)
    logs_directory = options.pop('logs_directory', None)
    # Capture the output of remote commands by default
    # (unless the caller requested capture=False).
    if options.get('capture') is not False:
        options['capture'] = True
    # Enable error checking of remote commands by default
    # (unless the caller requested check=False).
    if options.get('check') is not False:
        options['check'] = True
    # Create a command pool.
    timer = Timer()
    pool = RemoteCommandPool(concurrency=concurrency,
                             delay_checks=delay_checks,
                             logs_directory=logs_directory)
    hosts_pluralized = pluralize(len(hosts), "host")
    logger.debug(
        "Preparing to run remote command on %s (%s) with a concurrency of %i: %s",
        hosts_pluralized, concatenate(hosts), concurrency, quote(command))
    # Populate the pool with remote commands to execute.
    for ssh_alias in hosts:
        pool.add(identifier=ssh_alias,
                 command=RemoteCommand(ssh_alias, *command, **options))
    # Run all commands in the pool.
    pool.run()
    # Report the results to the caller.
    logger.debug("Finished running remote command on %s in %s.",
                 hosts_pluralized, timer)
    # Bug fix: return an actual list (as documented) instead of a dict view;
    # views don't support indexing and their type was an implementation
    # detail leaking to callers.
    return list(dict(pool.commands).values())
def initialize_keys_device(image_file,
                           mapper_name,
                           mount_point,
                           volumes=(),
                           cleanup=None):
    """
    Initialize and activate the virtual keys device and use it to activate encrypted volumes.

    :param image_file: The absolute pathname of the image file for the virtual
                       keys device (a string). If you are using an encrypted
                       root drive this file should reside on the ``/boot``
                       partition to avoid chicken and egg problems :-).
    :param mapper_name: The device mapper name for the virtual keys device (a
                        string).
    :param mount_point: The mount point for the virtual keys device (a string).
    :param volumes: An iterable of strings that match mapper names
                    configured in /etc/crypttab. If given then only these
                    volumes will be unlocked. By default it's empty which means
                    all of the configured and available drives are unlocked.
    :param cleanup: :data:`True` to unmount and lock the virtual keys device
                    after use, :data:`False` to leave the device mounted or
                    :data:`None` to automatically figure out what the best
                    choice is (this is the default). See also
                    :func:`.have_systemd_dependencies()`.
    """
    # On the first run the image file doesn't exist yet; it is created,
    # LUKS-formatted and given a file system further down.
    first_run = not os.path.isfile(image_file)
    # Tracks whether initialization completed; used in the `finally' block
    # below to delete a half-initialized image file.
    initialized = not first_run
    mapper_device = '/dev/mapper/%s' % mapper_name
    if cleanup is None:
        # Figure out whether it's safe to unmount and lock
        # the virtual keys device after we're done.
        if have_systemd_dependencies(mount_point):
            logger.notice(
                compact("""
                The virtual keys device will remain unlocked because
                you're running systemd and you appear to be affected
                by https://github.com/systemd/systemd/issues/3816.
            """))
            cleanup = False
        else:
            logger.verbose(
                compact("""
                Locking virtual keys device after use (this should be
                safe to do because it appears that you're not affected
                by https://github.com/systemd/systemd/issues/3816).
            """))
            cleanup = True
    try:
        # Create the virtual keys device (on the first run).
        if first_run:
            logger.info("Creating virtual keys device %s ..", image_file)
            execute('dd', 'if=/dev/zero', 'of=%s' % image_file,
                    'bs=%i' % (1024 * 1024), 'count=10')
            execute('cryptsetup', 'luksFormat', image_file)
        # Unlock the keys device.
        if not os.path.exists(mapper_device):
            logger.info("Unlocking virtual keys device %s ..", image_file)
            execute('cryptsetup', 'luksOpen', image_file, mapper_name)
        # Measure how long the keys device remains accessible (reported
        # below when cleanup is enabled).
        unlocked_timer = Timer()
        with finalizer('cryptsetup', 'luksClose', mapper_name,
                       enabled=cleanup):
            # Create a file system on the virtual keys device (on the first run).
            if first_run:
                logger.info("Creating file system on virtual keys device ..")
                execute('mkfs.ext4', mapper_device)
                initialized = True
            # Mount the virtual keys device.
            if not os.path.isdir(mount_point):
                os.makedirs(mount_point)
            if os.path.ismount(mount_point):
                logger.info("The virtual keys device is already mounted ..")
            else:
                logger.info("Mounting the virtual keys device ..")
                execute('mount', mapper_device, mount_point)
            with finalizer('umount', mount_point, enabled=cleanup):
                # Restrict access to the key files to the superuser.
                os.chmod(mount_point, 0o700)
                if volumes:
                    logger.verbose(
                        "Unlocking encrypted devices matching filter: %s",
                        concatenate(map(repr, volumes)))
                else:
                    logger.verbose(
                        "Unlocking all configured and available encrypted devices .."
                    )
                # Create, install and use the keys to unlock the drives.
                num_configured = 0
                num_available = 0
                num_unlocked = 0
                for device in find_managed_drives(mount_point):
                    if volumes and device.target not in volumes:
                        logger.verbose(
                            "Ignoring %s because it doesn't match the filter.",
                            device.target)
                    elif device.is_available:
                        status = activate_encrypted_drive(
                            mapper_name=device.target,
                            physical_device=device.source_device,
                            keys_directory=mount_point,
                            reset=first_run,
                        )
                        if status & DriveStatus.UNLOCKED:
                            num_unlocked += 1
                        num_available += 1
                    num_configured += 1
                # Report what was (or wasn't) done, from most to least
                # specific outcome.
                if num_unlocked > 0:
                    logger.success("Unlocked %s.",
                                   pluralize(num_unlocked, "encrypted device"))
                elif num_available > 0:
                    logger.info("Nothing to do! (%s already unlocked)",
                                pluralize(num_available, "encrypted device"))
                elif num_configured > 0:
                    logger.info(
                        "Nothing to do! (no encrypted devices available)")
                else:
                    logger.info(
                        "Nothing to do! (no encrypted drives configured)")
        if cleanup:
            logger.verbose("Virtual keys device was accessible for %s.",
                           unlocked_timer)
    finally:
        # Don't leave a half-initialized (and thus unusable) image file
        # behind when the initialization procedure was interrupted.
        if not initialized:
            logger.warning(
                "Initialization procedure was interrupted, deleting %s ..",
                image_file)
            if os.path.isfile(image_file):
                os.unlink(image_file)
Ejemplo n.º 30
0
def collect_related_packages(filename, cache=None):
    """
    Collect the package archive(s) related to the given package archive.

    Starting from *filename* the dependencies of the package are parsed and
    resolved to filenames of package archives in the same directory; the
    dependencies of those archives are resolved in turn, and so on, until no
    further relationships can be resolved to existing package archives.

    :param filename: The filename of an existing ``*.deb`` archive (a string).
    :param cache: The :py:class:`.PackageCache` to use (defaults to ``None``).
    :returns: A list of :py:class:`PackageFile` objects.

    Known limitations / sharp edges of this function:

    - Only `Depends` and `Pre-Depends` relationships are processed while
      `Provides` is ignored. Whether it makes sense to support `Conflicts`,
      `Provides` and `Replaces` (and how to implement that) is an open
      question.

    - Unsatisfied relationships don't trigger a warning or error, because this
      function has no way of knowing in what context a package will be
      installed (e.g. which additional repositories a given apt client has
      access to).

    - Thoroughly test this functionality before you start to rely on it.
      Doing this correctly with the limited information available here is a
      complex operation and the implementation is far from perfect; bugs have
      been found and fixed before and more will undoubtedly be discovered.
      You've been warned :-).

    - Collection of related packages is incremental by nature, which makes
      this function rather slow on large package repositories and dependency
      sets. This is a known issue / limitation.

    This function is used to implement the ``deb-pkg-tools --collect``
    command:

    .. code-block:: sh

       $ deb-pkg-tools -c /tmp python-deb-pkg-tools_1.13-1_all.deb
       2014-05-18 08:33:42 deb_pkg_tools.package INFO Collecting packages related to ~/python-deb-pkg-tools_1.13-1_all.deb ..
       2014-05-18 08:33:42 deb_pkg_tools.package INFO Scanning ~/python-deb-pkg-tools_1.13-1_all.deb ..
       2014-05-18 08:33:42 deb_pkg_tools.package INFO Scanning ~/python-coloredlogs_0.4.8-1_all.deb ..
       2014-05-18 08:33:42 deb_pkg_tools.package INFO Scanning ~/python-chardet_2.2.1-1_all.deb ..
       2014-05-18 08:33:42 deb_pkg_tools.package INFO Scanning ~/python-humanfriendly_1.7.1-1_all.deb ..
       2014-05-18 08:33:42 deb_pkg_tools.package INFO Scanning ~/python-debian_0.1.21-1_all.deb ..
       Found 5 package archives:
        - ~/python-chardet_2.2.1-1_all.deb
        - ~/python-coloredlogs_0.4.8-1_all.deb
        - ~/python-deb-pkg-tools_1.13-1_all.deb
        - ~/python-humanfriendly_1.7.1-1_all.deb
        - ~/python-debian_0.1.21-1_all.deb
       Copy 5 package archives to /tmp? [Y/n] y
       2014-05-18 08:33:44 deb_pkg_tools.cli INFO Done! Copied 5 package archives to /tmp.
    """
    given_archive = parse_filename(filename)
    logger.info("Collecting packages related to %s ..",
                format_path(given_archive.filename))
    # Gather the candidate related archives (every archive in the same
    # directory except the given package itself), grouped by package name.
    candidates = collections.defaultdict(list)
    for candidate in find_package_archives(given_archive.directory):
        if candidate.name != given_archive.name:
            candidates[candidate.name].append(candidate)
    # Within each group, sort in descending version order so that newer
    # versions are preferred over older versions.
    for versions in candidates.values():
        versions.sort(reverse=True)
    # Conflicts between transitive (indirect) dependencies may require more
    # than one attempt before a converging set of archives is found.
    while True:
        try:
            # When no conflicts are encountered a single call suffices.
            return collect_related_packages_helper(candidates,
                                                   given_archive, cache)
        except CollectedPackagesConflict as e:
            # Brute force conflict resolution: drop the conflicting package
            # archive(s) from the candidate set and start over from scratch.
            # This works acceptably as long as the repository isn't full of
            # conflicts between transitive dependencies...
            conflicting_names = concatenate(
                os.path.basename(conflicting.filename)
                for conflicting in e.conflicts)
            logger.warning(
                "Removing %s from candidates (%s) ..",
                pluralize(len(e.conflicts), "conflicting archive"),
                conflicting_names)
            for conflicting in e.conflicts:
                candidates[conflicting.name].remove(conflicting)
            logger.info("Retrying related archive collection without %s ..",
                        pluralize(len(e.conflicts), "conflicting archive"))