def check_package(archive, cache=None):
    """
    Perform static checks on a package's dependency set.

    :param archive: The pathname of an existing ``*.deb`` archive (a string).
    :param cache: The :py:class:`.PackageCache` to use (defaults to ``None``).
    :raises: :py:class:`BrokenPackage` when one or more checks failed.
    """
    timer = Timer()
    logger.info("Checking %s ..", format_path(archive))
    dependency_set = collect_related_packages(archive, cache=cache)
    failed_checks = []
    # Check for duplicate files in the dependency set.
    try:
        check_duplicate_files(dependency_set, cache=cache)
    except BrokenPackage as e:
        failed_checks.append(e)
    except ValueError:
        # Not a failed check: check_duplicate_files() raises ValueError when
        # the dependency set is too small to contain duplicate files.
        pass
    # Check for version conflicts in the dependency set.
    try:
        check_version_conflicts(dependency_set, cache=cache)
    except BrokenPackage as e:
        failed_checks.append(e)
    if len(failed_checks) == 1:
        raise failed_checks[0]
    elif failed_checks:
        raise BrokenPackage('\n\n'.join(map(str, failed_checks)))
    else:
        logger.info("Finished checking in %s, no problems found.", timer)
    def install(self, directory, silent=False):
        """
        Install Node.js package(s) listed in a ``package.json`` file.

        :param directory: The pathname of a directory with a ``package.json`` file (a string).
        :param silent: Used to set :attr:`~executor.ExternalCommand.silent`.
        :returns: The result of :func:`extract_dependencies()`.
        """
        timer = Timer()
        package_file = os.path.join(directory, 'package.json')
        modules_directory = os.path.join(directory, 'node_modules')
        dependencies = self.extract_dependencies(package_file)
        logger.info("Installing Node.js package(s) in %s ..",
                    format_path(directory))
        if dependencies:
            file_in_cache = self.get_cache_file(dependencies)
            logger.verbose("Checking the cache (%s) ..", file_in_cache)
            if self.read_from_cache and self.context.is_file(file_in_cache):
                self.install_from_cache(file_in_cache, modules_directory)
                logger.info(
                    "Done! Took %s to install %s from cache.", timer,
                    pluralize(len(dependencies), "dependency", "dependencies"))
            else:
                self.installer_method(directory, silent=silent)
                self.prune_dependencies(directory)
                if self.write_to_cache:
                    self.add_to_cache(modules_directory, file_in_cache)
                logger.info(
                    "Done! Took %s to install %s using npm.", timer,
                    pluralize(len(dependencies), "dependency", "dependencies"))
            self.clean_cache()
        else:
            logger.info("Nothing to do! (no dependencies to install)")
        return dependencies
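# Usage sketch for the install() method above; NpmAccel as the owning class
# is an assumption based on the npm-accel project, and the directory is
# hypothetical:
from executor.contexts import LocalContext
from npm_accel import NpmAccel

accel = NpmAccel(context=LocalContext())
installed = accel.install('/path/to/project')
print("Installed %i dependencies." % len(installed))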
def fetch_worker(url):
    """
    Fetch the given URL for :func:`fetch_concurrent()`.

    :param url: The URL to fetch (a string).
    :returns: A tuple of three values:

              1. The URL that was fetched (a string).
              2. The data that was fetched (a string or :data:`None`).
              3. The number of seconds it took to fetch the URL (a number).
    """
    # Ignore Control-C instead of raising KeyboardInterrupt because (due to a
    # quirk in multiprocessing) this can cause the parent and child processes
    # to get into a deadlock kind of state where only Control-Z will get you
    # your precious terminal back; super annoying IMHO.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    timer = Timer()
    try:
        response = fetch_url(url, retry=False)
        data = response.read()
    except Exception as e:
        logger.debug("Failed to fetch %s! (%s)", url, e)
        data = None
    else:
        bytes_per_second = format_size(round(len(data) / timer.elapsed_time, 2))
        logger.debug("Downloaded %s at %s per second.", url, bytes_per_second)
    return url, data, timer.elapsed_time
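# A minimal sketch of the fetch_concurrent() counterpart that fetch_worker()
# was written for; the pool size and exact signature are assumptions:
import multiprocessing

def fetch_concurrent(urls, concurrency=4):
    """Fetch the given URLs concurrently, returning (url, data, elapsed_time) tuples."""
    with multiprocessing.Pool(concurrency) as pool:
        # imap_unordered() hands back results as workers finish, so a single
        # slow download doesn't delay reporting of the fast ones.
        return list(pool.imap_unordered(fetch_worker, urls))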
    def get(self, filename):
        """
        Download a cached distribution archive from the configured Amazon S3
        bucket to the local cache.

        :param filename: The filename of the distribution archive (a string).
        :returns: The pathname of a distribution archive on the local file
                  system or ``None``.
        :raises: :py:exc:`.CacheBackendError` when any underlying method fails.
        """
        timer = Timer()
        self.check_prerequisites()
        # Check if the distribution archive is available.
        raw_key = self.get_cache_key(filename)
        logger.info(
            "Checking if distribution archive is available in S3 bucket: %s",
            raw_key)
        key = self.s3_bucket.get_key(raw_key)
        if key is None:
            logger.debug("Distribution archive is not available in S3 bucket.")
        else:
            # Download the distribution archive to the local binary index.
            # TODO Shouldn't this use LocalCacheBackend.put() instead of
            #      implementing the same steps manually?!
            logger.info("Downloading distribution archive from S3 bucket ..")
            local_file = os.path.join(self.config.binary_cache, filename)
            makedirs(os.path.dirname(local_file))
            key.get_contents_to_filename(local_file)
            logger.debug(
                "Finished downloading distribution archive from S3 bucket in %s.",
                timer)
            return local_file
    def wait_until_connected(self, port_number=None):
        """
        Wait until the TCP server starts accepting connections.

        :param port_number: The port number to check (an integer, defaults to
                            the computed value of :attr:`port_number`).
        :raises: :exc:`TimeoutError` when the server isn't fast enough to
                 initialize.
        """
        timer = Timer()
        if port_number is None:
            port_number = self.port_number
        location = self.render_location(port_number=port_number)
        with Spinner(timer=timer) as spinner:
            while not self.is_connected(port_number):
                if timer.elapsed_time > self.wait_timeout:
                    msg = "%s server didn't start accepting connections within timeout of %s!"
                    raise TimeoutError(msg %
                                       (self.scheme.upper(),
                                        format_timespan(self.wait_timeout)))
                spinner.step(
                    label="Waiting for server to accept connections (%s)" %
                    location)
                spinner.sleep()
        self.logger.debug("Waited %s for server to accept connections (%s).",
                          timer, location)
    def fetch_status_page(self, status_url):
        """
        Fetch an Apache status page and return its content.

        :param status_url: The URL of the status page (a string).
        :returns: The response body (a string).
        :raises: :exc:`.StatusPageError` if fetching of the status page fails.
        """
        timer = Timer()
        # Get the Apache status page.
        logger.debug("Fetching Apache status page from %s ..", status_url)
        try:
            response = urlopen(status_url)
        except HTTPError as e:
            # These objects can be treated as response objects.
            response = e
        # Validate the HTTP response status.
        response_code = response.getcode()
        if response_code != 200:
            # Record the failure.
            self.status_response = False
            # Notify the caller using a custom exception.
            raise StatusPageError(compact("""
                Failed to retrieve Apache status page from {url}! Expected to
                get HTTP response status 200, got {code} instead.
            """, url=status_url, code=response_code))
        response_body = response.read()
        logger.debug("Fetched %s in %s.", format_size(len(response_body)),
                     timer)
        self.status_response = True
        return response_body
def discover_mirrors():
    """
    Discover available Ubuntu mirrors.

    :returns: A set of :class:`.CandidateMirror` objects that have their
              :attr:`~.CandidateMirror.mirror_url` property set and may have
              the :attr:`~.CandidateMirror.last_updated` property set.
    :raises: :exc:`Exception` when no mirrors can be discovered.

    This queries :data:`MIRROR_SELECTION_URL` to discover available Ubuntu
    mirrors, falling back to :data:`MIRRORS_URL` when that query fails or
    yields too few results. Here's an example run:
    >>> from apt_smart.backends.ubuntu import discover_mirrors
    >>> from pprint import pprint
    >>> pprint(discover_mirrors())

    """
    timer = Timer()
    mirrors = discover_mirror_selection()
    if not mirrors:
        logger.warning("Failed to discover any Ubuntu mirrors! (using %s)" %
                       MIRROR_SELECTION_URL)
        logger.info("Trying to use %s as fallback" % MIRRORS_URL)
        mirrors = discover_mirrors_old()
    elif len(mirrors) < 2:
        logger.warning("Too few mirrors, trying to use %s to find more" %
                       MIRRORS_URL)
        mirrors |= discover_mirrors_old(
        )  # add mirrors from discover_mirrors_old()
    logger.info("Discovered %s in %s.", pluralize(len(mirrors),
                                                  "Ubuntu mirror"), timer)
    return mirrors
    def run_migrations(self):
        """
        Upgrade the database schema using Alembic_.

        This method is automatically called when a :class:`SchemaManager`
        object is created. In order to upgrade the database schema the
        :attr:`alembic_directory` property needs to be set, but if it's
        not set then :func:`run_migrations()` won't complain.

        .. _Alembic: http://alembic.zzzcomputing.com/
        """
        if self.alembic_directory:
            timer = Timer()
            logger.verbose("Checking whether database needs upgrading ..")
            if not self.current_schema_revision:
                logger.verbose(
                    "Stamping empty database with current schema revision ..")
                with CustomVerbosity(level="warning"):
                    stamp(self.alembic_config, "head")
                logger.success(
                    "Stamped initial database schema revision in %s.", timer)
                # Invalidate cached property.
                del self.current_schema_revision
            elif not self.schema_up_to_date:
                logger.info("Running database migrations ..")
                with CustomVerbosity(level="info"):
                    upgrade(self.alembic_config, "head")
                logger.info("Successfully upgraded database schema in %s.",
                            timer)
                # Invalidate cached property.
                del self.current_schema_revision
            else:
                logger.verbose(
                    "Database schema already up to date! (took %s to check)",
                    timer)
 def find_uids_to_download(self):
     """Determine the UIDs of the email messages to be downloaded."""
     timer = Timer()
     # Load the UID values of the Google Talk conversations in the local database.
     logger.verbose(
         "Discovering conversations available in local archive ..")
     conversation_uids = (self.session.query(
         Conversation.external_id).filter(
             Conversation.account == self.account).filter(
                 Conversation.external_id != None))
     message_uids = (self.session.query(Message.external_id).join(
         Message.conversation).filter(
             Conversation.account == self.account).filter(
                 Message.external_id != None))
     logger.debug("Query: %s", conversation_uids.union(message_uids))
     local_uids = set(
         int(row[0]) for row in conversation_uids.union(message_uids))
     # Discover the UID values of the conversations available remotely.
     logger.verbose("Discovering conversations available on server ..")
     response = self.client.uid("search", None, "ALL")
     data = self.check_response(response,
                                "Search for available messages failed!")
     remote_uids = set(map(int, data[0].split()))
     # Discover the UID values of the conversations that we're missing.
     missing_uids = remote_uids - local_uids
     logger.verbose(
         "Found %s, %s and %s (took %s).",
         pluralize(len(local_uids), "local conversation"),
         pluralize(len(remote_uids), "remote conversation"),
         pluralize(len(missing_uids), "conversation to download",
                   "conversations to download"),
         timer,
     )
     return missing_uids
    def __init__(self, filename):
        """
        Initialize a package cache.

        :param filename: The pathname of the SQLite database file (a string).
        """
        self.character_encoding = 'utf-8'
        self.db = None
        self.db_timer = Timer(resumable=True)
        self.decode_timer = Timer(resumable=True)
        self.encode_timer = Timer(resumable=True)
        self.filename = os.path.expanduser(filename)
        self.fs_timer = Timer(resumable=True)
        self.gc_enabled = False
        self.gc_timer = Timer(resumable=True)
        self.identity_map = {}
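# A minimal demonstration of the resumable timers initialized above: with
# resumable=True a humanfriendly Timer accumulates elapsed time over repeated
# `with` blocks (the same pattern appears in wait_for_process() further down):
from humanfriendly import Timer

demo_timer = Timer(resumable=True)
for _ in range(3):
    with demo_timer:
        pass  # each timed operation adds to the running total
print("Accumulated %s." % demo_timer)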
    def unpack_source_dists(self, arguments):
        """
        Check whether there are local source distributions available for all
        requirements, unpack the source distribution archives and find the
        names and versions of the requirements. By using the ``pip install
        --no-install`` command we avoid reimplementing the following pip
        features:

        - Parsing of ``requirements.txt`` (including recursive parsing)
        - Resolution of possibly conflicting pinned requirements
        - Unpacking source distributions in multiple formats
        - Finding the name & version of a given source distribution

        :param arguments: The command line arguments to ``pip install ...`` (a
                          list of strings).
        :returns: A list of :py:class:`pip_accel.req.Requirement` objects.
        :raises: Any exceptions raised by pip, for example
                 :py:exc:`pip.exceptions.DistributionNotFound` when not all
                 requirements can be satisfied.
        """
        unpack_timer = Timer()
        logger.info("Unpacking source distribution(s) ..")
        # Install our custom package finder to force --no-index behavior.
        original_package_finder = pip_index_module.PackageFinder
        pip_install_module.PackageFinder = CustomPackageFinder
        try:
            requirements = self.get_pip_requirement_set(arguments, use_remote_index=False)
            logger.info("Finished unpacking %s in %s.",
                        pluralize(len(requirements), "source distribution"),
                        unpack_timer)
            return requirements
        finally:
            # Make sure to remove our custom package finder.
            pip_install_module.PackageFinder = original_package_finder
def wait_for_processes(processes):
    """
    Wait for the given processes to end.

    Prints an overview of running processes to the terminal once a second so
    the user knows what they are waiting for.

    This function is not specific to :mod:`proc.cron` at all (it doesn't even
    need to know what cron jobs are); it simply waits until all of the given
    processes have ended.

    :param processes: A list of :class:`~proc.tree.ProcessNode` objects.
    """
    wait_timer = Timer()
    running_processes = list(processes)
    for process in running_processes:
        logger.info("Waiting for process %i: %s (runtime is %s)", process.pid,
                    quote(process.cmdline),
                    format_timespan(round(process.runtime)))
    with Spinner(timer=wait_timer) as spinner:
        while True:
            for process in list(running_processes):
                if not process.is_alive:
                    running_processes.remove(process)
            if not running_processes:
                break
            num_processes = pluralize(len(running_processes), "process",
                                      "processes")
            process_ids = concatenate(str(p.pid) for p in running_processes)
            spinner.step(label="Waiting for %s: %s" %
                         (num_processes, process_ids))
            spinner.sleep()
    logger.info("All processes have finished, we're done waiting (took %s).",
                wait_timer.rounded)
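# Usage sketch for wait_for_processes(): Process.from_pid() (from proc.core,
# as used in check_process_termination() below) creates compatible objects;
# the PIDs are hypothetical:
from proc.core import Process

processes = [Process.from_pid(1234), Process.from_pid(5678)]
wait_for_processes([p for p in processes if p is not None])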
    def save_profile(self, filename=None):
        """
        Save gathered profile statistics to a file.

        :param filename: The pathname of the profile file (a string or
                         :data:`None`). Defaults to the value of
                         :attr:`profile_file`.
        :raises: :exc:`~exceptions.TypeError` when `filename` isn't given and
                 :attr:`profile_file` also isn't set,
                 :exc:`~exceptions.ValueError` when profiling was never
                 enabled.
        """
        filename = filename or self.profile_file
        if not filename:
            raise TypeError("Missing 'filename' argument!")
        elif self.profiler is None:
            raise ValueError("Code profiling isn't enabled!")
        timer = Timer()
        logger.info("Saving profile statistics to %s ..", self.profile_file)
        if self.profiling_enabled:
            self.profiler.disable()
            self.profiling_enabled = False
            profiling_disabled = True
        else:
            profiling_disabled = False
        self.profiler.dump_stats(filename)
        if profiling_disabled:
            self.profiler.enable()
        logger.verbose("Took %s to save profile statistics.", timer)
    def get(self, filename):
        """
        Download a cached distribution archive from the configured Amazon S3
        bucket to the local cache.

        :param filename: The filename (without directory components) of the
                         distribution archive (a string).
        :returns: The pathname of a distribution archive on the local file
                  system or ``None``.
        :raises: :py:exc:`.CacheBackendError` when any underlying method fails.
        """
        timer = Timer()
        bucket = self.connect_to_bucket()
        # Check if the distribution archive is available.
        raw_key = self.get_cache_key(filename)
        logger.info(
            "Checking if distribution archive is available in S3 bucket: %s",
            raw_key)
        key = bucket.get_key(raw_key)
        if key is None:
            logger.debug("Distribution archive is not available in S3 bucket.")
        else:
            # Download the distribution archive to the local index.
            logger.info("Downloading distribution archive from S3 bucket ..")
            # Note: `binary_index' is assumed to come from module level
            # configuration in this older snippet (the newer S3 backend
            # example above uses self.config.binary_cache instead).
            local_file = os.path.join(binary_index, filename)
            key.get_contents_to_filename(local_file)
            logger.debug(
                "Finished downloading distribution archive from S3 bucket in %s.",
                timer)
            return local_file
 def execute_helper(self):
     """Helper for :func:`execute()`."""
     timer = Timer()
     actions = []
     if self.crypto_device and not self.crypto_device_available:
         msg = "Encrypted filesystem %s isn't available! (the device file %s doesn't exist)"
         raise MissingBackupDiskError(
             msg % (self.crypto_device, self.crypttab_entry.source_device))
     if self.backup_enabled:
         self.notify_starting()
     self.unlock_device()
     try:
         self.mount_filesystem()
         if self.backup_enabled:
             self.transfer_changes()
             actions.append('create backup')
         if self.snapshot_enabled:
             self.create_snapshot()
             actions.append('create snapshot')
         if self.rotate_enabled:
             self.rotate_snapshots()
             actions.append('rotate old snapshots')
     except Exception:
         self.notify_failed(timer)
         raise
     else:
         if self.backup_enabled:
             self.notify_finished(timer)
         if actions:
             logger.info("Took %s to %s.", timer, concatenate(actions))
 def dumb_update(self):
     """Update the system's package lists (by running ``apt-get update``)."""
     timer = Timer()
     logger.info("Updating package lists of %s ..", self.context)
     self.context.execute('apt-get', 'update', sudo=True)
     logger.info("Finished updating package lists of %s in %s ..",
                 self.context, timer)
    def create_snapshot(self):
        """
        Create a snapshot of the destination directory.

        :raises: The following exceptions can be raised:

                 - :exc:`.DestinationContextUnavailable`, refer
                   to :attr:`destination_context` for details.
                 - :exc:`.ParentDirectoryUnavailable`, refer
                   to :attr:`.parent_directory` for details.
                 - :exc:`~executor.ExternalCommandFailed` when
                   the ``cp`` command reports an error.
        """
        # Compose the `cp' command needed to create a snapshot.
        snapshot = os.path.join(self.destination.parent_directory,
                                time.strftime('%Y-%m-%d %H:%M:%S'))
        cp_command = [
            'cp',
            '--archive',
            '--link',
            self.destination.directory,
            snapshot,
        ]
        # Execute the `cp' command?
        if self.dry_run:
            logger.info("Snapshot command: %s", quote(cp_command))
        else:
            timer = Timer()
            logger.info("Creating snapshot: %s", snapshot)
            self.destination_context.execute(*cp_command, ionice=self.ionice)
            logger.info("Took %s to create snapshot.", timer)
    def rotate_concurrent(self, *locations, **kw):
        """
        Rotate the backups in the given locations concurrently.

        :param locations: One or more values accepted by :func:`coerce_location()`.
        :param kw: Any keyword arguments are passed on to :func:`rotate_backups()`.

        This function uses :func:`rotate_backups()` to prepare rotation
        commands for the given locations and then it removes backups in
        parallel, one backup per mount point at a time.

        The idea behind this approach is that parallel rotation is most useful
        when the files to be removed are on different disks and so multiple
        devices can be utilized at the same time.

        Because mount points are per system, :func:`rotate_concurrent()` will
        also parallelize over backups located on multiple remote systems.
        """
        timer = Timer()
        pool = CommandPool(concurrency=10)
        logger.info("Scanning %s ..",
                    pluralize(len(locations), "backup location"))
        for location in locations:
            for cmd in self.rotate_backups(location, prepare=True, **kw):
                pool.add(cmd)
        if pool.num_commands > 0:
            backups = pluralize(pool.num_commands, "backup")
            logger.info("Preparing to rotate %s (in parallel) ..", backups)
            pool.run()
            logger.info("Successfully rotated %s in %s.", backups, timer)
    def unpack_source_dists(self, arguments, use_wheels=False):
        """
        Check whether there are local source distributions available for all
        requirements, unpack the source distribution archives and find the
        names and versions of the requirements. By using the ``pip install
        --download`` command we avoid reimplementing the following pip
        features:

        - Parsing of ``requirements.txt`` (including recursive parsing)
        - Resolution of possibly conflicting pinned requirements
        - Unpacking source distributions in multiple formats
        - Finding the name & version of a given source distribution

        :param arguments: The command line arguments to ``pip install ...`` (a
                          list of strings).
        :param use_wheels: Whether pip and pip-accel are allowed to use wheels_
                           (``False`` by default for backwards compatibility
                           with callers that use pip-accel as a Python API).
        :returns: A list of :py:class:`pip_accel.req.Requirement` objects.
        :raises: Any exceptions raised by pip, for example
                 :py:exc:`pip.exceptions.DistributionNotFound` when not all
                 requirements can be satisfied.
        """
        unpack_timer = Timer()
        logger.info("Unpacking distribution(s) ..")
        with PatchedAttribute(pip_install_module, 'PackageFinder', CustomPackageFinder):
            requirements = self.get_pip_requirement_set(arguments, use_remote_index=False, use_wheels=use_wheels)
            logger.info("Finished unpacking %s in %s.", pluralize(len(requirements), "distribution"), unpack_timer)
            return requirements
def install_requirements(requirements, install_prefix=ENVIRONMENT):
    """
    Manually install all requirements from binary distributions.

    :param requirements: A list of tuples in the format of the return value of
                         :py:func:`unpack_source_dists()`.
    :param install_prefix: The "prefix" under which the requirements should be
                           installed. This will be a pathname like ``/usr``,
                           ``/usr/local`` or the pathname of a virtual
                           environment.
    :returns: ``True`` if it succeeds in installing all requirements from
              binary distribution archives, ``False`` otherwise.
    """
    install_timer = Timer()
    logger.info("Installing from binary distributions ..")
    python = os.path.join(install_prefix, 'bin', 'python')
    pip = os.path.join(install_prefix, 'bin', 'pip')
    for name, version, directory in requirements:
        if os.system('%s uninstall --yes %s >/dev/null 2>&1' %
                     (pipes.quote(pip), pipes.quote(name))) == 0:
            logger.info("Uninstalled previously installed package %s.", name)
        members = get_binary_dist(name,
                                  version,
                                  directory,
                                  prefix=install_prefix,
                                  python=python)
        install_binary_dist(members, prefix=install_prefix, python=python)
    logger.info("Finished installing all requirements in %s.", install_timer)
    return True
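# Usage sketch: each requirement is a (name, version, directory) tuple as
# produced by unpack_source_dists(); the values below are hypothetical:
install_requirements(
    [('requests', '2.3.0', '/tmp/unpacked/requests-2.3.0')],
    install_prefix='/home/user/virtualenv',
)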
    def wait_for_process(self, timeout=0, use_spinner=None):
        """
        Wait until the process ends or the timeout expires.

        :param timeout: The number of seconds to wait for the process to
                        terminate after we've asked it nicely (defaults
                        to zero which means we wait indefinitely).
        :param use_spinner: Whether or not to display an interactive spinner
                            on the terminal (using :class:`~humanfriendly.Spinner`)
                            to explain to the user what they are waiting for:

                            - :data:`True` enables the spinner,
                            - :data:`False` disables the spinner,
                            - :data:`None` (the default) means the spinner is
                              enabled when the program is connected to an
                              interactive terminal, otherwise it's disabled.
        :returns: A :class:`~humanfriendly.Timer` object telling you how long
                  it took to wait for the process.
        """
        with Timer(resumable=True) as timer:
            with Spinner(interactive=use_spinner, timer=timer) as spinner:
                while self.is_running:
                    if timeout and timer.elapsed_time >= timeout:
                        break
                    spinner.step(label="Waiting for process %i to terminate" %
                                 self.pid)
                    spinner.sleep()
            return timer
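# Usage sketch, assuming wait_for_process() is available on process objects
# like the ones created with Process.from_pid() elsewhere in these examples
# (the PID and timeout are hypothetical):
from proc.core import Process

process = Process.from_pid(1234)
if process is not None:
    timer = process.wait_for_process(timeout=30)
    print("Waited %s for process to terminate." % timer)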
def fetch_url(url, timeout=10, retry=False, max_attempts=3):
    """
    Fetch a URL, optionally retrying on failure.

    :param url: The URL to fetch (a string).
    :param timeout: The maximum time in seconds that's allowed to pass before
                    the request is aborted (a number, defaults to 10 seconds).
    :param retry: Whether to retry on failure (defaults to :data:`False`).
    :param max_attempts: The maximum number of attempts when retrying is
                         enabled (an integer, defaults to three).
    :returns: The response object.
    :raises: Any exception raised by Python's standard library in the last
             attempt (assuming all attempts raise an exception).
    """
    timer = Timer()
    logger.debug("Fetching %s ..", url)
    for i in range(1, max_attempts + 1):
        try:
            with SignalTimeout(timeout, swallow_exc=False):
                response = urlopen(url)
                if response.getcode() != 200:
                    raise Exception("Got HTTP %i response when fetching %s!" %
                                    (response.getcode(), url))
        except Exception as e:
            if retry and i < max_attempts:
                logger.warning(
                    "Failed to fetch %s, retrying (%i/%i, error was: %s)", url,
                    i, max_attempts, e)
            else:
                raise
        else:
            logger.debug("Took %s to fetch %s.", timer, url)
            return response
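# Usage sketch: a retried fetch with a shorter timeout (example.com is a
# placeholder):
response = fetch_url('http://example.com', timeout=5, retry=True, max_attempts=5)
print("Got HTTP %i, %i bytes." % (response.getcode(), len(response.read())))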
 def check_process_termination(self, method):
     """Helper method for process termination tests."""
     timer = Timer()
     # We use Executor to launch an external process.
     with ExternalCommand('sleep', '60', check=False) as cmd:
         # Verify that proc.unix.UnixProcess.is_running (which is normally
         # overridden by proc.core.Process.is_running) works as expected,
         # even though this property isn't actively used in the `proc'
         # package because we want to deal with not-yet-reclaimed
         # processes and zombie processes which is very much a Linux
         # specific thing (hence the override).
         unix_process = UnixProcess(pid=cmd.pid)
         assert unix_process.is_running, "UnixProcess.is_running is broken!"
         # We don't use Executor to control the process, instead we take the
         # process ID and use it to create a Process object that doesn't
         # know about Python's subprocess module.
         linux_process = Process.from_pid(cmd.pid)
         # We terminate the process using a positive but very low timeout so
         # that all of the code involved gets a chance to run, but without
         # significantly slowing down the test suite.
         getattr(linux_process, method)(timeout=0.1)
         # Now we can verify our assertions.
         assert not linux_process.is_running, "Child still running despite graceful termination request!"
         assert timer.elapsed_time < 10, "It took too long to terminate the child!"
         # Now comes a hairy bit of Linux implementation details that most
         # people can thankfully ignore (blissful ignorance :-). Parent
         # processes are responsible for reclaiming child processes and
         # until this happens the /proc/[pid] entry remains, which means
         # the `kill -0' trick used by UnixProcess to detect running
         # processes doesn't work as expected. Basically this means we
         # _must_ make sure that waitpid() is called before we can expect
         # UnixProcess.is_running to behave as expected.
         cmd.wait()
         # Now that we've called waitpid() things should work as expected.
         assert not unix_process.is_running, "UnixProcess.is_running is broken!"
def apply_fudge_factor(fudge_factor):
    """
    Apply the requested scheduling fudge factor.

    :param fudge_factor: The maximum number of seconds to sleep (a number).

    Previous implementations of the fudge factor interrupt used UNIX signals
    (specifically ``SIGUSR1``) but the use of this signal turned out to be
    sensitive to awkward race conditions and it wasn't very cross platform, so
    now the creation of a regular file is used to interrupt the fudge factor.
    """
    if fudge_factor:
        timer = Timer()
        logger.debug("Calculating fudge factor based on user defined maximum (%s) ..",
                     format_timespan(fudge_factor))
        fudged_sleep_time = fudge_factor * random.random()
        logger.info("Sleeping for %s because of user defined fudge factor ..",
                    format_timespan(fudged_sleep_time))
        interrupt_file = get_lock_path(INTERRUPT_FILE)
        while timer.elapsed_time < fudged_sleep_time:
            if os.path.isfile(interrupt_file):
                logger.info("Fudge factor sleep was interrupted! (%s exists)",
                            interrupt_file)
                break
            time_to_sleep = min(1, fudged_sleep_time - timer.elapsed_time)
            if time_to_sleep > 0:
                time.sleep(time_to_sleep)
        else:
            logger.info("Finished sleeping because of fudge factor (took %s).", timer)
 async def download_all_conversations(self, conversation_list):
     """Download conversations from Google Hangouts."""
     timer = Timer()
     for conversation in conversation_list.get_all(include_archived=True):
         try:
             await self.download_conversation(conversation)
         except Exception:
             logger.warning("Skipping conversation due to synchronization error ..", exc_info=True)
             self.stats.failed_conversations += 1
         self.stats.show()
     summary = []
     if self.stats.conversations_added > 0:
         summary.append(pluralize(self.stats.conversations_added, "conversation"))
     if self.stats.messages_added > 0:
         summary.append(pluralize(self.stats.messages_added, "message"))
     if summary:
         logger.info("Added %s in %s.", concatenate(summary), timer)
     else:
         logger.info("No new conversations or messages found (took %s to check).", timer)
     if self.stats.failed_conversations > 0:
         logger.warning(
             "Skipped %s due to synchronization %s!",
             pluralize(self.stats.failed_conversations, "conversation"),
             "errors" if self.stats.failed_conversations > 1 else "error",
         )
     if self.stats.skipped_conversations > 0:
         logger.notice(
             "Skipped %s due to previous synchronization %s! (use --force to retry %s)",
             pluralize(self.stats.skipped_conversations, "conversation"),
             "errors" if self.stats.skipped_conversations > 1 else "error",
             "them" if self.stats.skipped_conversations > 1 else "it",
         )
    def put(self, filename, handle):
        """
        Upload a distribution archive to the configured Amazon S3 bucket.

        If the :py:attr:`~.Config.s3_cache_readonly` configuration option is
        enabled this method does nothing.

        :param filename: The filename of the distribution archive (a string).
        :param handle: A file-like object that provides access to the
                       distribution archive.
        :raises: :py:exc:`.CacheBackendError` when any underlying method fails.
        """
        if self.config.s3_cache_readonly:
            logger.info(
                'Skipping upload to S3 bucket (using S3 in read only mode).')
        else:
            timer = Timer()
            self.check_prerequisites()
            from boto.s3.key import Key
            raw_key = self.get_cache_key(filename)
            logger.info("Uploading distribution archive to S3 bucket: %s",
                        raw_key)
            key = Key(self.s3_bucket)
            key.key = raw_key
            try:
                key.set_contents_from_file(handle)
            except Exception as e:
                logger.info(
                    "Encountered error writing to S3 bucket, falling back to read only mode (exception: %s)",
                    e)
                self.config.s3_cache_readonly = True
            else:
                logger.info(
                    "Finished uploading distribution archive to S3 bucket in %s.",
                    timer)
    def add_to_cache(self, modules_directory, file_in_cache):
        """
        Add a ``node_modules`` directory to the cache.

        :param modules_directory: The pathname of the ``node_modules`` directory (a string).
        :param file_in_cache: The pathname of the archive in the cache (a string).
        :raises: Any exceptions raised by the :mod:`executor.contexts` module.

        This method generates the tar archive under a temporary name inside the
        cache directory and then renames it into place atomically, in order to
        avoid race conditions where multiple concurrent npm-accel commands try
        to use partially generated cache entries.

        The temporary names are generated by appending a randomly generated
        integer number to the original filename (with a dash to delimit the
        original filename from the number).
        """
        timer = Timer()
        logger.info("Adding to cache (%s) ..", format_path(file_in_cache))
        self.context.execute('mkdir', '-p', os.path.dirname(file_in_cache))
        with self.context.atomic_write(file_in_cache) as temporary_file:
            self.context.execute('tar', '-cf', temporary_file, '-C',
                                 modules_directory, '.')
        self.write_metadata(file_in_cache)
        logger.verbose("Took %s to add directory to cache.", timer)
 def execute_helper(self):
     """Helper for :func:`execute()`."""
     timer = Timer()
     actions = []
     if self.backup_enabled:
         self.notify_starting()
     self.unlock_device()
     try:
         self.mount_filesystem()
         if self.backup_enabled:
             self.transfer_changes()
             actions.append('create backup')
         if self.snapshot_enabled:
             self.create_snapshot()
             actions.append('create snapshot')
         if self.rotate_enabled:
             self.rotate_snapshots()
             actions.append('rotate old snapshots')
     except Exception:
         self.notify_failed(timer)
         raise
     else:
         if self.backup_enabled:
             self.notify_finished(timer)
         if actions:
             logger.info("Took %s to %s.", timer, concatenate(actions))
    def wait_for_post_boot(self, pre_server):
        """
        Wait for the post-boot environment to come online.

        :param pre_server: A :class:`ServerDetails` object created by :func:`wait_for_pre_boot()`.
        """
        method_timer = Timer()
        check_keys = bool(pre_server.host_keys)
        check_headers = (
            self.pre_boot.port_number == self.post_boot.port_number)
        logger.info(
            "Waiting for post-boot environment based on SSH %s ..",
            "host keys" if check_keys else
            ("server headers" if check_headers else "port numbers"))
        with AutomaticSpinner("Waiting for post-boot environment",
                              show_time=True):
            while True:
                iteration_timer = Timer()
                if check_headers or check_keys:
                    post_server = self.scan_ssh_server(self.post_boot)
                    if check_keys and post_server.host_keys:
                        logger.verbose(
                            "Checking if SSH host keys have changed ..")
                        if post_server.host_keys != pre_server.host_keys:
                            logger.info("Detected change in SSH host keys.")
                            self.store_host_keys(pre_server, post_server)
                            break
                    if check_headers and pre_server.header and post_server.header:
                        logger.verbose(
                            "Checking if SSH server header has changed ..")
                        if post_server.header != pre_server.header:
                            logger.info(
                                "Detected change in SSH server header.")
                            break
                elif self.test_ssh_connection(self.post_boot,
                                              self.post_context):
                    logger.info("Detected change in SSH port number.")
                    break
                if method_timer.elapsed_time >= self.boot_timeout:
                    raise BootTimeoutError(
                        format(
                            "Timed out waiting for post-boot environment of %s to come online within %s!",
                            self.post_context,
                            format_timespan(self.boot_timeout),
                        ))
                iteration_timer.sleep(self.retry_interval)
        logger.info("Waited %s for post-boot environment.", method_timer)
    def run(self):
        """
        Keep spawning commands and collecting results until all commands have run.

        :returns: The value of :attr:`results`.
        :raises: Any exceptions raised by :func:`collect()`.

        This method calls :func:`spawn()` and :func:`collect()` in a loop until
        all commands registered using :func:`add()` have run and finished. If
        :func:`collect()` raises an exception any running commands are
        terminated before the exception is propagated to the caller.

        If you're writing code where you want to own the main loop then
        consider calling :func:`spawn()` and :func:`collect()` directly instead
        of using :func:`run()`.

        When :attr:`concurrency` is set to one, specific care is taken to make
        sure that the callbacks configured by :attr:`.start_event` and
        :attr:`.finish_event` are called in the expected (intuitive) order.
        """
        # Start spawning processes to execute the commands.
        timer = Timer()
        logger.debug("Preparing to run %s with a concurrency of %i ..",
                     pluralize(self.num_commands, "command"), self.concurrency)
        try:
            with self.get_spinner(timer) as spinner:
                num_started = 0
                num_collected = 0
                while not self.is_finished:
                    # When concurrency is set to one (I know, initially it
                    # sounds like a silly use case, bear with me) I want the
                    # start_event and finish_event callbacks of external
                    # commands to fire in the right order. The following
                    # conditional is intended to accomplish this goal.
                    if self.concurrency > (num_started - num_collected):
                        num_started += self.spawn()
                    num_collected += self.collect()
                    spinner.step(label=format(
                        "Waiting for %i/%i %s",
                        self.num_commands - self.num_finished,
                        self.num_commands,
                        "command" if self.num_commands == 1 else "commands",
                    ))
                    spinner.sleep()
        except Exception:
            if self.num_running > 0:
                logger.warning(
                    "Command pool raised exception, terminating running commands!"
                )
            # Terminate commands that are still running.
            self.terminate()
            # Re-raise the exception to the caller.
            raise
        # Collect the output and return code of any commands not yet collected.
        self.collect()
        logger.debug("Finished running %s in %s.",
                     pluralize(self.num_commands, "command"), timer)
        # Report the results to the caller.
        return self.results
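# Usage sketch for run(); the imports follow the executor package's layout
# (executor.concurrent) but the hostnames are hypothetical:
from executor import ExternalCommand
from executor.concurrent import CommandPool

pool = CommandPool(concurrency=2)
for host in 'alpha.local', 'beta.local':
    pool.add(ExternalCommand('ping', '-c1', host, check=False))
results = pool.run()
print("Collected %i results." % len(results))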