Example #1
 def ensure_supported_platform(self):
     """
     Make sure we're running on a supported platform.
     :raises: :exc:`.UnsupportedPlatformError` when the output of the
              ``uname`` command doesn't include the word 'Linux' and
              :attr:`force` is :data:`False`.
     When :attr:`force` is :data:`True` this method logs a warning message
     instead of raising an exception.
     """
     uname_output = self.source_context.capture('uname',
                                                check=False,
                                                shell=False)
     if 'linux' not in uname_output.lower():
         if self.force:
             logger.warning(
                 compact("""
                 It looks like you aren't running Linux (which is the only
                 platform supported by rsync-system-backup) however the -f,
                 --force option was given so I will continue anyway. Please
                 note that you are on your own if things break!
             """))
         else:
             raise UnsupportedPlatformError(
                 compact("""
                 It looks like you aren't running Linux, which is the only
                 platform supported by rsync-system-backup! You can use the
                 -f, --force option to override this sanity check. Please
                 note that you are on your own if things break.
             """))
Example #2
    def update_file(self, force=None):
        """
        Update the file with the contents of the files in the ``.d`` directory.

        :param force: Override the value of :attr:`force` (a boolean or
                      :data:`None`).
        :raises: :exc:`RefuseToOverwrite` when :attr:`force` is :data:`False`
                 and the contents of :attr:`filename` were modified.
        """
        if force is None:
            force = self.force
        if not self.context.is_directory(self.directory):
            # Create the .d directory.
            logger.info("Creating directory %s ..", format_path(self.directory))
            self.context.execute('mkdir', '-p', self.directory, tty=False)
            # Move the original file into the .d directory.
            local_file = os.path.join(self.directory, 'local')
            logger.info("Moving %s to %s ..", format_path(self.filename), format_path(local_file))
            self.context.execute('mv', self.filename, local_file, tty=False)
        # Read the modular configuration file(s).
        blocks = []
        for entry in natsort(self.context.list_entries(self.directory)):
            if not entry.startswith('.'):
                filename = os.path.join(self.directory, entry)
                if self.context.is_executable(filename):
                    blocks.append(self.execute_file(filename))
                else:
                    blocks.append(self.read_file(filename))
        contents = b"\n\n".join(blocks)
        # Make sure the generated file was not modified? We skip this on the
        # first run, when the original file was just moved into the newly
        # created directory (see above).
        if all(map(self.context.is_file, (self.filename, self.checksum_file))):
            logger.info("Checking for local changes to %s ..", format_path(self.filename))
            if self.new_checksum != self.old_checksum:
                if force:
                    logger.warning(compact(
                        """
                        The contents of the file to generate ({filename})
                        were modified but --force was used so overwriting
                        anyway!
                        """,
                        filename=format_path(self.filename),
                    ))
                else:
                    raise RefuseToOverwrite(compact(
                        """
                        The contents of the file to generate ({filename})
                        were modified and I'm refusing to overwrite it! If
                        you're sure you want to proceed, use the --force
                        option or delete the file {checksum_file} and
                        retry.
                        """,
                        filename=format_path(self.filename),
                        checksum_file=format_path(self.checksum_file),
                    ))
        # Update the generated configuration file.
        self.write_file(self.filename, contents)
        # Update the checksum file.
        self.context.write_file(self.checksum_file, self.new_checksum)
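The ``.d`` directory convention that update_file() implements is easier to follow with a concrete layout in mind. The filenames below are made up for illustration:

# Hypothetical layout for self.filename = '/etc/foo.conf' after the first run:
#
#   /etc/foo.conf               (regenerated: blocks joined by blank lines)
#   /etc/foo.conf.d/local       (the original file, moved here on the first run)
#   /etc/foo.conf.d/10-extra    (readable fragment, included as-is)
#   /etc/foo.conf.d/20-dynamic  (executable, so its output is included)
#
# Entries are combined in natsort() order and dotfiles are skipped.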
Example #3
 def test_compact(self):
     assert compact(' a \n\n b ') == 'a b'
     assert compact('''
         %s template notation
     ''', 'Simple') == 'Simple template notation'
     assert compact('''
         More {type} template notation
     ''', type='readable') == 'More readable template notation'
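The assertions above pin down the contract of compact(): runs of whitespace collapse to single spaces, positional arguments use %-style notation and keyword arguments use {}-style notation. Here is a minimal sketch that satisfies exactly these assertions; it is not humanfriendly's actual implementation, which may differ in details:

def compact_sketch(text, *args, **kw):
    # Collapse every run of whitespace (including newlines) to one space.
    collapsed = ' '.join(text.split())
    # Apply %-style positional substitution when positional args are given.
    if args:
        collapsed = collapsed % args
    # Apply {}-style keyword substitution when keyword args are given.
    if kw:
        collapsed = collapsed.format(**kw)
    return collapsed

assert compact_sketch(' a \n\n b ') == 'a b'
assert compact_sketch('%s template notation', 'Simple') == 'Simple template notation'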
Example #4
 def test_compact(self):
     """Test :func:`humanfriendly.text.compact()`."""
     assert compact(' a \n\n b ') == 'a b'
     assert compact('''
         %s template notation
     ''', 'Simple') == 'Simple template notation'
     assert compact('''
         More {type} template notation
     ''', type='readable') == 'More readable template notation'
Example #5
 def test_compact(self):
     """Test :func:`humanfriendly.text.compact()`."""
     assert compact(' a \n\n b ') == 'a b'
     assert compact('''
         %s template notation
     ''', 'Simple') == 'Simple template notation'
     assert compact('''
         More {type} template notation
     ''', type='readable') == 'More readable template notation'
Example #6
    def update_file(self, force=None):
        """
        Update the file with the contents of the files in the ``.d`` directory.

        :param force: Override the value of :attr:`force` (a boolean or
                      :data:`None`).
        :raises: :exc:`RefuseToOverwrite` when :attr:`force` is :data:`False`
                 and the contents of :attr:`filename` were modified.
        """
        if force is None:
            force = self.force
        if not self.context.is_directory(self.directory):
            # Create the .d directory.
            logger.info("Creating directory %s", format_path(self.directory))
            self.context.execute('mkdir', '-p', self.directory, tty=False)
            # Move the original file into the .d directory.
            local_file = os.path.join(self.directory, 'local')
            logger.info("Moving %s to %s", format_path(self.filename), format_path(local_file))
            self.context.execute('mv', self.filename, local_file, tty=False)
        # Read the modularized configuration file(s).
        blocks = []
        for filename in natsort(self.context.list_entries(self.directory)):
            if not filename.startswith('.'):
                blocks.append(self.read_file(os.path.join(self.directory, filename)))
        contents = b"\n\n".join(blocks)
        # Make sure the generated file was not modified? We skip this on the
        # first run, when the original file was just moved into the newly
        # created directory (see above).
        if all(map(self.context.is_file, (self.filename, self.checksum_file))):
            logger.info("Checking for local changes to %s ..", format_path(self.filename))
            if self.hash_contents() != self.context.read_file(self.checksum_file):
                if force:
                    logger.warning(compact(
                        """
                        The contents of the file to generate ({filename})
                        were modified but --force was used so overwriting
                        anyway!
                        """,
                        filename=format_path(self.filename),
                    ))
                else:
                    raise RefuseToOverwrite(compact(
                        """
                        The contents of the file to generate ({filename})
                        were modified and I'm refusing to overwrite it! If
                        you're sure you want to proceed, use the --force
                        option or delete the file {checksum_file} and
                        retry.
                        """,
                        filename=format_path(self.filename),
                        checksum_file=format_path(self.checksum_file),
                    ))
        # Update the generated configuration file.
        self.write_file(self.filename, contents)
        # Update the checksum file.
        self.context.write_file(self.checksum_file, self.hash_contents())
Example #7
    def read(self):
        """
        Wait for a JSON encoded message from the remote side.

        The basic communication protocol is really simple:

        1. First an ASCII encoded integer number is received, terminated by a
           newline.
        2. Second the number of bytes given by step 1 is read and interpreted
           as a JSON encoded value. This step is not terminated by a newline.

        That's it :-).

        :returns: The JSON value decoded to a Python value.
        :raises: :exc:`ProtocolError` when the remote side violates the
                 defined protocol.
        """
        logger.debug("Waiting for message from other side ..")
        # Wait for a line containing an integer byte count.
        line = self.raw_readline().strip()
        if not line.isdigit():
            # Complain loudly about protocol errors :-).
            raise ProtocolError(
                compact("""
                Received invalid input from remote side! I was expecting a
                byte count, but what I got instead was the line {input}!
            """,
                        input=repr(line)))
        else:
            # First we get a line containing a byte count, then we read
            # that number of bytes from the remote side and decode it as a
            # JSON encoded message.
            num_bytes = int(line, 10)
            logger.debug("Reading message of %i bytes ..", num_bytes)
            encoded_value = self.raw_read(num_bytes)
            try:
                decoded_value = json.loads(encoded_value)
                logger.debug("Parsed message: %s", decoded_value)
                return decoded_value
            except Exception as e:
                logger.exception("Failed to parse JSON formatted message!")
                raise ProtocolError(
                    compact("""
                    Failed to decode message from remote side as JSON!
                    Tried to decode message {message}. Original error:
                    {error}.
                """,
                            message=repr(encoded_value),
                            error=str(e)))
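The sending side of this protocol isn't shown in the example. Here's a minimal sketch of a compatible writer, assuming a binary stream object with write() and flush() methods (the name write_sketch is made up):

import json

def write_sketch(stream, value):
    # Step 1 of the protocol: an ASCII byte count terminated by a newline.
    payload = json.dumps(value).encode('UTF-8')
    stream.write(str(len(payload)).encode('ascii') + b'\n')
    # Step 2: the JSON encoded value itself, without a trailing newline.
    stream.write(payload)
    stream.flush()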
Example #8
    def normalize_whitespace(self, text):
        """
        Normalize the whitespace in a chat message before rendering on the terminal.

        :param text: The chat message text (a string).
        :returns: The normalized text (a string).

        This method works as follows:

        - First leading and trailing whitespace is stripped from the text.
        - When the resulting text consists of a single line, it is processed
          using :func:`~humanfriendly.text.compact()` and returned.
        - When the resulting text contains multiple lines the text is prefixed
          with a newline character, so that the chat message starts on its own
          line. This ensures that messages requiring vertical alignment render
          properly (for example a table drawn with ``|`` and ``-`` characters).
        """
        # Check for multi-line chat messages.
        stripped = text.strip()
        if "\n" in stripped:
            # When the message contains "significant" newline
            # characters we start the message on its own line.
            return "\n" + stripped
        else:
            # When the message doesn't contain significant newline characters
            # we compact all whitespace in the message. I added this when I
            # found that quite a few of the HTML fragments in my personal chat
            # archive contain very inconsistent whitespace, which bothered me
            # when I viewed them on the terminal.
            return compact(text)
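Hypothetical before/after pairs for the two code paths described above:

# Single line after strip(): whitespace is compacted.
#   normalize_whitespace('   hello    world   ')  -> 'hello world'
# Multiple lines after strip(): a leading newline preserves vertical alignment.
#   normalize_whitespace('a | b\n--+--\n')        -> '\na | b\n--+--'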
Example #9
def setUpModule():
    """
    Prepare the test suite.

    Sets up logging to the terminal. When a test fails the logging output can
    help to perform a post-mortem analysis of the failure in question (even
    when its hard to reproduce locally). This is especially useful when
    debugging remote test failures, whether they happened on Travis CI or a
    user's local system.

    Also makes sure that the Apache web server is installed and running because
    this is required to run the test suite.
    """
    # Set up logging to the terminal.
    coloredlogs.install(level=logging.DEBUG)
    # Make sure Apache is installed and configured.
    try:
        manager = ApacheManager()
        manager.fetch_status_page(manager.text_status_url)
    except Exception as e:
        logger.exception("Failed to connect to local Apache server!")
        raise Exception(
            compact("""
            Please make sure the Apache web server is installed and configured
            (running) before you run this test suite because this test suite
            tests the actual integration with Apache (it doesn't use mocking)
            and so requires Apache to be installed, configured and running.

            Swallowed exception: {message} ({type})
        """,
                    message=e,
                    type=type(e)))
Example #10
def find_character_device(port_name):
    """
    Find the character device for the given port name.

    :param port_name: The name of the virtio port (a string).
    :returns: The absolute pathname of a character device (a string).
    :raises: :exc:`Exception` when the character device cannot be found.
    """
    root = '/sys/class/virtio-ports'
    logger.debug(
        "Automatically selecting appropriate character device based on %s ..",
        root)
    for entry in os.listdir(root):
        name_file = os.path.join(root, entry, 'name')
        if os.path.isfile(name_file):
            with open(name_file) as handle:
                contents = handle.read().strip()
            if contents == port_name:
                character_device = '/dev/%s' % entry
                logger.debug("Selected character device: %s", character_device)
                return character_device
    raise Exception(
        compact("""
        Failed to select the appropriate character device for the port name
        {name}! This is probably caused by a configuration issue on either the
        QEMU host or inside the QEMU guest. Please refer to the following web
        page for help: http://negotiator.readthedocs.org/en/latest/#character-device-detection-fails
    """,
                name=repr(port_name)))
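A hypothetical sysfs layout illustrating what find_character_device() scans; the port name and directory entry below are made up for illustration:

#   /sys/class/virtio-ports/vport0p1/name  -> contains "my.example.port"
#
# so find_character_device('my.example.port') returns '/dev/vport0p1'.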
Example #11
    def test_sphinx_integration(self):
        """Tests for the :mod:`property_manager.sphinx` module."""
        class FakeApp(object):
            def __init__(self):
                self.callbacks = {}

            def connect(self, event, callback):
                self.callbacks.setdefault(event, []).append(callback)

        app = FakeApp()
        setup(app)
        assert append_property_docs in app.callbacks['autodoc-process-docstring']
        lines = ["Some boring description."]
        obj = TypeInspector
        append_property_docs(app=app,
                             what=None,
                             name=None,
                             obj=obj,
                             options=None,
                             lines=lines)
        assert len(lines) > 0
        assert lines[0] == "Some boring description."
        assert not lines[1]
        assert lines[2] == compact("""
            When you initialize a :class:`TypeInspector` object you are
            required to provide a value for the :attr:`type` property. You can
            set the value of the :attr:`type` property by passing a keyword
            argument to the class initializer.
        """)
        assert not lines[3]
        assert lines[4] == "Here's an overview of the :class:`TypeInspector` class:"
Example #12
 def python_callback(self, value):
     """Automatically coerce :attr:`python_callback` to a callable value."""
     if value:
         # Python callers get to pass a callable directly.
         if not callable(value):
             expression = value
             # Otherwise we expect a string to parse (from a command line
             # argument, environment variable or configuration file).
             callback_path, _, callback_name = expression.partition(':')
             if os.path.isfile(callback_path):
                 # Callback specified as Python script.
                 script_name = os.path.basename(callback_path)
                 if script_name.endswith('.py'):
                     script_name, _ = os.path.splitext(script_name)
                 environment = dict(__file__=callback_path, __name__=script_name)
                 logger.debug("Loading Python callback from pathname: %s", callback_path)
                 with open(callback_path) as handle:
                     exec(handle.read(), environment)
                 value = environment.get(callback_name)
             else:
                 # Callback specified as `dotted path'.
                 logger.debug("Loading Python callback from dotted path: %s", callback_path)
                 module = importlib.import_module(callback_path)
                 value = getattr(module, callback_name, None)
             if not callable(value):
                 raise ValueError(compact("""
                     The Python callback expression {expr} didn't result in
                     a valid callable! (result: {value})
                 """, expr=expression, value=value))
     else:
         value = None
     set_property(self, 'python_callback', value)
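Hypothetical values accepted by the python_callback setter above (the callback names and paths are made up for illustration):

# obj.python_callback = my_function                 # a callable object
# obj.python_callback = '/etc/hooks/tweak.py:main'  # script pathname + callable name
# obj.python_callback = 'my_package.hooks:tweak'    # dotted path + callable name
# obj.python_callback = None                        # clears an existing callback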
Example #13
def setUpModule():
    """
    Prepare the test suite.

    Sets up logging to the terminal. When a test fails the logging output can
    help to perform a post-mortem analysis of the failure in question (even
    when its hard to reproduce locally). This is especially useful when
    debugging remote test failures, whether they happened on Travis CI or a
    user's local system.

    Also makes sure that the Apache web server is installed and running because
    this is required to run the test suite.
    """
    # Set up logging to the terminal.
    coloredlogs.install(level=logging.DEBUG)
    # Make sure Apache is installed and configured.
    try:
        manager = ApacheManager()
        manager.fetch_status_page(manager.text_status_url)
    except Exception as e:
        logger.exception("Failed to connect to local Apache server!")
        raise Exception(compact("""
            Please make sure the Apache web server is installed and configured
            (running) before you run this test suite because this test suite
            tests the actual integration with Apache (it doesn't use mocking)
            and so requires Apache to be installed, configured and running.

            Swallowed exception: {message} ({type})
        """, message=e, type=type(e)))
Example #14
    def change_brightness(self, raw_brightness):
        """
        Change the brightness of the display.

        This method writes the brightness to
        ``/sys/class/backlight/<name>/brightness``.

        :param raw_brightness: A number representing the brightness to be
                               configured.
        """
        logger.debug("Setting brightness of %s to %s (raw value) ..",
                     self.friendly_name, raw_brightness)
        try:
            filename = os.path.join(self.sys_directory, 'brightness')
            logger.debug("Writing %s ..", filename)
            with open(filename, 'w') as handle:
                handle.write(str(int(raw_brightness)))
        except IOError as e:
            if e.errno == errno.EACCES:
                # Give a user friendly explanation.
                raise IOError(
                    e.errno,
                    compact("""
                    To control backlight brightness you need super user privileges!
                    (consider using `sudo' to run the program?)
                """))
            # Don't swallow errors we don't know what to do with.
            raise
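For context, a hypothetical sysfs path written by change_brightness(), assuming self.sys_directory is /sys/class/backlight/intel_backlight (the device name is made up):

#   /sys/class/backlight/intel_backlight/brightness
#
# Writing a value like "150" to this file requires super user privileges,
# which is exactly the EACCES case handled above.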
Example #15
    def fetch_status_page(self, status_url):
        """
        Fetch an Apache status page and return its content.

        :param status_url: The URL of the status page (a string).
        :returns: The response body (a string).
        :raises: :exc:`.StatusPageError` if fetching of the status page fails.
        """
        timer = Timer()
        # Get the Apache status page.
        logger.debug("Fetching Apache status page from %s ..", status_url)
        try:
            response = urlopen(status_url)
        except HTTPError as e:
            # These objects can be treated as response objects.
            response = e
        # Validate the HTTP response status.
        response_code = response.getcode()
        if response_code != 200:
            # Record the failure.
            self.status_response = False
            # Notify the caller using a custom exception.
            raise StatusPageError(
                compact("""
                Failed to retrieve Apache status page from {url}! Expected to
                get HTTP response status 200, got {code} instead.
            """,
                        url=status_url,
                        code=response_code))
        response_body = response.read()
        logger.debug("Fetched %s in %s.", format_size(len(response_body)),
                     timer)
        self.status_response = True
        return response_body
Example #16
    def test_kill_active_worker(self):
        """Test killing of active workers based on memory usage thresholds."""
        if os.getuid() != 0:
            logger.warning(
                "Skipping test that kills active workers (superuser privileges are required)"
            )
            return
        pid_file = os.path.join(tempfile.gettempdir(),
                                'apache-manager-worker-pid.txt')
        with TemporaryWSGIApp('wsgi-memory-hog') as context:
            # Create a WSGI application that keeps allocating memory but never returns.
            context.install_wsgi_app('''
                import itertools
                import os
                import random
                import string

                def application(environ, start_response):
                    # Store the PID of the Apache worker handling this request.
                    with open({pid_file}, 'w') as handle:
                        handle.write(str(os.getpid()))
                    # Start the response.
                    start_response('200 OK', [])
                    # Keep allocating memory but never return.
                    random_heap_objects = []
                    for i in itertools.count():
                        random_heap_objects.append(random_string())

                def random_string():
                    length = random.randint(1024*512, 1024*1024)
                    characters = string.ascii_letters + string.digits
                    return ''.join(random.choice(characters) for i in range(length))
            ''',
                                     pid_file=repr(pid_file))
            # Activate the WSGI application by making a request.
            context.make_request()
            # Make sure the PID file was created.
            assert os.path.isfile(pid_file), compact("""
                It looks like the WSGI application (affectionately called
                "memory hog" :-) never got a chance to run! Please review the
                messages Apache emitted when its configuration was reloaded to
                pinpoint the cause of this issue.
            """)
            # Get the PID of the Apache worker handling the request.
            with open(pid_file) as handle:
                worker_pid = int(handle.read())

            # Use the Apache manager to kill the worker handling the request.
            def kill_active_worker():
                manager = ApacheManager()
                killed_processes = manager.kill_workers(
                    max_memory_active=1024 * 1024 * 50)
                assert worker_pid in killed_processes

            # It might take a while for the worker to hit the memory limit.
            retry(kill_active_worker)
Example #17
 def listen_addresses(self):
     logger.debug("Discovering where Apache is listening by parsing %s ..", self.ports_config)
     # Make sure the configuration file exists.
     if not os.path.isfile(self.ports_config):
         raise AddressDiscoveryError(compact("""
             Failed to discover any addresses or ports that Apache is
             listening on! The configuration file {filename} is missing. Are
             you sure the Apache web server is properly installed? If so
             you'll have to specify the configuration's location.
         """, filename=self.ports_config))
     matched_addresses = []
     pattern = re.compile(r'^(.+):(\d+)$')
     with open(self.ports_config) as handle:
         for lnum, line in enumerate(handle, start=1):
             tokens = line.split()
             if len(tokens) >= 2 and tokens[0] == 'Listen':
                 parsed_value = None
                 if tokens[1].isdigit():
                     parsed_value = NetworkAddress(port=int(tokens[1]))
                 else:
                     match = pattern.match(tokens[1])
                     if match:
                         address = match.group(1)
                         port = int(match.group(2))
                         if address == '0.0.0.0':
                             address = '127.0.0.1'
                         parsed_value = NetworkAddress(address=address, port=port)
                 if parsed_value is not None:
                     if len(tokens) >= 3:
                         parsed_value.protocol = tokens[2]
                     logger.debug("Parsed listen directive on line %i: %s", lnum, parsed_value)
                     matched_addresses.append(parsed_value)
                 else:
                     logger.warning("Failed to parse listen directive on line %i: %s", lnum, line)
     if not matched_addresses:
         raise AddressDiscoveryError(compact("""
             Failed to discover any addresses or ports that Apache is
             listening on! Maybe I'm parsing the wrong configuration file?
             ({filename})
         """, filename=self.ports_config))
     logger.debug("Discovered %s that Apache is listening on: %s",
                  pluralize(len(matched_addresses), "address", "addresses"),
                  concatenate(map(str, matched_addresses)))
     return matched_addresses
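A minimal, self-contained demonstration of the parsing rules above, run against fabricated ``Listen`` directives (not a real Apache configuration):

import re

pattern = re.compile(r'^(.+):(\d+)$')
for line in ('Listen 80', 'Listen 127.0.0.1:8080', 'Listen 0.0.0.0:443 https'):
    tokens = line.split()
    if len(tokens) >= 2 and tokens[0] == 'Listen':
        if tokens[1].isdigit():
            print('port only:', int(tokens[1]))
        else:
            match = pattern.match(tokens[1])
            if match:
                # 0.0.0.0 is rewritten to 127.0.0.1 to get a connectable address.
                address = '127.0.0.1' if match.group(1) == '0.0.0.0' else match.group(1)
                protocol = tokens[2] if len(tokens) >= 3 else 'default'
                print('address %s, port %i, protocol %s'
                      % (address, int(match.group(2)), protocol))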
Example #18
 def slots(self):
     soup = BeautifulSoup(self.html_status, "html.parser")
     required_columns = [normalize_text(c) for c in STATUS_COLUMNS]
     for table in soup.findAll('table'):
         matched_rows = list(parse_status_table(table))
         validated_rows = [r for r in matched_rows if all(c in r for c in required_columns)]
         if validated_rows:
             return [WorkerStatus(status_fields=f) for f in validated_rows]
     raise StatusPageError(compact("""
         Failed to parse Apache status page! No tables found containing all
         of the required column headings and at least one row of data that
         could be parsed.
     """))
Example #19
    def test_kill_active_worker(self):
        """Test killing of active workers based on memory usage thresholds."""
        if os.getuid() != 0:
            logger.warning("Skipping test that kills active workers (superuser privileges are required)")
            return
        pid_file = os.path.join(tempfile.gettempdir(), 'apache-manager-worker-pid.txt')
        with TemporaryWSGIApp('wsgi-memory-hog') as context:
            # Create a WSGI application that keeps allocating memory but never returns.
            context.install_wsgi_app('''
                import itertools
                import os
                import random
                import string

                def application(environ, start_response):
                    # Store the PID of the Apache worker handling this request.
                    with open({pid_file}, 'w') as handle:
                        handle.write(str(os.getpid()))
                    # Start the response.
                    start_response('200 OK', [])
                    # Keep allocating memory but never return.
                    random_heap_objects = []
                    for i in itertools.count():
                        random_heap_objects.append(random_string())

                def random_string():
                    length = random.randint(1024*512, 1024*1024)
                    characters = string.ascii_letters + string.digits
                    return ''.join(random.choice(characters) for i in range(length))
            ''', pid_file=repr(pid_file))
            # Activate the WSGI application by making a request.
            context.make_request()
            # Make sure the PID file was created.
            assert os.path.isfile(pid_file), compact("""
                It looks like the WSGI application (affectionately called
                "memory hog" :-) never got a chance to run! Please review the
                messages Apache emitted when its configuration was reloaded to
                pinpoint the cause of this issue.
            """)
            # Get the PID of the Apache worker handling the request.
            with open(pid_file) as handle:
                worker_pid = int(handle.read())

            # Use the Apache manager to kill the worker handling the request.
            def kill_active_worker():
                manager = ApacheManager()
                killed_processes = manager.kill_workers(max_memory_active=1024*1024*50)
                assert worker_pid in killed_processes

            # It might take a while for the worker to hit the memory limit.
            retry(kill_active_worker)
Example #20
    def skipTest(self, text, *args, **kw):
        """
        Enable backwards compatible "marking of tests to skip".

        By calling this method from a return statement in the test to be
        skipped the test can be marked as skipped when possible, without
        breaking the test suite when unittest.TestCase.skipTest() isn't
        available.
        """
        reason = compact(text, *args, **kw)
        try:
            super(PipAccelTestCase, self).skipTest(reason)
        except AttributeError:
            # unittest.TestCase.skipTest() isn't available in Python 2.6.
            logger.warning("%s", reason)
Example #21
 def required_hint(self):
     """A hint about required properties (a string or :data:`None`)."""
     names = sorted(name for name, value in self.custom_properties
                    if value.required)
     if names:
         return compact(
             """
             When you initialize a :class:`{type}` object you are required
             to provide {values} for the {required} {properties}.
             """,
             type=self.type.__name__,
             required=self.format_properties(names),
             values=("a value" if len(names) == 1 else "values"),
             properties=("property" if len(names) == 1 else "properties"),
         )
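A hypothetical rendering of the hint generated above, for a class with two required properties (assuming format_properties() produces the ``:attr:`` references):

#   When you initialize a :class:`Location` object you are required to
#   provide values for the :attr:`city` and :attr:`country` properties.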
Example #22
    def skipTest(self, text, *args, **kw):
        """
        Enable backwards compatible "marking of tests to skip".

        By calling this method from a return statement in the test to be
        skipped the test can be marked as skipped when possible, without
        breaking the test suite when unittest.TestCase.skipTest() isn't
        available.
        """
        reason = compact(text, *args, **kw)
        try:
            super(AptMirrorUpdaterTestCase, self).skipTest(reason)
        except AttributeError:
            # unittest.TestCase.skipTest() isn't available in Python 2.6.
            logger.warning("%s", reason)
Example #23
def main():
    """Command line interface."""
    coloredlogs.install(level='debug')
    arguments = sys.argv[1:]
    if arguments:
        interpret_script(arguments[0])
    else:
        logger.notice(compact("""
            This script requires the 'urxvt' terminal emulator and the
            ImageMagick command line programs 'convert' and 'import' to be
            installed. Don't switch windows while the screenshots are being
            generated because it seems that 'import' can only take screenshots
            of foreground windows.
        """))
        generate_screenshots()
Example #24
    def parent_directory(self):
        """
        The pathname of the parent directory of the backup directory (a string).

        :raises: :exc:`.ParentDirectoryUnavailable` when the parent directory
                 can't be determined because :attr:`directory` is empty or '/'.
        """
        directory = os.path.dirname(self.directory.rstrip('/'))
        if not directory:
            raise ParentDirectoryUnavailable(compact("""
                Failed to determine the parent directory of the destination
                directory! This makes it impossible to create and rotate
                snapshots for the destination {dest}.
            """, dest=self.expression))
        return directory
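A worked example of the dirname() logic above (the destination path is hypothetical):

import os.path

assert os.path.dirname('/mnt/backups/latest/'.rstrip('/')) == '/mnt/backups'
# The root directory has no usable parent, which triggers the exception:
assert os.path.dirname('/'.rstrip('/')) == ''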
Example #25
    def test_kill_worker_that_times_out(self):
        """Test killing of active workers based on time usage thresholds."""
        if os.getuid() != 0:
            logger.warning(
                "Skipping test that kills workers that time out (superuser privileges are required)"
            )
            return
        pid_file = os.path.join(tempfile.gettempdir(),
                                'apache-manager-worker-pid.txt')
        with TemporaryWSGIApp('wsgi-timeout') as context:
            # Create a WSGI application that doesn't allocate too much memory but never returns.
            context.install_wsgi_app('''
                import itertools
                import os
                import time

                def application(environ, start_response):
                    # Store the PID of the Apache worker handling this request.
                    with open({pid_file}, 'w') as handle:
                        handle.write(str(os.getpid()))
                    # Start the response.
                    start_response('200 OK', [])
                    # Waste time doing nothing ;-).
                    for i in itertools.count():
                        time.sleep(1)
            ''',
                                     pid_file=repr(pid_file))
            # Activate the WSGI application by making a request.
            context.make_request()
            # Make sure the PID file was created.
            assert os.path.isfile(pid_file), compact("""
                It looks like the WSGI application (called "wsgi-timeout")
                never got a chance to run! Please review the messages Apache
                emitted when its configuration was reloaded to pinpoint the
                cause of this issue.
            """)
            # Get the PID of the Apache worker handling the request.
            with open(pid_file) as handle:
                worker_pid = int(handle.read())

            # Use the Apache manager to kill the worker handling the request.
            def kill_timeout_worker():
                manager = ApacheManager()
                killed_processes = manager.kill_workers(timeout=30)
                assert worker_pid in killed_processes

            # It will take a while for the worker to hit the time limit.
            retry(kill_timeout_worker)
Example #26
 def initializer_hint(self):
     """A hint that properties can be set using keyword arguments to the initializer (a string or :data:`None`)."""
     names = sorted(name for name, value in self.custom_properties
                    if value.key or value.required or value.writable)
     if names:
         return compact(
             """
             You can set the {values} of the {names} {properties}
             by passing {arguments} to the class initializer.
             """,
             names=self.format_properties(names),
             values=("value" if len(names) == 1 else "values"),
             properties=("property" if len(names) == 1 else "properties"),
             arguments=("a keyword argument"
                        if len(names) == 1 else "keyword arguments"),
         )
Example #27
 def fetch_status_page(self, status_url):
     timer = Timer()
     logger.debug("Fetching Apache status page from %s ..", status_url)
     try:
         response = urlopen(status_url)
     except HTTPError as e:
         response = e
     response_code = response.getcode()
     if response_code != 200:
         self.status_response = False
         raise StatusPageError(compact("""
             Failed to retrieve Apache status page from {url}! Expected to
             get HTTP response status 200, got {code} instead.
         """, url=status_url, code=response_code))
     response_body = response.read()
     logger.debug("Fetched %s in %s.", format_size(len(response_body)), timer)
     self.status_response = True
     return response_body
Example #28
    def test_kill_worker_that_times_out(self):
        """Test killing of active workers based on time usage thresholds."""
        if os.getuid() != 0:
            logger.warning("Skipping test that kills workers that time out (superuser privileges are required)")
            return
        pid_file = os.path.join(tempfile.gettempdir(), 'apache-manager-worker-pid.txt')
        with TemporaryWSGIApp('wsgi-timeout') as context:
            # Create a WSGI application that doesn't allocate too much memory but never returns.
            context.install_wsgi_app('''
                import itertools
                import os
                import time

                def application(environ, start_response):
                    # Store the PID of the Apache worker handling this request.
                    with open({pid_file}, 'w') as handle:
                        handle.write(str(os.getpid()))
                    # Start the response.
                    start_response('200 OK', [])
                    # Waste time doing nothing ;-).
                    for i in itertools.count():
                        time.sleep(1)
            ''', pid_file=repr(pid_file))
            # Activate the WSGI application by making a request.
            context.make_request()
            # Make sure the PID file was created.
            assert os.path.isfile(pid_file), compact("""
                It looks like the WSGI application (called "wsgi-timeout")
                never got a chance to run! Please review the messages Apache
                emitted when its configuration was reloaded to pinpoint the
                cause of this issue.
            """)
            # Get the PID of the Apache worker handling the request.
            with open(pid_file) as handle:
                worker_pid = int(handle.read())

            # Use the Apache manager to kill the worker handling the request.
            def kill_timeout_worker():
                manager = ApacheManager()
                killed_processes = manager.kill_workers(timeout=30)
                assert worker_pid in killed_processes

            # It will take a while for the worker to hit the time limit.
            retry(kill_timeout_worker)
Example #29
    def slots(self):
        """
        The status of Apache workers (a list of :class:`WorkerStatus` objects).

        :raises: Any exceptions raised by :attr:`html_status` or
                 :exc:`.StatusPageError` if parsing of the Apache status page
                 fails.

        The :attr:`slots` property contains one :class:`WorkerStatus` object
        for each worker "slot" that Apache has allocated. This means that some
        of the :class:`WorkerStatus` objects may not have expected properties
        like :attr:`~WorkerStatus.pid` because they describe an "empty slot".
        See the :attr:`workers` property for a list of :class:`WorkerStatus`
        objects without empty slots.
        """
        # Use BeautifulSoup to parse the HTML response body.
        soup = BeautifulSoup(self.html_status, "html.parser")
        # Prepare a list of normalized column headings expected to be defined in the table.
        required_columns = [normalize_text(c) for c in STATUS_COLUMNS]
        # Check each table on the Apache status page, because different
        # multiprocessing modules result in a status page with a different
        # number of tables and the table with worker details is not clearly
        # marked as such in the HTML output ...
        for table in soup.findAll('table'):
            # Parse the table into a list of dictionaries, one for each row.
            matched_rows = list(parse_status_table(table))
            # Filter out rows that don't contain the required columns.
            validated_rows = [
                r for r in matched_rows
                if all(c in r for c in required_columns)
            ]
            # If one or more rows remain we found the right table! :-)
            if validated_rows:
                return [WorkerStatus(status_fields=f) for f in validated_rows]
        raise StatusPageError(
            compact("""
            Failed to parse Apache status page! No tables found containing all
            of the required column headings and at least one row of data that
            could be parsed.
        """))
Example #30
 def destination_context(self):
     """
     The execution context of the system that stores the backup (the destination).
     This is an execution context created by :mod:`executor.contexts`.
     :raises: :exc:`.DestinationContextUnavailable` when the destination is
              an rsync daemon module (which doesn't allow arbitrary command
              execution).
     """
     if self.destination.module:
         raise DestinationContextUnavailable(
             compact("""
             Error: The execution context of the backup destination isn't
             available because the destination ({dest}) is an rsync daemon
             module! (tip: reconsider your command line options)
         """,
                     dest=self.destination.expression))
     else:
         context_opts = dict(sudo=self.sudo_enabled)
         if self.destination.hostname:
             context_opts['ssh_alias'] = self.destination.hostname
             context_opts['ssh_user'] = self.destination.username
         return create_context(**context_opts)
Example #31
    def transform_version(self, package_to_convert, python_requirement_name,
                          python_requirement_version):
        """
        Transform a Python requirement version to a Debian version number.

        :param package_to_convert: The :class:`.PackageToConvert` whose
                                   requirement is being transformed.
        :param python_requirement_name: The name of a Python package
                                        as found on PyPI (a string).
        :param python_requirement_version: The required version of the
                                           Python package (a string).
        :returns: The transformed version (a string).

        This method is a wrapper for :func:`.normalize_package_version()` that
        takes care of one additional quirk to ensure compatibility with pip.
        Explaining this quirk requires a bit of context:

        - When package A requires package B (via ``install_requires``) and
          package A absolutely pins the required version of package B using one
          or more trailing zeros (e.g. ``B==1.0.0``) but the actual version
          number of package B (embedded in the metadata of package B) contains
          less trailing zeros (e.g. ``1.0``) then pip will not complain but
          silently fetch version ``1.0`` of package B to satisfy the
          requirement.

        - However this doesn't change the absolutely pinned version in the
          ``install_requires`` metadata of package A.

        - When py2deb converts the resulting requirement set, the dependency of
          package A is converted as ``B (= 1.0.0)``. The resulting packages
          will not be installable because ``apt`` considers ``1.0`` to be
          different from ``1.0.0``.

        This method analyzes the requirement set to identify occurrences of
        this quirk and strip trailing zeros in ``install_requires`` metadata
        that would otherwise result in converted packages that cannot be
        installed.
        """
        matching_packages = [
            pkg for pkg in self.packages_to_convert
            if package_names_match(pkg.python_name, python_requirement_name)
        ]
        if len(matching_packages) > 1:
            # My assumption while writing this code is that this should never
            # happen. This check is to make sure that if it does happen it will
            # be noticed because the last thing I want is for this `hack' to
            # result in packages that are silently wrongly converted.
            normalized_name = normalize_package_name(python_requirement_name)
            num_matches = len(matching_packages)
            raise Exception(
                compact("""
                Expected requirement set to contain exactly one Python package
                whose name can be normalized to {name} but encountered {count}
                packages instead! (matching packages: {matches})
            """,
                        name=normalized_name,
                        count=num_matches,
                        matches=matching_packages))
        elif matching_packages:
            # Check whether the version number included in the requirement set
            # matches the version number in a package's requirements.
            requirement_to_convert = matching_packages[0]
            if python_requirement_version != requirement_to_convert.python_version:
                logger.debug(
                    "Checking whether to strip trailing zeros from required version .."
                )
                # Check whether the version numbers share the same prefix.
                required_version = tokenize_version(python_requirement_version)
                included_version = tokenize_version(
                    requirement_to_convert.python_version)
                common_length = min(len(required_version),
                                    len(included_version))
                required_prefix = required_version[:common_length]
                included_prefix = included_version[:common_length]
                prefixes_match = (required_prefix == included_prefix)
                logger.debug("Prefix of required version: %s", required_prefix)
                logger.debug("Prefix of included version: %s", included_prefix)
                logger.debug("Prefixes match? %s", prefixes_match)
                # Check if 1) only the required version has a suffix and 2) this
                # suffix consists only of trailing zeros.
                required_suffix = required_version[common_length:]
                included_suffix = included_version[common_length:]
                logger.debug("Suffix of required version: %s", required_suffix)
                logger.debug("Suffix of included version: %s", included_suffix)
                if prefixes_match and required_suffix and not included_suffix:
                    # Check whether the suffix of the required version contains
                    # only zeros, i.e. pip considers the version numbers the same
                    # although apt would not agree.
                    if all(
                            re.match('^0+$', t) for t in required_suffix
                            if t.isdigit()):
                        modified_version = ''.join(required_prefix)
                        logger.warning(
                            "Stripping superfluous trailing zeros from required"
                            " version of %s required by %s! (%s -> %s)",
                            python_requirement_name,
                            package_to_convert.python_name,
                            python_requirement_version, modified_version)
                        python_requirement_version = modified_version
        return normalize_package_version(
            python_requirement_version,
            prerelease_workaround=self.prerelease_workaround)
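A worked example of the trailing zeros quirk handled above (the package names and versions are hypothetical):

# Package A declares install_requires=['B==1.0.0'] while package B's own
# metadata says its version is '1.0'. pip treats 1.0.0 and 1.0 as equal,
# apt does not. transform_version() therefore strips the superfluous
# trailing zero so the converted dependency becomes "B (= 1.0)" and the
# resulting Debian packages remain installable.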
Example #32
:data:`True` if usage notes are enabled, :data:`False` otherwise.

This defaults to the environment variable :data:`USAGE_NOTES_VARIABLE` (coerced
using :func:`~humanfriendly.coerce_boolean()`) when available, otherwise
:data:`SPHINX_ACTIVE` determines the default value.

Usage notes are only injected when Sphinx is running, for performance reasons.
It's nothing critical of course, but modifying hundreds or thousands of
docstrings that no one is going to look at seems rather pointless :-).
"""

NOTHING = object()
"""A unique object instance used to detect missing attributes."""

CUSTOM_PROPERTY_NOTE = compact("""
    The :attr:`{name}` property is a :class:`~{type}`.
""")

DYNAMIC_PROPERTY_NOTE = compact("""
    The :attr:`{name}` property is a :class:`~{type}`.
""")

ENVIRONMENT_PROPERTY_NOTE = compact("""
    If the environment variable ``${variable}`` is set it overrides the
    computed value of this property.
""")

REQUIRED_PROPERTY_NOTE = compact("""
    You are required to provide a value for this property by calling the
    constructor of the class that defines the property with a keyword argument
    named `{name}` (unless a custom constructor is defined, in this case please
    refer to its documentation).
""")
Example #33
    def listen_addresses(self):
        """
        The network address(es) where Apache is listening (a list of :class:`NetworkAddress` objects).

        :raises: :exc:`.AddressDiscoveryError` when discovery fails (e.g. because
                 ``/etc/apache2/ports.conf`` is missing or can't be parsed).

        Here's an example:

        >>> from apache_manager import ApacheManager
        >>> manager = ApacheManager()
        >>> manager.listen_addresses
        [NetworkAddress(protocol='http',
                        address='127.0.0.1',
                        port=81,
                        url='http://127.0.0.1:81')]
        """
        logger.debug("Discovering where Apache is listening by parsing %s ..",
                     self.ports_config)
        # Make sure the configuration file exists.
        if not os.path.isfile(self.ports_config):
            raise AddressDiscoveryError(
                compact("""
                Failed to discover any addresses or ports that Apache is
                listening on! The configuration file {filename} is missing. Are
                you sure the Apache web server is properly installed? If so
                you'll have to specify the configuration's location.
            """,
                        filename=self.ports_config))
        # Parse the configuration file.
        matched_addresses = []
        pattern = re.compile(r'^(.+):(\d+)$')
        with open(self.ports_config) as handle:
            for lnum, line in enumerate(handle, start=1):
                tokens = line.split()
                # We are looking for `Listen' directives.
                if len(tokens) >= 2 and tokens[0] == 'Listen':
                    parsed_value = None
                    # Check for a port number without an IP address.
                    if tokens[1].isdigit():
                        parsed_value = NetworkAddress(port=int(tokens[1]))
                    else:
                        # Check for an IP address with a port number.
                        match = pattern.match(tokens[1])
                        if match:
                            address = match.group(1)
                            port = int(match.group(2))
                            if address == '0.0.0.0':
                                address = '127.0.0.1'
                            parsed_value = NetworkAddress(address=address,
                                                          port=port)
                    # Check if we have a match.
                    if parsed_value is not None:
                        # Override the protocol if necessary.
                        if len(tokens) >= 3:
                            parsed_value.protocol = tokens[2]
                        logger.debug("Parsed listen directive on line %i: %s",
                                     lnum, parsed_value)
                        matched_addresses.append(parsed_value)
                    else:
                        logger.warning(
                            "Failed to parse listen directive on line %i: %s",
                            lnum, line)
        # Sanity check the results.
        if not matched_addresses:
            raise AddressDiscoveryError(
                compact("""
                Failed to discover any addresses or ports that Apache is
                listening on! Maybe I'm parsing the wrong configuration file?
                ({filename})
            """,
                        filename=self.ports_config))
        # Log and return sorted port numbers.
        logger.debug("Discovered %s that Apache is listening on: %s",
                     pluralize(len(matched_addresses), "address", "addresses"),
                     concatenate(map(str, matched_addresses)))
        return matched_addresses
Example #34
    def set_python_callback(self, expression):
        """
        Set a Python callback to be called during the conversion process.

        :param expression: One of the following:

                           1. A callable object (to be provided by Python API callers).
                           2. A string containing the pathname of a Python
                              script and the name of a callable, separated by a
                              colon. The Python script will be loaded using
                              :keyword:`exec`.
                           3. A string containing the "dotted path" of a Python
                              module and the name of a callable, separated by a
                              colon. The Python module will be loaded using
                              :func:`importlib.import_module()`.
                           4. Any value that evaluates to :data:`False` will
                              clear an existing callback (if any).
        :raises: :exc:`~exceptions.ValueError` when the given expression does
                 not result in a valid callable. :exc:`~exceptions.ImportError`
                 when the expression contains a dotted path that cannot be
                 imported.

        The callback will be called at the very last step before the binary
        package's metadata and contents are packaged as a ``*.deb`` archive.

        This allows arbitrary manipulation of resulting binary packages, e.g.
        changing package metadata or files to be packaged.

        An example use case:

        - Consider a dependency set (group of related packages) that has
          previously been converted and deployed.

        - A new version of the dependency set switches from Python package A to
          Python package B, where the two Python packages contain conflicting
          files (installed in the same location). This could happen when
          switching to a project's fork.

        - A deployment of the new dependency set will conflict with existing
          installations due to "unrelated" packages (in the eyes of ``apt`` and
          ``dpkg``) installing the same files.

        - By injecting a custom Python callback the user can mark package B as
          "replacing" and "breaking" package A. Refer to `section 7.6`_ of the
          Debian policy manual for details about the required binary control
          fields (hint: ``Replaces:`` and ``Breaks:``).

        .. warning:: The callback is responsible for not making changes that
                     would break the installation of the converted dependency
                     set!

        .. _section 7.6: https://www.debian.org/doc/debian-policy/ch-relationships.html#s-replaces
        """
        if expression:
            if callable(expression):
                # Python callers get to pass a callable directly.
                self.python_callback = expression
            else:
                # Otherwise we expect a string to parse (from a command line
                # argument, environment variable or configuration file).
                callback_path, _, callback_name = expression.partition(':')
                if os.path.isfile(callback_path):
                    # Callback specified as Python script.
                    script_name = os.path.basename(callback_path)
                    if script_name.endswith('.py'):
                        script_name, _ = os.path.splitext(script_name)
                    environment = dict(__file__=callback_path, __name__=script_name)
                    logger.debug("Loading Python callback from pathname: %s", callback_path)
                    with open(callback_path) as handle:
                        exec(handle.read(), environment)
                    self.python_callback = environment.get(callback_name)
                else:
                    # Callback specified as `dotted path'.
                    logger.debug("Loading Python callback from dotted path: %s", callback_path)
                    module = importlib.import_module(callback_path)
                    self.python_callback = getattr(module, callback_name, None)
                if not callable(self.python_callback):
                    raise ValueError(compact("""
                        The Python callback expression {expr} didn't result in
                        a valid callable! (result: {value})
                    """, expr=expression, value=self.python_callback))
        else:
            # Clear an existing callback (if any).
            self.python_callback = None
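A minimal usage sketch of the four accepted forms described in the docstring above; the ``converter`` object, the callable and the pathnames are illustrative assumptions, not part of the documented API:

# Hypothetical calls covering the four documented forms of `expression'.
converter.set_python_callback(my_callable)                       # 1. a callable object
converter.set_python_callback('/usr/local/lib/hooks.py:modify')  # 2. script pathname + callable name
converter.set_python_callback('my_package.hooks:modify')         # 3. dotted module path + callable name
converter.set_python_callback(None)                              # 4. clear the current callback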
Beispiel #35
0
def initialize_keys_device(image_file,
                           mapper_name,
                           mount_point,
                           volumes=(),
                           cleanup=None):
    """
    Initialize and activate the virtual keys device and use it to unlock encrypted volumes.

    :param image_file: The absolute pathname of the image file for the virtual
                       keys device (a string). If you are using an encrypted
                       root drive this file should reside on the ``/boot``
                       partition to avoid chicken and egg problems :-).
    :param mapper_name: The device mapper name for the virtual keys device (a
                        string).
    :param mount_point: The mount point for the virtual keys device (a string).
    :param volumes: An iterable of strings that match mapper names
                    configured in /etc/crypttab. If given, only these volumes
                    will be unlocked; by default it's empty, which means all
                    configured and available drives are unlocked.
    :param cleanup: :data:`True` to unmount and lock the virtual keys device
                    after use, :data:`False` to leave the device mounted or
                    :data:`None` to automatically figure out what the best
                    choice is (this is the default). See also
                    :func:`.have_systemd_dependencies()`.
    """
    first_run = not os.path.isfile(image_file)
    initialized = not first_run
    mapper_device = '/dev/mapper/%s' % mapper_name
    if cleanup is None:
        # Figure out whether it's safe to unmount and lock
        # the virtual keys device after we're done.
        if have_systemd_dependencies(mount_point):
            logger.notice(
                compact("""
                The virtual keys device will remain unlocked because
                you're running systemd and you appear to be affected
                by https://github.com/systemd/systemd/issues/3816.
            """))
            cleanup = False
        else:
            logger.verbose(
                compact("""
                Locking virtual keys device after use (this should be
                safe to do because it appears that you're not affected
                by https://github.com/systemd/systemd/issues/3816).
            """))
            cleanup = True
    try:
        # Create the virtual keys device (on the first run).
        if first_run:
            logger.info("Creating virtual keys device %s ..", image_file)
            execute('dd', 'if=/dev/zero', 'of=%s' % image_file,
                    'bs=%i' % (1024 * 1024), 'count=10')
            execute('cryptsetup', 'luksFormat', image_file)
        # Unlock the keys device.
        if not os.path.exists(mapper_device):
            logger.info("Unlocking virtual keys device %s ..", image_file)
            execute('cryptsetup', 'luksOpen', image_file, mapper_name)
        unlocked_timer = Timer()
        with finalizer('cryptsetup', 'luksClose', mapper_name,
                       enabled=cleanup):
            # Create a file system on the virtual keys device (on the first run).
            if first_run:
                logger.info("Creating file system on virtual keys device ..")
                execute('mkfs.ext4', mapper_device)
                initialized = True
            # Mount the virtual keys device.
            if not os.path.isdir(mount_point):
                os.makedirs(mount_point)
            if os.path.ismount(mount_point):
                logger.info("The virtual keys device is already mounted ..")
            else:
                logger.info("Mounting the virtual keys device ..")
                execute('mount', mapper_device, mount_point)
            with finalizer('umount', mount_point, enabled=cleanup):
                os.chmod(mount_point, 0o700)
                if volumes:
                    logger.verbose(
                        "Unlocking encrypted devices matching filter: %s",
                        concatenate(map(repr, volumes)))
                else:
                    logger.verbose(
                        "Unlocking all configured and available encrypted devices .."
                    )
                # Create, install and use the keys to unlock the drives.
                num_configured = 0
                num_available = 0
                num_unlocked = 0
                for device in find_managed_drives(mount_point):
                    if volumes and device.target not in volumes:
                        logger.verbose(
                            "Ignoring %s because it doesn't match the filter.",
                            device.target)
                    elif device.is_available:
                        status = activate_encrypted_drive(
                            mapper_name=device.target,
                            physical_device=device.source_device,
                            keys_directory=mount_point,
                            reset=first_run,
                        )
                        if status & DriveStatus.UNLOCKED:
                            num_unlocked += 1
                        num_available += 1
                    num_configured += 1
                if num_unlocked > 0:
                    logger.success("Unlocked %s.",
                                   pluralize(num_unlocked, "encrypted device"))
                elif num_available > 0:
                    logger.info("Nothing to do! (%s already unlocked)",
                                pluralize(num_available, "encrypted device"))
                elif num_configured > 0:
                    logger.info(
                        "Nothing to do! (no encrypted devices available)")
                else:
                    logger.info(
                        "Nothing to do! (no encrypted drives configured)")
        if cleanup:
            logger.verbose("Virtual keys device was accessible for %s.",
                           unlocked_timer)
    finally:
        if not initialized:
            logger.warning(
                "Initialization procedure was interrupted, deleting %s ..",
                image_file)
            if os.path.isfile(image_file):
                os.unlink(image_file)
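A hedged invocation sketch based on the parameter documentation above; every pathname and mapper name below is illustrative:

# Hypothetical call: the image file lives on /boot to avoid the chicken
# and egg problem mentioned in the docstring.
initialize_keys_device(
    image_file='/boot/keys.img',
    mapper_name='virtual-keys',
    mount_point='/mnt/virtual-keys',
    volumes=('secrets', 'backups'),  # only unlock these /etc/crypttab entries
    cleanup=None,  # let the systemd issue detection decide
)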
Beispiel #36
0
    def smart_update(self, *args, **kw):
        """
        Update the system's package lists (switching mirrors if necessary).

        :param args: Command line arguments to ``apt-get update`` (zero or more strings).
        :param max_attempts: The maximum number of attempts at successfully
                             updating the system's package lists (an integer,
                             defaults to 10).
        :param switch_mirrors: :data:`True` if we're allowed to switch mirrors
                               on 'hash sum mismatch' errors, :data:`False`
                               otherwise.
        :raises: :exc:`Exception` when updating the package lists fails
                 `max_attempts` consecutive times.

        While :func:`dumb_update()` simply runs ``apt-get update`` the
        :func:`smart_update()` function works quite differently:

        - First the system's package lists are updated using
          :func:`dumb_update()`. If this is successful we're done.
        - If the update fails we check the command's output for the phrase
          'hash sum mismatch'. If we find this phrase we assume that the
          current mirror is faulty and switch to another one.
        - Failing ``apt-get update`` runs are retried up to `max_attempts`.
        """
        backoff_time = 10
        max_attempts = kw.get('max_attempts', 10)
        switch_mirrors = kw.get('switch_mirrors', True)
        for i in range(1, max_attempts + 1):
            with CaptureOutput() as session:
                try:
                    self.dumb_update(*args)
                    return
                except Exception:
                    if i < max_attempts:
                        output = session.get_text()
                        # Check for EOL releases. This somewhat peculiar way of
                        # checking is meant to ignore 404 responses from
                        # `secondary package mirrors' like PPAs. If the output
                        # of `apt-get update' implies that the release is EOL
                        # we need to verify our assumption.
                        if any(self.current_mirror in line and u'404' in line.split() for line in output.splitlines()):
                            logger.warning("%s may be EOL, checking ..", self.release)
                            if self.release_is_eol:
                                if switch_mirrors:
                                    logger.warning("Switching to old releases mirror because %s is EOL ..",
                                                   self.release)
                                    self.change_mirror(self.old_releases_url, update=False)
                                    continue
                                else:
                                    raise Exception(compact("""
                                        Failed to update package lists because it looks like
                                        the current release (%s) is end of life but I'm not
                                        allowed to switch mirrors! (there's no point in
                                        retrying so I'm not going to)
                                    """, self.distribution_codename))
                        # Check for `hash sum mismatch' errors.
                        if switch_mirrors and u'hash sum mismatch' in output.lower():
                            logger.warning("Detected 'hash sum mismatch' failure, switching to other mirror ..")
                            self.ignore_mirror(self.current_mirror)
                            self.change_mirror(update=False)
                        else:
                            logger.warning("Retrying after `apt-get update' failed (%i/%i) ..", i, max_attempts)
                            # Deal with unidentified (but hopefully transient) failures by retrying but backing off
                            # to give the environment (network connection, mirror state, etc.) time to stabilize.
                            logger.info("Sleeping for %s before retrying update ..", format_timespan(backoff_time))
                            time.sleep(backoff_time)
                            if backoff_time <= 120:
                                backoff_time *= 2
                            else:
                                backoff_time += backoff_time / 3
        raise Exception("Failed to update package lists %i consecutive times?!" % max_attempts)
Beispiel #37
0
    def cleanup_packages(self, **options):
        """
        Run ``apt-get`` to clean up removable kernel related packages.

        :param options: Any keyword arguments are passed on to the
                        :func:`~executor.contexts.AbstractContext.execute()`
                        method of the :class:`context` object.
        :returns: :data:`True` if a system reboot is required (to switch to the
                  newest installed kernel image or because security updates
                  have been installed), :data:`False` otherwise.
        :raises: :exc:`CleanupError` when multiple Linux kernel meta packages
                 are installed and :attr:`force` is :data:`False`.
        """
        timer = Timer()
        self.render_summary()
        if self.cleanup_command:
            if len(self.installed_image_meta_packages) > 1 and not self.force:
                raise CleanupError(compact("""
                    Refusing to cleanup kernel related packages on {system}
                    because results can be unreliable when multiple Linux
                    kernel image meta packages are installed! You can use
                    the -f, --force option to override this sanity check.
                """, system=self.context))
            # Check if the packaging system has signaled that a system reboot
            # is required before we run the `apt-get remove' command.
            reboot_required_before = self.reboot_required
            # Get the set of installed packages before we run `apt-get remove'.
            installed_packages_before = set(
                p for p in self.installed_packages.values() if p.is_installed)
            # Actually run the `apt-get remove' command.
            logger.info("Removing %s on %s ..",
                        pluralize(len(self.removable_packages), "package"),
                        self.context)
            self.context.execute(*self.cleanup_command, sudo=True, **options)
            # Make sure `/etc/apt/apt.conf.d/01autoremove-kernels' is up to date.
            auto_removal_script = '/etc/kernel/postinst.d/apt-auto-removal'
            logger.verbose("Checking if %s needs to be run ..",
                           auto_removal_script)
            if self.context.test('test', '-x', auto_removal_script):
                if self.dry_run:
                    logger.verbose(
                        "Skipping %s script because we're performing a dry-run.",
                        auto_removal_script)
                else:
                    logger.verbose("Running %s script ..", auto_removal_script)
                    auto_removal_command = [
                        auto_removal_script, self.active_kernel_release
                    ]
                    if not self.context.execute(*auto_removal_command,
                                                check=False,
                                                sudo=True,
                                                tty=False):
                        logger.warning(
                            "Failed to update auto-remove statuses! (%s reported an error)",
                            auto_removal_script)
            logger.info("Done! (took %s)", timer)
            # The `apt-get remove' command invalidates all of our cached data
            # so we need to refresh our cached properties to avoid stale data.
            self.clear_cached_properties()
            # Check if it is safe to remove /var/run/reboot-required.
            if self.running_newest_kernel and not reboot_required_before:
                # Get the set of installed packages after running `apt-get remove'.
                installed_packages_after = set(
                    p for p in self.installed_packages.values()
                    if p.is_installed)
                if installed_packages_after.issubset(
                        installed_packages_before):
                    # We can remove the signal file(s) iff:
                    # 1. A system reboot wasn't already required.
                    # 2. We're already running on the newest kernel.
                    # 3. We only removed packages but didn't install or upgrade any.
                    if self.dry_run:
                        logger.info(
                            "Skipping signal file removal because we're performing a dry-run."
                        )
                    else:
                        logger.info(
                            "System reboot is avoidable! Removing signal file(s) .."
                        )
                        self.context.execute(
                            'rm',
                            '--force',
                            REBOOT_REQUIRED_FILE,
                            REBOOT_REQUIRED_PACKAGES_FILE,
                            sudo=True,
                            tty=False,
                        )
        # Inform the operator and caller about whether a reboot is required.
        if not self.running_newest_kernel:
            logger.info(
                "System reboot needed (not yet running the newest kernel).")
            return True
        elif self.reboot_required:
            logger.info("System reboot needed (%s exists).",
                        REBOOT_REQUIRED_FILE)
            return True
        else:
            logger.info("System reboot is not necessary.")
            return False
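The boolean return value encodes the reboot decision documented above, so a caller can act on it directly. A hypothetical caller (the ``cleaner`` name is made up; ``tty=False`` reuses an option already passed to ``execute()`` elsewhere in this example):

# cleanup_packages() returns True when a reboot is required.
if cleaner.cleanup_packages(tty=False):
    print("System reboot needed to activate the newest kernel or security updates.")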
Beispiel #38
0
:data:`True` if usage notes are enabled, :data:`False` otherwise.

This defaults to the environment variable :data:`USAGE_NOTES_VARIABLE` (coerced
using :func:`~humanfriendly.coerce_boolean()`) when available, otherwise
:data:`SPHINX_ACTIVE` determines the default value.

Usage notes are only injected when Sphinx is running, for performance reasons.
It's nothing critical of course, but modifying hundreds or thousands of
docstrings that no one is going to look at seems rather pointless :-).
"""

NOTHING = object()
"""A unique object instance used to detect missing attributes."""

CUSTOM_PROPERTY_NOTE = compact("""
    The :attr:`{name}` property is a :class:`~{type}`.
""")

DYNAMIC_PROPERTY_NOTE = compact("""
    The :attr:`{name}` property is a :class:`~{type}`.
""")

ENVIRONMENT_PROPERTY_NOTE = compact("""
    If the environment variable ``${variable}`` is set it overrides the
    computed value of this property.
""")

REQUIRED_PROPERTY_NOTE = compact("""
    You are required to provide a value for this property by calling the
    constructor of the class that defines the property with a keyword argument
    named `{name}` (unless a custom constructor is defined, in this case
    please refer to the documentation of that constructor).
""")
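Since compact() collapses whitespace, each note above is a single line containing str.format placeholders; presumably they are filled in when the note is injected into a docstring. A hypothetical rendering (the environment variable name is made up for illustration):

print(ENVIRONMENT_PROPERTY_NOTE.format(variable='MY_APP_TIMEOUT'))
# -> If the environment variable ``$MY_APP_TIMEOUT`` is set it overrides
#    the computed value of this property.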
Beispiel #39
0
    def smart_update(self, max_attempts=10, switch_mirrors=True):
        """
        Update the system's package lists (switching mirrors if necessary).

        :param max_attempts: The maximum number of attempts at successfully
                             updating the system's package lists (an integer,
                             defaults to 10).
        :param switch_mirrors: :data:`True` if we're allowed to switch mirrors
                               on 'hash sum mismatch' errors, :data:`False`
                               otherwise.
        :raises: :exc:`Exception` when updating the package lists fails
                 `max_attempts` consecutive times.

        While :func:`dumb_update()` simply runs ``apt-get update`` the
        :func:`smart_update()` function works quite differently:

        - First the system's package lists are updated using
          :func:`dumb_update()`. If this is successful we're done.
        - If the update fails we check the command's output for the phrase
          'hash sum mismatch'. If we find this phrase we assume that the
          current mirror is faulty and switch to another one.
        - Failing ``apt-get update`` runs are retried up to `max_attempts`.
        """
        backoff_time = 10
        for i in range(1, max_attempts + 1):
            with CaptureOutput() as session:
                try:
                    self.dumb_update()
                    return
                except Exception:
                    if i < max_attempts:
                        output = session.get_text()
                        # Check for EOL releases. This somewhat peculiar way of
                        # checking is meant to ignore 404 responses from
                        # `secondary package mirrors' like PPAs.
                        maybe_end_of_life = any(
                            self.current_mirror in line
                            and u'404' in line.split()
                            for line in output.splitlines())
                        # If the output of `apt-get update' implies that the
                        # release is EOL we need to verify our assumption.
                        if maybe_end_of_life:
                            logger.warning(
                                "It looks like the current release (%s) is EOL, verifying ..",
                                self.distribution_codename)
                            if not self.validate_mirror(self.current_mirror):
                                if switch_mirrors:
                                    logger.warning(
                                        "Switching to old releases mirror because current release is EOL .."
                                    )
                                    self.change_mirror(self.old_releases_url,
                                                       update=False)
                                    continue
                                else:
                                    # When asked to do the impossible we abort
                                    # with a clear error message :-).
                                    raise Exception(compact("""
                                        Failed to update package lists because the
                                        current release ({release}) is end of life but
                                        I'm not allowed to switch mirrors! (there's
                                        no point in retrying so I'm not going to)
                                    """, release=self.distribution_codename))
                        # Check for `hash sum mismatch' errors.
                        if switch_mirrors and u'hash sum mismatch' in output.lower():
                            logger.warning(
                                "Detected 'hash sum mismatch' failure, switching to other mirror .."
                            )
                            self.ignore_mirror(self.current_mirror)
                            self.change_mirror(update=False)
                        else:
                            logger.warning(
                                "Retrying after `apt-get update' failed (%i/%i) ..",
                                i, max_attempts)
                            # Deal with unidentified (but hopefully transient) failures by retrying but backing off
                            # to give the environment (network connection, mirror state, etc.) time to stabilize.
                            logger.info(
                                "Sleeping for %s before retrying update ..",
                                format_timespan(backoff_time))
                            time.sleep(backoff_time)
                            if backoff_time <= 120:
                                backoff_time *= 2
                            else:
                                backoff_time += backoff_time / 3
        raise Exception(
            "Failed to update package lists %i consecutive times?!" %
            max_attempts)
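A minimal sketch of the EOL heuristic used above: a 404 only counts when it appears on a line that mentions the primary mirror, so 404s from PPAs and other secondary mirrors are ignored. The two output lines below are loosely modeled on apt output, not literal logs:

output = "\n".join([
    "Ign http://ppa.launchpad.net/example/ubuntu trusty Release 404 Not Found",
    "Err http://archive.ubuntu.com/ubuntu trusty Release 404 Not Found",
])
current_mirror = 'http://archive.ubuntu.com/ubuntu'
maybe_end_of_life = any(current_mirror in line and u'404' in line.split()
                        for line in output.splitlines())
print(maybe_end_of_life)  # True: the 404 came from the primary mirror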
Beispiel #40
0
    def transform_version(self, package_to_convert, python_requirement_name, python_requirement_version):
        """
        Transform a Python requirement version to a Debian version number.

        :param package_to_convert: The :class:`.PackageToConvert` whose
                                   requirement is being transformed.
        :param python_requirement_name: The name of a Python package
                                        as found on PyPI (a string).
        :param python_requirement_version: The required version of the
                                           Python package (a string).
        :returns: The transformed version (a string).

        This method is a wrapper for :func:`.normalize_package_version()` that
        takes care of one additional quirk to ensure compatibility with pip.
        Explaining this quirk requires a bit of context:

        - When package A requires package B (via ``install_requires``) and
          package A absolutely pins the required version of package B using one
          or more trailing zeros (e.g. ``B==1.0.0``) but the actual version
          number of package B (embedded in the metadata of package B) contains
          fewer trailing zeros (e.g. ``1.0``) then pip will not complain but
          silently fetch version ``1.0`` of package B to satisfy the
          requirement.

        - However this doesn't change the absolutely pinned version in the
          ``install_requires`` metadata of package A.

        - When py2deb converts the resulting requirement set, the dependency of
          package A is converted as ``B (= 1.0.0)``. The resulting packages
          will not be installable because ``apt`` considers ``1.0`` to be
          different from ``1.0.0``.

        This method analyzes the requirement set to identify occurrences of
        this quirk and strip trailing zeros in ``install_requires`` metadata
        that would otherwise result in converted packages that cannot be
        installed.
        """
        matching_packages = [p for p in self.packages_to_convert
                             if package_names_match(p.python_name, python_requirement_name)]
        if len(matching_packages) != 1:
            # My assumption while writing this code is that this should never
            # happen. This check is to make sure that if it does happen it will
            # be noticed because the last thing I want is for this `hack' to
            # result in packages that are silently wrongly converted.
            normalized_name = normalize_package_name(python_requirement_name)
            num_matches = len(matching_packages)
            raise Exception(compact("""
                Expected requirement set to contain exactly one Python package
                whose name can be normalized to {name} but encountered {count}
                packages instead! (matching packages: {matches})
            """, name=normalized_name, count=num_matches, matches=matching_packages))
        # Check whether the version number included in the requirement set
        # matches the version number in a package's requirements.
        requirement_to_convert = matching_packages[0]
        if python_requirement_version != requirement_to_convert.python_version:
            logger.debug("Checking whether to strip trailing zeros from required version ..")
            # Check whether the version numbers share the same prefix.
            required_version = tokenize_version(python_requirement_version)
            included_version = tokenize_version(requirement_to_convert.python_version)
            common_length = min(len(required_version), len(included_version))
            required_prefix = required_version[:common_length]
            included_prefix = included_version[:common_length]
            prefixes_match = (required_prefix == included_prefix)
            logger.debug("Prefix of required version: %s", required_prefix)
            logger.debug("Prefix of included version: %s", included_prefix)
            logger.debug("Prefixes match? %s", prefixes_match)
            # Check if 1) only the required version has a suffix and 2) this
            # suffix consists only of trailing zeros.
            required_suffix = required_version[common_length:]
            included_suffix = included_version[common_length:]
            logger.debug("Suffix of required version: %s", required_suffix)
            logger.debug("Suffix of included version: %s", included_suffix)
            if prefixes_match and required_suffix and not included_suffix:
                # Check whether the suffix of the required version contains
                # only zeros, i.e. pip considers the version numbers the same
                # although apt would not agree.
                if all(re.match('^0+$', t) for t in required_suffix if t.isdigit()):
                    modified_version = ''.join(required_prefix)
                    logger.warning("Stripping superfluous trailing zeros from required"
                                   " version of %s required by %s! (%s -> %s)",
                                   python_requirement_name, package_to_convert.python_name,
                                   python_requirement_version, modified_version)
                    python_requirement_version = modified_version
        return normalize_package_version(python_requirement_version)
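A worked example of the trailing-zero quirk handled above. The token lists assume :func:`tokenize_version()` splits ``'1.0.0'`` into alternating number and separator tokens; the exact tokenization is an assumption made for illustration:

import re

required = ['1', '.', '0', '.', '0']  # B==1.0.0 pinned in install_requires
included = ['1', '.', '0']            # version 1.0 from package B's metadata
common_length = min(len(required), len(included))
prefixes_match = (required[:common_length] == included[:common_length])  # True
required_suffix = required[common_length:]                               # ['.', '0']
included_suffix = included[common_length:]                               # []
if prefixes_match and required_suffix and not included_suffix:
    # The suffix contains only zeros, so pip treats the versions as equal
    # even though apt would not.
    if all(re.match('^0+$', t) for t in required_suffix if t.isdigit()):
        print(''.join(required[:common_length]))  # '1.0' -- the version apt will accept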