Example #1
def start_clear_holders_deps():
    """
    Prepare the system so that the clear-holders process can scan old devices.
    """
    # An mdadm scan has to be started in case there is an md device that
    # needs to be detected. If the scan fails, it is either because there are
    # no mdadm devices on the system, or because there is an mdadm device in
    # a damaged state that could not be started. Due to the nature of the
    # mdadm tools it is difficult to know which is the case, so any errors
    # are ignored: no action is needed if there were no mdadm devices, and
    # when a disk carries some mdadm metadata but not enough to start an
    # array, the later call to wipe_volume on all disks and partitions is
    # sufficient to remove that metadata.
    mdadm.mdadm_assemble(scan=True, ignore_errors=True)
    # scan and activate for logical volumes
    lvm.lvm_scan()
    lvm.activate_volgroups()
    # The bcache module needs to be present to properly detect bcache
    # devices. On some systems (precise without the hwe kernel) it may not
    # be possible to load the bcache module because it is not present in the
    # kernel. If this happens there is no need to halt installation, as the
    # bcache devices will never appear and will never prevent the disk from
    # being reformatted.
    util.load_kernel_module('bcache')

    if not zfs.zfs_supported():
        LOG.warning('zfs filesystem is not supported in this environment')
Example #2
    def test_zfs_supported_returns_false_on_assert_fail(self, m_assert_zfs):
        zfs_supported = False
        m_assert_zfs.side_effect = RuntimeError('No zfs module')

        result = zfs.zfs_supported()
        self.assertEqual(zfs_supported, result)
        self.assertEqual(1, m_assert_zfs.call_count)
Example #3
    def test_zfs_supported(self, m_assert_zfs):
        zfs_supported = True
        m_assert_zfs.return_value = zfs_supported

        result = zfs.zfs_supported()
        self.assertEqual(zfs_supported, result)
        self.assertEqual(1, m_assert_zfs.call_count)
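Examples #2 and #3 show only the test bodies; the m_assert_zfs argument implies a mock.patch decorator on each method that replaces an assertion helper inside the zfs module. Read together, the two assertions describe the contract of zfs.zfs_supported(): it returns True when the underlying "assert supported" check passes and False when that check raises RuntimeError. The following is a minimal self-contained sketch of a function with that behaviour; the helper name zfs_assert_supported is an assumption taken from the mock's parameter name, not from the examples themselves.

def zfs_assert_supported():
    # Assumed helper, named after the m_assert_zfs mock: raise RuntimeError
    # when ZFS cannot be used in this environment.
    raise RuntimeError('No zfs module')


def zfs_supported():
    """Return True if ZFS appears usable here, False otherwise (sketch only)."""
    try:
        zfs_assert_supported()
        return True
    except RuntimeError:
        return False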
Example #4
    def test_zfs_assert_supported_returns_true(self, mock_util, mock_supfs):
        """zfs_assert_supported returns True on supported platforms"""
        mock_util.get_platform_arch.return_value = 'amd64'
        mock_util.lsb_release.return_value = {'codename': 'bionic'}
        mock_util.subp.return_value = ("", "")
        mock_supfs.return_value = ['zfs']
        mock_util.which.side_effect = iter(['/wark/zpool', '/wark/zfs'])

        self.assertNotIn(mock_util.get_platform_arch.return_value,
                         zfs.ZFS_UNSUPPORTED_ARCHES)
        self.assertNotIn(mock_util.lsb_release.return_value['codename'],
                         zfs.ZFS_UNSUPPORTED_RELEASES)
        self.assertTrue(zfs.zfs_supported())
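The mock setup in Example #4 outlines what the assertion helper appears to verify before declaring ZFS usable: the platform architecture must not be in ZFS_UNSUPPORTED_ARCHES, the release codename must not be in ZFS_UNSUPPORTED_RELEASES, the kernel must report zfs among its supported filesystems, and the zpool and zfs binaries must be found on PATH. Below is a rough sketch reconstructed from that mock setup, assuming curtin's util module provides the helpers the test mocks (get_platform_arch, lsb_release, which); the get_supported_filesystems helper and the exact error messages are assumptions, and the project's real implementation may differ.

from curtin import util
from curtin.block.zfs import ZFS_UNSUPPORTED_ARCHES, ZFS_UNSUPPORTED_RELEASES


def get_supported_filesystems():
    # Assumed helper (mocked as mock_supfs in Example #4): list the
    # filesystems the running kernel knows about.
    with open('/proc/filesystems') as fp:
        return [line.split()[-1] for line in fp if line.strip()]


def zfs_assert_supported():
    """Raise RuntimeError if ZFS cannot be used here (illustrative sketch)."""
    arch = util.get_platform_arch()
    if arch in ZFS_UNSUPPORTED_ARCHES:
        raise RuntimeError('zfs is not supported on arch: %s' % arch)

    codename = util.lsb_release().get('codename')
    if codename in ZFS_UNSUPPORTED_RELEASES:
        raise RuntimeError('zfs is not supported on release: %s' % codename)

    if 'zfs' not in get_supported_filesystems():
        raise RuntimeError('kernel does not list zfs as a supported filesystem')

    for tool in ('zpool', 'zfs'):
        if not util.which(tool):
            raise RuntimeError('missing required zfs tool: %s' % tool)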
Example #5
def start_clear_holders_deps():
    """
    Prepare the system so that the clear-holders process can scan old devices.
    """
    # An mdadm scan has to be started in case there is an md device that
    # needs to be detected. If the scan fails, it is either because there are
    # no mdadm devices on the system, or because there is an mdadm device in
    # a damaged state that could not be started. Due to the nature of the
    # mdadm tools it is difficult to know which is the case, so any errors
    # are ignored: no action is needed if there were no mdadm devices, and
    # when a disk carries some mdadm metadata but not enough to start an
    # array, the later call to wipe_volume on all disks and partitions is
    # sufficient to remove that metadata.
    mdadm.mdadm_assemble(scan=True, ignore_errors=True)
    # collect detail on any assembling arrays
    for md in [
            md for md in glob.glob('/dev/md*')
            if not os.path.isdir(md) and not identify_partition(md)
    ]:
        mdstat = None
        if os.path.exists('/proc/mdstat'):
            mdstat = util.load_file('/proc/mdstat')
            LOG.debug("/proc/mdstat:\n%s", mdstat)
            found = [
                line for line in mdstat.splitlines()
                if os.path.basename(md) in line
            ]
            # In some cases a /dev/md0 device node exists, but the kernel has
            # already renamed the device to /dev/md127.
            if len(found) == 0:
                LOG.debug('Ignoring md device %s, not present in mdstat', md)
                continue

        # give it a second poke to encourage running
        try:
            LOG.debug('Activating mdadm array %s', md)
            (out, err) = mdadm.mdadm_run(md)
            LOG.debug('MDADM run on %s stdout:\n%s\nstderr:\n%s', md, out, err)
        except util.ProcessExecutionError:
            LOG.debug('Non-fatal error when starting mdadm device %s', md)

        # extract details if we can
        try:
            (out, err) = mdadm.mdadm_query_detail(md,
                                                  export=False,
                                                  rawoutput=True)
            LOG.debug('MDADM detail on %s stdout:\n%s\nstderr:\n%s', md, out,
                      err)
        except util.ProcessExecutionError:
            LOG.debug('Non-fatal error when querying mdadm detail on %s', md)

    # scan and activate for logical volumes
    lvm.lvm_scan()
    lvm.activate_volgroups()
    # The bcache module needs to be present to properly detect bcache
    # devices. On some systems (precise without the hwe kernel) it may not
    # be possible to load the bcache module because it is not present in the
    # kernel. If this happens there is no need to halt installation, as the
    # bcache devices will never appear and will never prevent the disk from
    # being reformatted.
    util.load_kernel_module('bcache')

    if not zfs.zfs_supported():
        LOG.warning('zfs filesystem is not supported in this environment')
Example #6
def wipe_superblock(device):
    """
    Wrapper for block.wipe_volume compatible with shutdown function interface
    """
    blockdev = block.sysfs_to_devpath(device)
    # when operating on a disk that used to have a dos part table with an
    # extended partition, attempting to wipe the extended partition will fail
    try:
        if not block.is_online(blockdev):
            LOG.debug("Device is not online (size=0), so skipping:"
                      " '%s'", blockdev)
            return

        if block.is_extended_partition(blockdev):
            LOG.info(
                "extended partitions do not need wiping, so skipping:"
                " '%s'", blockdev)
            return
    except OSError as e:
        if util.is_file_not_found_exc(e):
            LOG.debug('Device to wipe disappeared: %s', e)
            LOG.debug('/proc/partitions says: %s',
                      util.load_file('/proc/partitions'))

            (parent, partnum) = block.get_blockdev_for_partition(blockdev)
            out, _e = util.subp(['sfdisk', '-d', parent],
                                capture=True,
                                combine_capture=True)
            LOG.debug('Disk partition info:\n%s', out)
            return
        else:
            raise e

    # gather any partitions
    partitions = block.get_sysfs_partitions(device)

    # release zfs member by exporting the pool
    if zfs.zfs_supported() and block.is_zfs_member(blockdev):
        poolname = zfs.device_to_poolname(blockdev)
        # only export pools that have been imported
        if poolname in zfs.zpool_list():
            try:
                zfs.zpool_export(poolname)
            except util.ProcessExecutionError as e:
                LOG.warning('Failed to export zpool "%s": %s', poolname, e)

    if is_swap_device(blockdev):
        shutdown_swap(blockdev)

    # Some volumes will be claimed by the bcache layer but do not surface
    # an actual /dev/bcacheN device which owns the parts (backing, cache).
    # The result is that some volumes cannot be wiped while bcache claims
    # the device.  Resolve this by stopping the bcache layer on those
    # volumes if present.
    for bcache_path in ['bcache', 'bcache/set']:
        stop_path = os.path.join(device, bcache_path)
        if os.path.exists(stop_path):
            LOG.debug('Attempting to release bcache layer from device: %s:%s',
                      device, stop_path)
            if stop_path.endswith('set'):
                rp = os.path.realpath(stop_path)
                bcache.stop_cacheset(rp)
            else:
                bcache._stop_device(stop_path)

    _wipe_superblock(blockdev)

    # if we had partitions, make sure they've been removed
    if partitions:
        LOG.debug('%s had partitions, issuing partition reread', device)
        retries = [.5, .5, 1, 2, 5, 7]
        for attempt, wait in enumerate(retries):
            try:
                # only rereadpt on wiped device
                block.rescan_block_devices(devices=[blockdev])
                # may raise IOError, OSError due to wiped partition table
                curparts = block.get_sysfs_partitions(device)
                if len(curparts) == 0:
                    return
            except (IOError, OSError):
                if attempt + 1 >= len(retries):
                    raise

            LOG.debug(
                "%s partitions still present, rereading pt"
                " (%s/%s).  sleeping %ss before retry", device, attempt + 1,
                len(retries), wait)
            time.sleep(wait)

    # multipath partitions are separate block devices (disks)
    if multipath.is_mpath_partition(blockdev):
        multipath.remove_partition(blockdev)
    # multipath devices must be hidden to utilize a single member (path)
    elif multipath.is_mpath_device(blockdev):
        mp_id = multipath.find_mpath_id(blockdev)
        if mp_id:
            multipath.remove_map(mp_id)
        else:
            raise RuntimeError('Failed to find multipath id for %s' % blockdev)
Example #7
    def test_supported_arch(self):
        self.assertTrue(zfs.zfs_supported())