Example #1
def shutdown_lvm(device):
    """
    Shut down the specified lvm device.
    """
    device = block.sys_block_path(device)
    # lvm devices have a dm directory that contains a file 'name' containing
    # '{volume group}-{logical volume}'. The volume can be freed using lvremove
    name_file = os.path.join(device, 'dm', 'name')
    lvm_name = util.load_file(name_file).strip()
    (vg_name, lv_name) = lvm.split_lvm_name(lvm_name)
    vg_lv_name = "%s/%s" % (vg_name, lv_name)
    devname = "/dev/" + vg_lv_name

    # wipe contents of the logical volume first
    LOG.info('Wiping lvm logical volume: %s', devname)
    block.quick_zero(devname, partitions=False)

    # remove the logical volume
    LOG.debug('using "lvremove" on %s', vg_lv_name)
    util.subp(['lvremove', '--force', '--force', vg_lv_name])

    # if that was the last lvol in the volgroup, get rid of volgroup
    if len(lvm.get_lvols_in_volgroup(vg_name)) == 0:
        pvols = lvm.get_pvols_in_volgroup(vg_name)
        util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5])

        # wipe the underlying physical volumes
        for pv in pvols:
            LOG.info('Wiping lvm physical volume: %s', pv)
            block.quick_zero(pv, partitions=False)

    # refresh lvmetad
    lvm.lvm_scan()
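
The dm 'name' file holds the device-mapper name of the logical volume, in which a literal hyphen inside the volume group or logical volume name is escaped as a double hyphen (so vg-data/lv-root appears as 'vg--data-lv--root'). The helper below is only an illustrative sketch of how lvm.split_lvm_name could undo that encoding; the real helper may instead defer to a tool such as 'dmsetup splitname'.

def split_lvm_name_sketch(dm_name):
    """Illustrative only: split '{vg}-{lv}' where '--' escapes a literal '-'."""
    i = 0
    while i < len(dm_name):
        if dm_name[i] == '-':
            if dm_name[i:i + 2] == '--':
                i += 2  # escaped hyphen belonging to the vg or lv name
                continue
            vg, lv = dm_name[:i], dm_name[i + 1:]
            return vg.replace('--', '-'), lv.replace('--', '-')
        i += 1
    raise ValueError("not a '{vg}-{lv}' device-mapper name: %s" % dm_name)

# split_lvm_name_sketch('vg--data-lv--root') -> ('vg-data', 'lv-root')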
Example #2
def start_clear_holders_deps():
    """
    prepare the system so that clear_holders is able to scan old devices
    """
    # an mdadm scan has to be started in case there is an md device that needs to
    # be detected. if the scan fails, it is either because there are no mdadm
    # devices on the system, or because there is an mdadm device in a damaged
    # state that could not be started. due to the nature of mdadm tools, it is
    # difficult to know which is the case. if any errors did occur, then ignore
    # them, since no action needs to be taken if there were no mdadm devices on
    # the system, and in the case where there is some mdadm metadata on a disk,
    # but there was not enough to start the array, the call to wipe_volume on
    # all disks and partitions should be sufficient to remove the mdadm
    # metadata
    mdadm.mdadm_assemble(scan=True, ignore_errors=True)
    # scan and activate for logical volumes
    lvm.lvm_scan()
    lvm.activate_volgroups()
    # the bcache module needs to be present to properly detect bcache devs
    # on some systems (precise without hwe kernel) it may not be possible to
    # load the bcache module because it is not present in the kernel. if this
    # happens then there is no need to halt installation, as the bcache devices
    # will never appear and will never prevent the disk from being reformatted
    util.load_kernel_module('bcache')

    if not zfs.zfs_supported():
        LOG.warning('zfs filesystem is not supported in this environment')
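
In context, this preparation step is meant to run once before holders such as the lvm devices from Example #1 are torn down. A hypothetical driver (clear_stale_lvm is not a curtin function, only an illustration of the intended ordering, and it covers lvm holders only) might look like:

def clear_stale_lvm(devices):
    # hypothetical: make old devices visible first, then shut each one down
    start_clear_holders_deps()
    for dev in devices:
        shutdown_lvm(dev)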
Example #3
    def test_lvm_scan_multipath(self, mock_distro, mock_util, mock_lvmetad):
        """check that lvm_scan formats commands correctly for multipath."""
        cmds = [['pvscan'], ['vgscan']]
        mock_distro.lsb_release.return_value = {'codename': 'focal'}
        mock_lvmetad.return_value = False
        lvm.lvm_scan(multipath=True)
        cmd_filter = [
            '--config',
            'devices{ filter = [ "a|/dev/mapper/mpath.*|", "r|.*|" ] }'
        ]
        expected = [cmd + cmd_filter for cmd in cmds]
        calls = [mock.call(cmd, capture=True) for cmd in expected]
        self.assertEqual(len(expected), len(mock_util.subp.call_args_list))
        mock_util.subp.assert_has_calls(calls)
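
The test fixes the exact shape of the '--config' filter that lvm_scan is expected to pass when multipath=True: accept /dev/mapper/mpath* devices and reject every other block device. A command-building sketch consistent with the test (not necessarily curtin's actual lvm_scan) would be:

def build_scan_cmds(multipath=False):
    # sketch only: reproduce the command shapes asserted in the test above
    cmds = [['pvscan'], ['vgscan']]
    if multipath:
        # accept only multipath mapper devices, reject all other block devices
        mpath_filter = ('devices{ filter = '
                        '[ "a|/dev/mapper/mpath.*|", "r|.*|" ] }')
        cmds = [cmd + ['--config', mpath_filter] for cmd in cmds]
    return cmds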
Example #4
    def test_lvm_scan(self, mock_distro, mock_util, mock_lvmetad):
        """check that lvm_scan formats commands correctly for each release"""
        cmds = [['pvscan'], ['vgscan']]
        for (count, (codename, lvmetad_status,
                     use_cache)) in enumerate([('precise', False, False),
                                               ('trusty', False, False),
                                               ('xenial', False, False),
                                               ('xenial', True, True),
                                               (None, True, True),
                                               (None, False, False)]):
            mock_distro.lsb_release.return_value = {'codename': codename}
            mock_lvmetad.return_value = lvmetad_status
            lvm.lvm_scan()
            # copy the inner lists so appending '--cache' never mutates cmds
            expected = [list(cmd) for cmd in cmds]
            for cmd in expected:
                if lvmetad_status:
                    cmd.append('--cache')

            calls = [mock.call(cmd, capture=True) for cmd in expected]
            self.assertEqual(len(expected), len(mock_util.subp.call_args_list))
            mock_util.subp.assert_has_calls(calls)
            mock_util.subp.reset_mock()
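
In the cases exercised here, only the lvmetad state decides whether '--cache' is appended; the mocked release codename does not change the expected command shape. Combined with the multipath sketch after Example #3, the behaviour the two tests pin down can be drafted as (again a sketch, not curtin's lvm_scan):

def lvm_scan_cmds(lvmetad_running, multipath=False):
    # sketch combining both tests: returns the command lists lvm_scan would run
    cmds = build_scan_cmds(multipath)  # sketch defined after Example #3
    if lvmetad_running:
        # ask pvscan/vgscan to refresh the running lvmetad daemon's cache
        cmds = [cmd + ['--cache'] for cmd in cmds]
    return cmds

# lvm_scan_cmds(True)  -> [['pvscan', '--cache'], ['vgscan', '--cache']]
# lvm_scan_cmds(False) -> [['pvscan'], ['vgscan']]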
Example #5
def wipe_volume(path, mode="superblock", exclusive=True, strict=False):
    """wipe a volume/block device

    :param path: a path to a block device
    :param mode: how to wipe it.
       pvremove: wipe an lvm physical volume
       zero: write zeros to the entire volume
       random: write random data (/dev/urandom) to the entire volume
       superblock: zero the beginning and the end of the volume
       superblock-recursive: zero the beginning of the volume, the end of the
                    volume and beginning and end of any partitions that are
                    known to be on this device.
    :param exclusive: boolean to control how path is opened
    :param strict: boolean to control when to raise errors on write failures
    """
    if mode == "pvremove":
        # We need to use --force --force in case it's already in a volgroup and
        # pvremove doesn't want to remove it

        # If pvremove is run and there is no label on the system,
        # then it exits with 5. That is also okay, because we might be
        # wiping something that is already blank
        util.subp(['pvremove', '--force', '--force', '--yes', path],
                  rcs=[0, 5],
                  capture=True)
        lvm.lvm_scan()
    elif mode == "zero":
        wipe_file(path, exclusive=exclusive)
    elif mode == "random":
        with open("/dev/urandom", "rb") as reader:
            wipe_file(path, reader=reader.read, exclusive=exclusive)
    elif mode == "superblock":
        quick_zero(path, partitions=False, exclusive=exclusive, strict=strict)
    elif mode == "superblock-recursive":
        quick_zero(path, partitions=True, exclusive=exclusive, strict=strict)
    else:
        raise ValueError("wipe mode %s not supported" % mode)
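
For example, clearing a whole disk before repartitioning would use the recursive superblock mode, while reclaiming a partition that used to be an lvm physical volume would use pvremove (the device paths below are placeholders):

# zero the start and end of the disk and of every partition known to be on it
wipe_volume('/dev/sdX', mode='superblock-recursive')

# strip lvm physical-volume metadata from a partition about to be reused
wipe_volume('/dev/sdX1', mode='pvremove')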
Example #6
def start_clear_holders_deps():
    """
    prepare the system so that clear_holders is able to scan old devices
    """
    # an mdadm scan has to be started in case there is an md device that needs to
    # be detected. if the scan fails, it is either because there are no mdadm
    # devices on the system, or because there is an mdadm device in a damaged
    # state that could not be started. due to the nature of mdadm tools, it is
    # difficult to know which is the case. if any errors did occur, then ignore
    # them, since no action needs to be taken if there were no mdadm devices on
    # the system, and in the case where there is some mdadm metadata on a disk,
    # but there was not enough to start the array, the call to wipe_volume on
    # all disks and partitions should be sufficient to remove the mdadm
    # metadata
    mdadm.mdadm_assemble(scan=True, ignore_errors=True)
    # collect detail on any assembling arrays
    for md in [
            md for md in glob.glob('/dev/md*')
            if not os.path.isdir(md) and not identify_partition(md)
    ]:
        mdstat = None
        if os.path.exists('/proc/mdstat'):
            mdstat = util.load_file('/proc/mdstat')
            LOG.debug("/proc/mdstat:\n%s", mdstat)
            found = [
                line for line in mdstat.splitlines()
                if os.path.basename(md) in line
            ]
            # in some cases we have a /dev/md0 device node
            # but the kernel has already renamed the device /dev/md127
            if len(found) == 0:
                LOG.debug('Ignoring md device %s, not present in mdstat', md)
                continue

        # give it a second poke to encourage running
        try:
            LOG.debug('Activating mdadm array %s', md)
            (out, err) = mdadm.mdadm_run(md)
            LOG.debug('MDADM run on %s stdout:\n%s\nstderr:\n%s', md, out, err)
        except util.ProcessExecutionError:
            LOG.debug('Non-fatal error when starting mdadm device %s', md)

        # extract details if we can
        try:
            (out, err) = mdadm.mdadm_query_detail(md,
                                                  export=False,
                                                  rawoutput=True)
            LOG.debug('MDADM detail on %s stdout:\n%s\nstderr:\n%s', md, out,
                      err)
        except util.ProcessExecutionError:
            LOG.debug('Non-fatal error when querying mdadm detail on %s', md)

    # scan and activate for logical volumes
    lvm.lvm_scan()
    lvm.activate_volgroups()
    # the bcache module needs to be present to properly detect bcache devs
    # on some systems (precise without hwe kernel) it may not be possible to
    # load the bcache module because it is not present in the kernel. if this
    # happens then there is no need to halt installation, as the bcache devices
    # will never appear and will never prevent the disk from being reformatted
    util.load_kernel_module('bcache')

    if not zfs.zfs_supported():
        LOG.warning('zfs filesystem is not supported in this environment')
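
The /proc/mdstat check in this function only asks whether the md node's basename appears on any line of the file. With typical mdstat content such as the illustrative excerpt below, a stale /dev/md0 node is skipped because the kernel has renamed the array to md127, while /dev/md127 would go on to receive the mdadm_run and mdadm_query_detail calls:

sample_mdstat = """\
Personalities : [raid1]
md127 : active raid1 sdb1[1] sda1[0]
      488254464 blocks super 1.2 [2/2] [UU]
unused devices: <none>
"""
# 'md0' appears on no line, so the loop logs it and continues
assert not any('md0' in line for line in sample_mdstat.splitlines())
# 'md127' does appear, so that array would be poked and queried
assert any('md127' in line for line in sample_mdstat.splitlines())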