Example 1
def force_devmapper_symlinks():
    """Check if /dev/mapper/mpath* files are symlinks, if not trigger udev."""
    LOG.debug('Verifying /dev/mapper/mpath* files are symlinks')
    needs_trigger = []
    for mp_id, dm_dev in dmname_to_blkdev_mapping().items():
        if mp_id.startswith('mpath'):
            mapper_path = '/dev/mapper/' + mp_id
            if not os.path.islink(mapper_path):
                LOG.warning(
                    'Found invalid device mapper mp path: %s, removing',
                    mapper_path)
                util.del_file(mapper_path)
                needs_trigger.append((mapper_path, dm_dev))

    if needs_trigger:
        for (mapper_path, dm_dev) in needs_trigger:
            LOG.debug('multipath: regenerating symlink for %s (%s)',
                      mapper_path, dm_dev)
            util.subp([
                'udevadm', 'trigger', '--subsystem-match=block',
                '--action=add', '/sys/class/block/' + os.path.basename(dm_dev)
            ])
            udev.udevadm_settle(exists=mapper_path)
            if not os.path.islink(mapper_path):
                LOG.error('Failed to regenerate udev symlink %s', mapper_path)
Example 2
def mdadm_assemble(md_devname=None, devices=None, spares=None, scan=False,
                   ignore_errors=False):
    # md_devname is a /dev/XXXX device path
    # devices is a non-empty list of /dev/xxx member device paths
    # spares, if non-empty, is a list of /dev/xxx paths to append
    devices = devices or []
    spares = spares or []
    cmd = ["mdadm", "--assemble"]
    if scan:
        cmd += ['--scan', '-v']
    else:
        valid_mdname(md_devname)
        cmd += [md_devname, "--run"] + devices
        if spares:
            cmd += spares

    try:
        # mdadm assemble returns 1 when no arrays are found. this might not be
        # an error depending on the situation this function was called in, so
        # accept a return code of 1
        # mdadm assemble returns 2 when called on an array that is already
        # assembled. this is not an error, so accept return code of 2
        # all other return codes can be accepted with ignore_error set to true
        out, err = util.subp(cmd, capture=True, rcs=[0, 1, 2])
        LOG.debug('mdadm assemble scan results:\n%s\n%s', out, err)
        out, err = util.subp(['mdadm', '--detail', '--scan', '-v'],
                             capture=True, rcs=[0, 1])
        LOG.debug('mdadm detail scan after assemble:\n%s\n%s',
                  out, err)
    except util.ProcessExecutionError:
        LOG.warning("mdadm_assemble had unexpected return code")
        if not ignore_errors:
            raise

    udev.udevadm_settle()
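
A minimal usage sketch for the assemble helper above, assuming mdadm_assemble is in scope (e.g. imported from curtin's mdadm module); the device paths are illustrative.

# Assemble a specific array from known member devices (paths illustrative).
mdadm_assemble(md_devname='/dev/md0', devices=['/dev/vdb1', '/dev/vdc1'])

# Or let mdadm scan for any assemblable arrays, tolerating failures.
mdadm_assemble(scan=True, ignore_errors=True)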
Example 3
def iscsiadm_logout(target, portal):
    LOG.debug('iscsiadm_logout: target=%s portal=%s', target, portal)

    cmd = ['iscsiadm', '--mode=node', '--targetname=%s' % target,
           '--portal=%s' % portal, '--logout']
    util.subp(cmd, capture=True, log_captured=True)

    udev.udevadm_settle()
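
A minimal usage sketch for the logout helper above; the target IQN and portal address are illustrative.

# Log out of a single iSCSI session identified by target IQN and portal.
iscsiadm_logout(target='iqn.2004-10.com.example:storage.disk1',
                portal='192.0.2.10:3260')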
Example 4
def ensure_bcache_is_registered(bcache_device, expected, retry=None):
    """ Test that bcache_device is found at an expected path and
        re-register the device if it's not ready.

        Retry the validation and registration as needed.
    """
    if not retry:
        retry = BCACHE_REGISTRATION_RETRY

    for attempt, wait in enumerate(retry):
        # find the actual bcache device name via sysfs using the
        # backing device's holders directory.
        LOG.debug(
            'checking if just-created bcache %s is registered,'
            ' try=%s', bcache_device, attempt + 1)
        try:
            udevadm_settle()
            if os.path.exists(expected):
                LOG.debug('Found bcache dev %s at expected path %s',
                          bcache_device, expected)
                validate_bcache_ready(bcache_device, expected)
            else:
                msg = 'bcache device path not found: %s' % expected
                LOG.debug(msg)
                raise ValueError(msg)

            # if bcache path exists and holders are > 0 we can return
            LOG.debug(
                'bcache dev %s at path %s successfully registered'
                ' on attempt %s/%s', bcache_device, expected, attempt + 1,
                len(retry))
            return

        except (OSError, IndexError, ValueError):
            # Some versions of bcache-tools will register the bcache device
            # as soon as we run make-bcache using udev rules, so wait for
            # udev to settle, then try to locate the dev, on older versions
            # we need to register it manually though
            LOG.debug(
                'bcache device was not registered, registering %s '
                'at /sys/fs/bcache/register', bcache_device)
            try:
                register_bcache(bcache_device)
            except IOError:
                # device creation is notoriously racy and this can trigger
                # "Invalid argument" IOErrors if it got created in "the
                # meantime" - just restart the function a few times to
                # check it all again
                pass

        LOG.debug("bcache dev %s not ready, waiting %ss", bcache_device, wait)
        time.sleep(wait)

    # we've exhausted our retries
    LOG.warning('Repeated errors registering the bcache dev %s',
                bcache_device)
    raise RuntimeError("bcache device %s can't be registered" % bcache_device)
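
A minimal usage sketch for the registration helper above, assuming a retry schedule in the style of BCACHE_REGISTRATION_RETRY; both paths are illustrative: the first is the device handed to register_bcache(), the second is the path expected to appear once registration completes.

# Retry roughly every 0.3s, up to 10 times, for the expected path to appear.
ensure_bcache_is_registered('/dev/vdb1', '/dev/bcache0', retry=[0.3] * 10)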
Example 5
def remove_partition(devpath, retries=10):
    LOG.debug('multipath: removing multipath partition: %s', devpath)
    for _ in range(0, retries):
        util.subp(['dmsetup', 'remove', devpath], rcs=[0, 1])
        udev.udevadm_settle()
        if not os.path.exists(devpath):
            return

    util.wait_for_removal(devpath)
Example 6
def remove_partition(devpath, retries=10):
    """ Remove a multipath partition mapping. """
    LOG.debug('multipath: removing multipath partition: %s', devpath)
    for _ in range(0, retries):
        util.subp(['dmsetup', 'remove', '--force', '--retry', devpath])
        udev.udevadm_settle()
        if not os.path.exists(devpath):
            return

    util.wait_for_removal(devpath)
Example 7
def remove_map(map_id, retries=10):
    LOG.debug('multipath: removing multipath map: %s', map_id)
    devpath = '/dev/mapper/%s' % map_id
    for _ in range(0, retries):
        util.subp(['multipath', '-f', map_id], rcs=[0, 1])
        udev.udevadm_settle()
        if not os.path.exists(devpath):
            return

    util.wait_for_removal(devpath)
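
A minimal usage sketch for the map-removal helper above; the map name is illustrative.

# Flush a multipath map by its device-mapper name; the helper polls until
# /dev/mapper/<map_id> disappears or the retries are exhausted.
remove_map('mpatha', retries=10)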
Example 8
    def connect(self):
        if self.target not in iscsiadm_sessions():
            iscsiadm_discovery(self.portal)

            iscsiadm_authenticate(self.target, self.portal, self.user,
                                  self.password, self.iuser, self.ipassword)

            iscsiadm_login(self.target, self.portal)

            udev.udevadm_settle(self.devdisk_path)

        # always set automatic mode
        iscsiadm_set_automatic(self.target, self.portal)
Example 9
def find_mpath_members(multipath_id, paths=None):
    """ Return a list of device path for each member of aspecified mpath_id."""
    if not paths:
        paths = show_paths()
        for _ in range(5):
            orphans = [path for path in paths if 'orphan' in path['multipath']]
            if orphans:
                udev.udevadm_settle()
                paths = show_paths()
            else:
                break

    members = [
        '/dev/' + path['device'] for path in paths
        if path['multipath'] == multipath_id
    ]
    return members
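
A minimal usage sketch for the member-lookup helper above; the multipath id is illustrative.

# Resolve the member disk paths (e.g. ['/dev/sda', '/dev/sdb']) behind one map.
members = find_mpath_members('mpatha')
print(members)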
Example 10
def rescan_block_devices(devices=None, warn_on_fail=True):
    """
    run 'blockdev --rereadpt' for all block devices not currently mounted
    """
    if not devices:
        unused = get_unused_blockdev_info()
        devices = []
        for devname, data in unused.items():
            if data.get('RM') == "1":
                continue
            if data.get('RO') != "0" or data.get('TYPE') != "disk":
                continue
            devices.append(data['device_path'])

    if not devices:
        LOG.debug("no devices found to rescan")
        return

    # blockdev needs /dev/ parameters, convert if needed
    cmd = ['blockdev', '--rereadpt'] + [
        dev if dev.startswith('/dev/') else sysfs_to_devpath(dev)
        for dev in devices
    ]
    try:
        util.subp(cmd, capture=True)
    except util.ProcessExecutionError as e:
        if warn_on_fail:
            # FIXME: it's less than ideal to swallow this error, but until
            # we fix LP: #1489521 we kind of need to.
            LOG.warning(
                "Error rescanning devices, possibly known issue LP: #1489521")
            # Reformatting the exception output so as to not trigger
            # vmtest scanning for unexpected errors in the install logfile
            LOG.warning("cmd: %s\nstdout:%s\nstderr:%s\nexit_code:%s", e.cmd,
                        e.stdout, e.stderr, e.exit_code)

    udevadm_settle()

    return
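
A minimal usage sketch for the rescan helper above; the device paths are illustrative.

# Re-read partition tables on specific disks, or call with no arguments to
# rescan every unused, writable, non-removable disk reported by
# get_unused_blockdev_info().
rescan_block_devices(devices=['/dev/vdb', '/dev/vdc'])
rescan_block_devices()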
Example 11
def mdadm_create(md_devname, raidlevel, devices, spares=None, container=None,
                 md_name="", metadata=None):
    LOG.debug('mdadm_create: md_devname=%s raidlevel=%s devices=%s spares=%s'
              ' name=%s', md_devname, raidlevel, devices, spares, md_name)

    assert_valid_devpath(md_devname)
    if not metadata:
        metadata = 'default'

    if raidlevel not in VALID_RAID_LEVELS:
        raise ValueError('Invalid raidlevel: [{}]'.format(raidlevel))

    min_devices = md_minimum_devices(raidlevel)
    devcnt = len(devices) if not container else \
        len(md_get_devices_list(container))
    if devcnt < min_devices:
        err = 'Not enough devices (' + str(devcnt) + ') '
        err += 'for raidlevel: ' + str(raidlevel)
        err += ' minimum devices needed: ' + str(min_devices)
        raise ValueError(err)

    if spares and raidlevel not in SPARE_RAID_LEVELS:
        err = ('Raidlevel does not support spare devices: ' + str(raidlevel))
        raise ValueError(err)

    (hostname, _err) = util.subp(["hostname", "-s"], rcs=[0], capture=True)

    cmd = ["mdadm", "--create", md_devname, "--run",
           "--homehost=%s" % hostname.strip(),
           "--raid-devices=%s" % devcnt]

    if not container:
        cmd.append("--metadata=%s" % metadata)
    if raidlevel != 'container':
        cmd.append("--level=%s" % raidlevel)

    if md_name:
        cmd.append("--name=%s" % md_name)

    if container:
        cmd.append(container)

    for device in devices:
        holders = get_holders(device)
        if len(holders) > 0:
            LOG.warning('Detected holders during mdadm creation: %s', holders)
            raise OSError('Failed to remove holders from %s' % device)
        zero_device(device)
        cmd.append(device)

    if spares:
        cmd.append("--spare-devices=%s" % len(spares))
        for device in spares:
            zero_device(device)
            cmd.append(device)

    # Create the raid device
    udev.udevadm_settle()
    util.subp(["udevadm", "control", "--stop-exec-queue"])
    try:
        util.subp(cmd, capture=True)
    except util.ProcessExecutionError:
        # failures here are frequently caused by missing md kernel modules
        # (LP: #1519470) - add extra debug
        LOG.debug('mdadm_create failed - extra debug regarding md modules')
        (out, _err) = util.subp(["lsmod"], capture=True)
        if not _err:
            LOG.debug('modules loaded: \n%s' % out)
        raidmodpath = '/lib/modules/%s/kernel/drivers/md' % os.uname()[2]
        (out, _err) = util.subp(["find", raidmodpath],
                                rcs=[0, 1], capture=True)
        if out:
            LOG.debug('available md modules: \n%s' % out)
        else:
            LOG.debug('no available md modules found')

        for dev in devices + (spares or []):
            h = get_holders(dev)
            LOG.debug('Device %s has holders: %s', dev, h)
        raise

    util.subp(["udevadm", "control", "--start-exec-queue"])
    udev.udevadm_settle(exists=md_devname)
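
A minimal usage sketch for the create helper above; the device paths, RAID level and array name are illustrative.

# Create a two-disk RAID1 array with one spare, using the default metadata.
mdadm_create('/dev/md0', 'raid1', ['/dev/vdb1', '/dev/vdc1'],
             spares=['/dev/vdd1'], md_name='data')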
Example 12
def shutdown_bcache(device):
    """
    Shut down bcache for specified bcache device

    1. wipe the bcache device contents
    2. extract the cacheset uuid (if cached)
    3. extract the backing device
    4. stop cacheset (if present)
    5. stop the bcacheN device
    6. wait for the sysfs paths to bcacheN, bcacheN/bcache and
       backing/bcache to go away
    """
    if not device.startswith('/sys/class/block'):
        raise ValueError(
            'Invalid Device (%s): '
            'Device path must start with /sys/class/block/' % device)

    # bcache device removal should be fast but in an extreme
    # case, might require the cache device to flush large
    # amounts of data to a backing device.  The strategy here
    # is to wait for approximately 30 seconds but to check
    # frequently since curtin cannot proceed until the devices
    # are cleared.
    bcache_shutdown_message = ('shutdown_bcache running on {} has determined '
                               'that the device has already been shut down '
                               'during handling of another bcache dev. '
                               'skipping'.format(device))

    if not os.path.exists(device):
        LOG.info(bcache_shutdown_message)
        return

    LOG.info('Wiping superblock on bcache device: %s', device)
    _wipe_superblock(block.sysfs_to_devpath(device), exclusive=False)

    # collect required information before stopping bcache device
    # cacheset UUID, as found under /sys/fs/bcache/<UUID>
    cset_uuid = bcache.get_attached_cacheset(device)
    # /sys/class/block/vdX which is a backing dev of device (bcacheN)
    backing_sysfs = bcache.get_backing_device(block.path_to_kname(device))
    # /sys/class/block/bcacheN/bcache
    bcache_sysfs = bcache.sysfs_path(device, strict=False)

    # stop cacheset if one is present
    if cset_uuid:
        LOG.info('%s was attached to cacheset %s, stopping cacheset', device,
                 cset_uuid)
        bcache.stop_cacheset(cset_uuid)

        # let kernel settle before the next remove
        udev.udevadm_settle()
        LOG.info('bcache cacheset stopped: %s', cset_uuid)

    # test and log whether the device paths are still present
    to_check = [bcache_sysfs, backing_sysfs]
    found_devs = [os.path.exists(p) for p in to_check]
    LOG.debug('os.path.exists on blockdevs:\n%s',
              list(zip(to_check, found_devs)))
    if not any(found_devs):
        LOG.info('bcache backing device already removed: %s (%s)',
                 bcache_sysfs, device)
        LOG.debug('bcache backing device checked: %s', backing_sysfs)
    else:
        LOG.info('stopping bcache backing device at: %s', bcache_sysfs)
        bcache.stop_device(bcache_sysfs)
    return
Example 13
def shutdown_bcache(device):
    """
    Shut down bcache for specified bcache device

    1. Stop the cacheset that `device` is connected to
    2. Stop the 'device'
    """
    if not device.startswith('/sys/class/block'):
        raise ValueError(
            'Invalid Device (%s): '
            'Device path must start with /sys/class/block/' % device)

    LOG.info('Wiping superblock on bcache device: %s', device)
    _wipe_superblock(block.sysfs_to_devpath(device), exclusive=False)

    # bcache device removal should be fast but in an extreme
    # case, might require the cache device to flush large
    # amounts of data to a backing device.  The strategy here
    # is to wait for approximately 30 seconds but to check
    # frequently since curtin cannot proceed until the devices
    # are cleared.
    removal_retries = [0.2] * 150  # 30 seconds total
    bcache_shutdown_message = ('shutdown_bcache running on {} has determined '
                               'that the device has already been shut down '
                               'during handling of another bcache dev. '
                               'skipping'.format(device))

    if not os.path.exists(device):
        LOG.info(bcache_shutdown_message)
        return

    # get slaves [vdb1, vdc], allow for slaves to not have bcache dir
    slave_paths = [
        get_bcache_sys_path(k, strict=False)
        for k in os.listdir(os.path.join(device, 'slaves'))
    ]

    # stop cacheset if it exists
    bcache_cache_sysfs = get_bcache_using_dev(device, strict=False)
    if not os.path.exists(bcache_cache_sysfs):
        LOG.info('bcache cacheset already removed: %s',
                 os.path.basename(bcache_cache_sysfs))
    else:
        LOG.info('stopping bcache cacheset at: %s', bcache_cache_sysfs)
        maybe_stop_bcache_device(bcache_cache_sysfs)
        try:
            util.wait_for_removal(bcache_cache_sysfs, retries=removal_retries)
        except OSError:
            LOG.info('Failed to stop bcache cacheset %s', bcache_cache_sysfs)
            raise

        # let kernel settle before the next remove
        udev.udevadm_settle()

    # after stopping cache set, we may need to stop the device
    # both the dev and sysfs entry should be gone.

    # we know the bcacheN device is really gone when we've removed:
    #  /sys/class/block/{bcacheN}
    #  /sys/class/block/slaveN1/bcache
    #  /sys/class/block/slaveN2/bcache
    bcache_block_sysfs = get_bcache_sys_path(device, strict=False)
    to_check = [device] + slave_paths
    found_devs = [os.path.exists(p) for p in to_check]
    LOG.debug('os.path.exists on blockdevs:\n%s',
              list(zip(to_check, found_devs)))
    if not any(found_devs):
        LOG.info('bcache backing device already removed: %s (%s)',
                 bcache_block_sysfs, device)
        LOG.debug('bcache slave paths checked: %s', slave_paths)
        return
    else:
        LOG.info('stopping bcache backing device at: %s', bcache_block_sysfs)
        maybe_stop_bcache_device(bcache_block_sysfs)
        try:
            # wait for them all to go away
            for dev in [device, bcache_block_sysfs] + slave_paths:
                util.wait_for_removal(dev, retries=removal_retries)
        except OSError:
            LOG.info('Failed to stop bcache backing device %s',
                     bcache_block_sysfs)
            raise

    return
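
A minimal usage sketch for the shutdown helper above; the sysfs path is illustrative.

# The helper expects the sysfs block path of the bcacheN device to tear down.
shutdown_bcache('/sys/class/block/bcache0')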
Example 14
def start_clear_holders_deps():
    """
    prepare system for clear holders to be able to scan old devices
    """
    # an mdadm scan has to be started in case there is an md device that
    # needs to be detected. if the scan fails, it is either because there are
    # no mdadm devices on the system, or because there is an mdadm device in
    # a damaged state that could not be started. due to the nature of the
    # mdadm tools, it is difficult to know which is the case. if any errors
    # did occur, then ignore them, since no action needs to be taken if there
    # were no mdadm devices on the system, and in the case where there is
    # some mdadm metadata on a disk, but not enough to start the array, the
    # call to wipe_volume on all disks and partitions should be sufficient to
    # remove the mdadm metadata
    mdadm.mdadm_assemble(scan=True, ignore_errors=True)
    # collect detail on any assembling arrays
    for md in [md for md in glob.glob('/dev/md*')
               if not os.path.isdir(md) and not identify_partition(md)]:
        mdstat = None
        if os.path.exists('/proc/mdstat'):
            mdstat = util.load_file('/proc/mdstat')
            LOG.debug("/proc/mdstat:\n%s", mdstat)
            found = [line for line in mdstat.splitlines()
                     if os.path.basename(md) in line]
            # in some cases we have a /dev/md0 device node
            # but the kernel has already renamed the device /dev/md127
            if len(found) == 0:
                LOG.debug('Ignoring md device %s, not present in mdstat', md)
                continue

        # give it a second poke to encourage running
        try:
            LOG.debug('Activating mdadm array %s', md)
            (out, err) = mdadm.mdadm_run(md)
            LOG.debug('MDADM run on %s stdout:\n%s\nstderr:\n%s', md, out, err)
        except util.ProcessExecutionError:
            LOG.debug('Non-fatal error when starting mdadm device %s', md)

        # extract details if we can
        try:
            (out, err) = mdadm.mdadm_query_detail(md, export=False,
                                                  rawoutput=True)
            LOG.debug('MDADM detail on %s stdout:\n%s\nstderr:\n%s',
                      md, out, err)
        except util.ProcessExecutionError:
            LOG.debug('Non-fatal error when querying mdadm detail on %s', md)

    mp_support = multipath.multipath_supported()
    if mp_support:
        LOG.debug('Detected multipath support, reload maps')
        multipath.reload()
        multipath.force_devmapper_symlinks()

    # scan and activate for logical volumes
    lvm.lvm_scan(multipath=mp_support)
    try:
        lvm.activate_volgroups(multipath=mp_support)
    except util.ProcessExecutionError:
        # partial vg may not come up due to missing members, that's OK
        pass
    udev.udevadm_settle()

    # the bcache module needs to be present to properly detect bcache devs
    # on some systems (precise without hwe kernel) it may not be possible to
    # load the bcache module because it is not present in the kernel. if this
    # happens then there is no need to halt installation, as the bcache devices
    # will never appear and will never prevent the disk from being reformatted
    util.load_kernel_module('bcache')

    if not zfs.zfs_supported():
        LOG.warning('zfs filesystem is not supported in this environment')
Example 15
def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name=""):
    LOG.debug('mdadm_create: md_devname=%s raidlevel=%s devices=%s spares=%s'
              ' name=%s', md_devname, raidlevel, devices, spares, md_name)

    assert_valid_devpath(md_devname)

    if raidlevel not in VALID_RAID_LEVELS:
        raise ValueError('Invalid raidlevel: [{}]'.format(raidlevel))

    min_devices = md_minimum_devices(raidlevel)
    if len(devices) < min_devices:
        err = 'Not enough devices for raidlevel: ' + str(raidlevel)
        err += ' minimum devices needed: ' + str(min_devices)
        raise ValueError(err)

    if spares and raidlevel not in SPARE_RAID_LEVELS:
        err = ('Raidlevel does not support spare devices: ' + str(raidlevel))
        raise ValueError(err)

    (hostname, _err) = util.subp(["hostname", "-s"], rcs=[0], capture=True)

    cmd = [
        "mdadm", "--create", md_devname, "--run",
        "--homehost=%s" % hostname.strip(),
        "--level=%s" % raidlevel,
        "--raid-devices=%s" % len(devices)
    ]
    if md_name:
        cmd.append("--name=%s" % md_name)

    for device in devices:
        # mdadm can be sticky, double check
        holders = get_holders(device)
        if len(holders) > 0:
            LOG.warning('Detected holders during mdadm creation: %s', holders)
            raise OSError('Failed to remove holders from %s' % device)
        # Zero out device superblock just in case device has been used for raid
        # before, as this will cause many issues
        util.subp(["mdadm", "--zero-superblock", device], capture=True)
        cmd.append(device)

    if spares:
        cmd.append("--spare-devices=%s" % len(spares))
        for device in spares:
            util.subp(["mdadm", "--zero-superblock", device], capture=True)
            cmd.append(device)

    # Create the raid device
    udev.udevadm_settle()
    util.subp(["udevadm", "control", "--stop-exec-queue"])
    try:
        util.subp(cmd, capture=True)
    except util.ProcessExecutionError:
        # failures here are frequently caused by missing md kernel modules
        # (LP: #1519470) - add extra debug
        LOG.debug('mdadm_create failed - extra debug regarding md modules')
        (out, _err) = util.subp(["lsmod"], capture=True)
        if not _err:
            LOG.debug('modules loaded: \n%s' % out)
        raidmodpath = '/lib/modules/%s/kernel/drivers/md' % os.uname()[2]
        (out, _err) = util.subp(["find", raidmodpath],
                                rcs=[0, 1],
                                capture=True)
        if out:
            LOG.debug('available md modules: \n%s' % out)
        else:
            LOG.debug('no available md modules found')

        for dev in devices + (spares or []):
            h = get_holders(dev)
            LOG.debug('Device %s has holders: %s', dev, h)
        raise

    util.subp(["udevadm", "control", "--start-exec-queue"])
    udev.udevadm_settle(exists=md_devname)