Example #1
def md_check_devices(md_devname, devices):
    if not devices:
        raise ValueError('Cannot verify raid array with empty device list')

    # collect and compare raid devices based on md name versus
    # expected device list.
    #
    # NB: In some cases, a device might report as a spare until
    #     md has finished syncing it into the array.  Currently
    #     we fail the check since the specified raid device is not
    #     yet in its proper role.  Callers can check mdadm_sync_action
    #     state to see if the array is currently recovering, which would
    #     explain the failure.  Also mdadm_degraded will indicate if the
    #     raid is currently degraded or not, which would also explain the
    #     failure.
    md_raid_devices = md_get_devices_list(md_devname)
    LOG.debug('md_check_devices: md_raid_devs: %s', md_raid_devices)
    _compare_devlist(devices, md_raid_devices)
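For context, a minimal caller sketch, assuming curtin's curtin.block.mdadm import path; the device paths, member list, and retry policy are illustrative, and /sys/block/<name>/md/sync_action is the kernel's standard md sysfs attribute referred to in the NB above:

from curtin.block import mdadm

md_dev = '/dev/md0'                 # illustrative array path
members = ['/dev/vdb', '/dev/vdc']  # illustrative member list
try:
    mdadm.md_check_devices(md_dev, members)
except ValueError:
    # Per the NB above, a member still syncing in reports as a spare and
    # fails the check; the kernel's sync_action attribute shows whether
    # the array is busy recovering.
    with open('/sys/block/md0/md/sync_action') as f:
        sync_action = f.read().strip()
    if sync_action in ('resync', 'recover', 'check', 'repair'):
        print('array busy (%s); re-check once idle' % sync_action)
    else:
        raise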
Example #2
def mdadm_create(md_devname, raidlevel, devices, spares=None, container=None,
                 md_name="", metadata=None):
    LOG.debug('mdadm_create: '
              'md_devname=%s raidlevel=%s devices=%s spares=%s name=%s',
              md_devname, raidlevel, devices, spares, md_name)

    assert_valid_devpath(md_devname)
    if not metadata:
        metadata = 'default'

    if raidlevel not in VALID_RAID_LEVELS:
        raise ValueError('Invalid raidlevel: [{}]'.format(raidlevel))

    min_devices = md_minimum_devices(raidlevel)
    devcnt = len(devices) if not container else \
        len(md_get_devices_list(container))
    if devcnt < min_devices:
        raise ValueError(
            'Not enough devices (%s) for raidlevel: %s;'
            ' minimum devices needed: %s' % (devcnt, raidlevel, min_devices))

    if spares and raidlevel not in SPARE_RAID_LEVELS:
        err = ('Raidlevel does not support spare devices: ' + str(raidlevel))
        raise ValueError(err)

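    # mdadm records --homehost in the array metadata; the short hostname
    # identifies the array as belonging to this machine.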
    (hostname, _err) = util.subp(["hostname", "-s"], rcs=[0], capture=True)

    cmd = ["mdadm", "--create", md_devname, "--run",
           "--homehost=%s" % hostname.strip(),
           "--raid-devices=%s" % devcnt]

    if not container:
        cmd.append("--metadata=%s" % metadata)
    if raidlevel != 'container':
        cmd.append("--level=%s" % raidlevel)

    if md_name:
        cmd.append("--name=%s" % md_name)

    if container:
        cmd.append(container)

    for device in devices:
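        # Any holder (e.g. a device-mapper or md device stacked on top)
        # means the device is still in use and cannot be wiped safely.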
        holders = get_holders(device)
        if holders:
            LOG.warning('Detected holders during mdadm creation: %s', holders)
            raise OSError('Failed to remove holders from %s' % device)
        zero_device(device)
        cmd.append(device)

    if spares:
        cmd.append("--spare-devices=%s" % len(spares))
        for device in spares:
            zero_device(device)
            cmd.append(device)

    # Create the raid device
    udev.udevadm_settle()
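    # Defer udev rule execution so events fired by mdadm's writes do not
    # race the assembly; the queue is restarted after creation.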
    util.subp(["udevadm", "control", "--stop-exec-queue"])
    try:
        util.subp(cmd, capture=True)
    except util.ProcessExecutionError:
        # failures are often due to missing md modules (LP: #1519470);
        # gather extra debug information
        LOG.debug('mdadm_create failed - extra debug regarding md modules')
        (out, _err) = util.subp(["lsmod"], capture=True)
        if not _err:
            LOG.debug('modules loaded: \n%s', out)
        raidmodpath = '/lib/modules/%s/kernel/drivers/md' % os.uname()[2]
        (out, _err) = util.subp(["find", raidmodpath],
                                rcs=[0, 1], capture=True)
        if out:
            LOG.debug('available md modules: \n%s', out)
        else:
            LOG.debug('no available md modules found')

        for dev in devices + (spares or []):
            h = get_holders(dev)
            LOG.debug('Device %s has holders: %s', dev, h)
        raise

    util.subp(["udevadm", "control", "--start-exec-queue"])
    udev.udevadm_settle(exists=md_devname)
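A hedged creation sketch to close out the example; the paths and parameters are illustrative, and curtin's VALID_RAID_LEVELS and SPARE_RAID_LEVELS constants determine which levels and spare combinations are actually accepted:

from curtin.block import mdadm

# Two-disk RAID1 with one hot spare: mdadm_create zeroes each member,
# then runs mdadm --create and waits for the md node via udev.
mdadm.mdadm_create('/dev/md0', 'raid1', ['/dev/vdb', '/dev/vdc'],
                   spares=['/dev/vdd'], md_name='data0')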