Beispiel #1
0
def _delete(cmd):
    """
    Deletes a RAID array as specified in the passed command

    @type  cmd: dict
    @param cmd: command to be performed (must provide 'name' and 'devices')

    @rtype: None
    @returns: nothing
    """
    # tell the user which array is about to be removed
    llecho('Deleting RAID array /dev/%(name)s' % cmd)

    # make sure the raid partition is no longer mounted
    run(CMD_UMOUNT_RAID % cmd)

    # stop the array; abort the installer if that fails
    if run(CMD_STOP_RAID % cmd) != 0:
        llecho('Error: cannot stop the array')
        sys.exit(1)

    # wipe the md superblock from every member device; abort on failure
    memberList = ' '.join('/dev/%s' % d for d in cmd['devices'])

    if run(CMD_ZERO_RAID % {'devices': memberList}) != 0:
        llecho('Error: cannot zero the superblocks of the RAID devices')
        sys.exit(1)
Beispiel #2
0
    def __clearDisks(self, hasMultipath):
        """
        Clears all disks of the system. This cleaning will erase ALL DATA of the
        disks. There is no step back.

        @type  hasMultipath: bool
        @param hasMultipath: info about multipath on machine

        @rtype: None
        @return: Nothing
        """
        try:

            # bug 109358: try to stop all raid partitions. it should be done first
            # in a scenario where there is a LVM on top of a SW RAID. However, in
            # a opposite way (RAID on top of LVM) it will fail. So we just let pass
            # any exception from sw raid. Catch Exception (not a bare except) so
            # SystemExit/KeyboardInterrupt are never swallowed here.
            try:
                raid.stop()

            except Exception:
                pass

            # delete LVM entities before partitioning
            self.__logger.info('Deleting LVM entities...')
            self.__logger.debug('PV Delete List: %s' % str(self.__pvDeleteList))
            self.__logger.debug('VG Delete List: %s' % str(self.__vgDeleteList))
            self.__logger.debug('LV Delete List: %s' % str(self.__lvDeleteList))

            lvm.delLvmEntities(self.__pvDeleteList,
                               self.__vgDeleteList,
                               self.__lvDeleteList)

            # stop LVM volume groups to avoid problems with disks partitioning
            self.__logger.info('Stopping LVM...')
            lvm.stop()

            # Bug 109358: if SW RAID is on top a LVM we can stop it safetly now
            # because LVM was just stopped above. If any problem happens from now
            # on we need to let the exception raises above.
            raid.stop()

        # installer errors already carry user-facing info: propagate untouched
        except PKVMError:
            raise

        except Exception as e:
            self.__logger.critical("Unexpected error")
            self.__logger.critical("EXCEPTION:" + str(type(e)))
            self.__logger.critical(str(e))
            self.__logger.critical("Stacktrace:" + str(traceback.format_exc()))
            raise PKVMError("PARTITIONER", "ERROR", "ERROR")

        # perform custom setup for multipath devices (return value was unused)
        self.__logger.info('Performing custom multipath configuration...')
        self.__logger.debug('Has multipath = %s' % str(hasMultipath))
        multipath.setup(hasMultipath,
                        bool(len(self.__lvmCommands)),
                        bool(len(self.__raidCommands)),
                        self.__tolerantMode)

        # wait for udev to handle all events
        run(CMD_UDEV_SETTLE)
Beispiel #3
0
def createPartitions(diskCommands, hasMultipath, sector_size):
    """
    Creates all partitions.

    @type  diskCommands: dict
    @param diskCommands: abstract commands for conventional disks

    @type  hasMultipath: bool
    @param hasMultipath: info about multipath on machine

    @type  sector_size: int
    @param sector_size: disk logical sector size in bytes (e.g. 512 or 4096)

    @rtype: None
    @returns: Nothing
    """
    # report the operation
    llecho('Creating Partitions')

    # create partitions
    for cmd in diskCommands:

        # command is not create: do nothing
        if cmd['command'] != 'create:partition':
            continue

        # remove any possible LVM garbage from the PVs partition
        llecho('Clear partitions before creating')
        run('dd if=/dev/zero of=%s bs=%d seek=%s count=2048' % (cmd['disk_name'], sector_size, cmd['start']))

        # command is create: report operation and run it
        llecho('Creating partition /dev/%(name)s' % cmd)

        # get parameters to create partition
        cmd['type'] = TYPE[cmd['type']]
        partedCommand = CMD_CREATE_PARTITION % cmd
        disk = cmd['disk_name']
        errorMessage = ERROR_CREATE % cmd

        # create partition
        _runPartedCommand(partedCommand, disk, errorMessage, hasMultipath)

        # extended partition is only a container: no flags to set.
        # (This check was previously dead code at the end of the loop body;
        # it is behavior-equivalent here since 'extended' matches no flag.)
        if cmd['fs'] == 'extended':
            continue

        # partition is PReP: set as bootable
        if cmd['fs'] == 'prep':
            _setFlag(cmd, 'boot', hasMultipath)

        # partition is PReP, RAID or LVM: set respective flag
        if cmd['fs'] in ['prep', 'raid', 'lvm']:
            _setFlag(cmd, cmd['fs'], hasMultipath)
Beispiel #4
0
def stop():
    """
    Stops all RAID arrays so that all resources (partitions) are released and
    disks partitioning can be done

    FIXME: There is a known bug (73870) that happens when installing an
    automatic partitioning scheme after a previous RAID partition in multiple
    disks. A loop with a sleep was added, trying to stop up to 10 times if
    it fails. A better solution should be investigated here.

    @rtype: None
    @returns: nothing
    """
    # report operation
    llecho('Stopping all RAID arrays')

    # retry a few times (see FIXME above), pausing between failed attempts
    status = 0

    for _ in range(10):
        status = run(CMD_STOP_MDADM)

        # stopped cleanly: no more retries needed
        if status == 0:
            break

        # back off briefly before trying again
        time.sleep(1)

    # every attempt failed: report and abort through the installer error path
    if status != 0:
        llecho('Error: cannot stop RAID arrays')
        raise PKVMError('PARTITIONER', 'RAID', 'STOP_SWRAID')
Beispiel #5
0
def _setPartType(device):
    """
    Sets the partition type of the passed device as linux_raid_auto

    @type  device: basestring
    @param device: device whose partition type is to be set

    @rtype: None
    @returns: nothing
    """
    # report the operation
    llecho('Setting the partition type of %s as linux_raid_auto' % device)

    # not a valid device name: error ('is None' is the correct identity test
    # for the no-match case, not '== None')
    match = PATTERN_PART.match(device)

    if match is None:
        llecho('Error: cannot parse %s into a device name '
              'and partition number' % device)
        sys.exit(1)

    # get device name and partition number
    info = match.groupdict()

    # cannot set partition type: error
    status = run(CMD_SET_PART_TYPE % info)

    if status != 0:
        llecho('Error: cannot set the partition type of '
              '/dev/%(device)s Id %(number)s' % info)
        sys.exit(1)
Beispiel #6
0
def _create(cmd):
    """
    Creates a RAID array as specified in the passed command

    @type  cmd: dict
    @param cmd: command to be performed

    @rtype: None
    @returns: nothing
    """
    # report the operation
    llecho('Creating RAID level %(level)d array /dev/%(name)s from %(devices)s' % cmd)

    # FIXME: stop raid before any attempt to create an array to assure
    # that there will not be any blocked device causing error
    stop()

    # build command line to be used to create the array
    cmdLine = CMD_CREATE_RAID[cmd['level']] % {
        'name': cmd['name'],
        'level': cmd['level'],
        'chunkSize': cmd['chunkSize'],
        'nDevices': len(cmd['devices']) - cmd['spares'],
        'nSpares': cmd['spares'],
        'devices': ' '.join(['/dev/%s' % d for d in cmd['devices']]),
        'metadata': cmd['metadata'],
    }

    # failed creating the array: exit
    status = run(cmdLine)

    if status != 0:
        llecho('Error: cannot create the array')
        sys.exit(1)

    # FIXME: start raid again after the command was performed
    # successfully
    start()

    # set the partition type of each device as FD
    for device in cmd['devices']:
        _setPartType(device)

    # filesystem is not created elsewhere (reiserfs/swap): make it on the
    # array now. Note: the command previously read 'mkfs\.' -- an invalid
    # string escape leaked from a regex; the shell collapsed '\.' to '.',
    # so plain 'mkfs.' preserves the executed command exactly.
    if cmd['fileSystem'] not in ['reiserfs', 'swap']:
        llecho('Creating filesystem of type %(fileSystem)s on /dev/%(name)s - RAID %(level)d' % cmd)
        run('mkfs.%(fileSystem)s /dev/%(name)s' % cmd)
Beispiel #7
0
    def resetRootDevice(self):
        """
        Formats root device (according LVM default scheme) and adjusts all
        pointers to allow reinstall the system correctly.

        Important: this method is directly related to LVM default partitioning
        scheme. It assumes that root device is under /dev/mapper/vg_root-lv_root
        path. If this default scheme changes on future, this method must be
        revisited to assure its functionality.

        @rtype: bool
        @return: True if everything is ok, False otherwise
        """
        # cycle LVM so the volume groups come back in a known-good state
        self.__logger.info('Restarting LVM...')
        lvm.stop()
        lvm.start(self.__tolerantMode)

        # block until udev has processed every pending event
        if run(CMD_UDEV_SETTLE) != 0:
            raise RuntimeError('Error: udevadm settle failure')

        # Do not trust content from / partition.  User can screw up
        # with its / partition and try a reinstall to fix it.  Thus
        # our code cannot trust on reading content from old and dirty
        # / partition.  We can just infere /boot partition by
        # appending 2 to the detected disk.
        installed_disk = self.getPreviousInstalledDisk()
        self.__bootDevice = self.genBootPartitionName(installed_disk)

        # check multipath (keyed by the bare device name, without /dev/ prefix)
        diskKey = installed_disk.split('/')[-1]
        self.__hasMultipath = self.__diskParameters[diskKey]['mpath']

        # as consequence, configure prep device path
        self.__prepDevice = self.__bootDevice[:-1] + "1"

        # update root, log, data and swap paths
        self.__rootDevice = '/dev/mapper/%s-%s' % (VGROOT, LVROOT)
        self.__logDevice  = '/dev/mapper/%s-%s' % (VGLOG, LVLOG)
        self.__dataDevice = '/dev/mapper/%s-%s' % (VGDATA, LVDATA)
        self.__swapDevice = '/dev/mapper/%s-%s' % (VGSWAP, LVSWAP)

        # trace every resolved device path
        for template, value in (
            ("resetRootDevice(): __prepDevice = %s", self.__prepDevice),
            ("resetRootDevice(): __bootDevice = %s", self.__bootDevice),
            ("resetRootDevice(): __rootDevice = %s", self.__rootDevice),
            ("resetRootDevice(): __logDevice = %s", self.__logDevice),
            ("resetRootDevice(): __dataDevice = %s", self.__dataDevice),
            ("resetRootDevice(): __swapDevice = %s", self.__swapDevice),
        ):
            self.__logger.debug(template % value)

        # format boot, root and swap devices
        formatPartition(self.__bootDevice)
        formatPartition(self.__rootDevice)
        formatSwapPartition(self.__swapDevice)

        return True
Beispiel #8
0
    def __postCleanup(self, selected_disk=None, sector_size=512):
        """
        Remove /boot partitions from previous installations of PowerKVM.

        @type  selected_disk: basestring
        @param selected_disk: bare name of the disk selected for this install
            (e.g. 'sda' or an 'mpath...' device); must not be None

        @type  sector_size: int
        @param sector_size: disk logical sector size in bytes

        @rtype: None
        @return: Nothing

        @raise ValueError: if selected_disk is not provided
        """
        self.__logger.info("Cleaning previous PowerKVM /boot partitions...")

        # the historical default of None was unusable ('in' on None raised a
        # bare TypeError below); fail fast with an explicit error instead
        if selected_disk is None:
            raise ValueError('selected_disk must be provided')

        # adjust disk name
        if 'mpath' in selected_disk:
            selected_disk = '/dev/mapper/%s' % selected_disk
        else:
            selected_disk = '/dev/%s' % selected_disk
        self.__logger.debug("selected_disk = %s" % selected_disk)

        if self.detectedPreviousInstall():

            for part in self.__prevInstalls['pkvm_boot_parts']:

                # strip numbers from partition to obtain disk name
                diskname = ''.join([c for c in part if not c.isdigit()])

                self.__logger.debug("part = %s" % part)
                self.__logger.debug("diskname = %s" % diskname)

                # never wipe the disk being (re)partitioned by this install
                if diskname == selected_disk:
                    self.__logger.info("Disk %s was already partitioned, ignoring it" % selected_disk)
                    continue

                if sector_size == 512:
                    # after deleting all partitions clear the partition table
                    cmd = CMD_WIPEFS % part
                    self.__logger.debug("Running %s" % cmd)
                    run(cmd)

                # wipefs does not consider sector size to remove GPT partition
                # signature. This hack should be here until we have the patch
                # http://www.spinics.net/lists/util-linux-ng/msg09932.html upstream
                else:
                    self.__logger.debug("4K disks wipefs issue, using dd")
                    run('dd if=/dev/zero of=%s bs=%d count=1000' % (part, sector_size))
Beispiel #9
0
def start():
    """
    Activate all existing RAID arrays so that RAID operations can be performed

    @rtype: None
    @returns: nothing
    """
    # report operation
    llecho('Activating all existing RAID arrays')

    # try to assemble/activate every known array
    status = run(CMD_START_MDADM)

    # exit status 1 is tolerated as well (presumably "nothing to activate" --
    # confirm against the mdadm version shipped); any other non-zero is fatal
    if status not in (0, 1):
        llecho('Error: cannot activate RAID arrays')
        sys.exit(1)
Beispiel #10
0
def _fixDiskLabel(device):
    """
    Fixes disk label so it can be properly partitioned afterwards.

    @type  device: basestring
    @param device: passed device (e.g. '/dev/sda', '/dev/sdb', ...)

    @rtype: None
    @returns: Nothing
    """
    # rewrite the disk label; a non-zero status means it could not be fixed
    if run(CMD_FIX_DISK_LABEL % {'disk': device}) != 0:
        llecho(ERROR_FIX % device)
        sys.exit(1)

    # label rewritten successfully: report that the table was reset
    llecho('Partition table for %s was reset' % device)
Beispiel #11
0
def _reReadPartitionTable(disk, hasMultipath = False):
    """
    Asks the kernel to re-read the partition table before trying
    to format it

    @type  disk: basestring
    @param disk: disk device name

    @type  hasMultipath: bool
    @param hasMultipath: info about multipath on machine

    @rtype: bool
    @return: True if partition table sync successfull. False otherwise
    """
    # retry the sync several times before giving up.
    # NOTE(review): range(1, MAX_SYNC_ATTEMPTS) yields MAX_SYNC_ATTEMPTS - 1
    # tries -- confirm that off-by-one is intentional before changing it.
    attempt = 1
    while attempt < MAX_SYNC_ATTEMPTS:

        # log the number of attempts
        llecho('Re-reading partition table for %s (try %d)' % (disk, attempt))

        # give the kernel a moment before asking again
        time.sleep(1)

        # FIXME: during the attempt to read the partitions, raid should
        # be inactive or it will block devices belonging to its array
        # and will make the next command to fail. It is not clear why
        # raid becomes active here since is has been stopped in
        # manage_parts. It demands further investigation.
        if hasMultipath:
            raid.stop()

        # partition table re-read successfully: report success right away
        if run(CMD_HDPARM_Z % disk) == 0:
            return True

        attempt += 1

    return False
Beispiel #12
0
    def __createConventionalPartitions(self, hasMultipath, sector_size):
        """
        Calls methods to create conventional partitions on system. Creates LVM
        partitions to allow to expand the system later.

        @type  hasMultipath: bool
        @param hasMultipath: info about multipath on machine

        @type  sector_size: int
        @param sector_size: disk logical sector size in bytes

        @rtype: None
        @return: Nothing
        """
        # resolve the device node path for the selected disk
        if 'mpath' in self.__disk:
            diskpath = '/dev/mapper/%s' % self.__disk
        else:
            diskpath = '/dev/%s' % self.__disk

        try:
            if sector_size == 512:
                # after deleting all partitions clear the partition table
                self.__logger.debug("Wipefs on %s" % diskpath)
                run('wipefs -f -a %s' % diskpath)

            # wipefs does not consider sector size to remove GPT partition
            # signature. This hack should be here until we have the patch
            # http://www.spinics.net/lists/util-linux-ng/msg09932.html upstream
            else:
                self.__logger.debug("4K disks wipefs issue, using dd")
                run('dd if=/dev/zero of=%s bs=%d count=10000' % (diskpath, sector_size))

            # ensure disks have valid partition tables
            self.__logger.info('Fixing partition tables...')
            conventional.fixPartitionTables(self.__diskCommands)

            # log disks state (exit status intentionally ignored)
            _, scheme = conventional.logPartitioningScheme()
            self.__logger.debug('Partition scheme:\n\n%s\n\n' % scheme)

            # delete partitions
            self.__logger.info('Deleting partitions...')
            conventional.deletePartitions(self.__diskCommands, hasMultipath, True)

            # log disks state again, after the deletions
            _, scheme = conventional.logPartitioningScheme()
            self.__logger.debug('Partition scheme:\n\n%s\n\n' % scheme)

        except Exception as e:
            self.__logger.critical("Unexpected error")
            self.__logger.critical("EXCEPTION:" + str(type(e)))
            self.__logger.critical(str(e))
            self.__logger.critical("Stacktrace:" + str(traceback.format_exc()))
            raise PKVMError("PARTITIONER", "CONVENTIONAL", "DELETE_PARTITIONS")

        try:
            # create partitions
            self.__logger.info('Creating partitions...')
            conventional.createPartitions(self.__diskCommands, hasMultipath, sector_size)

            # log disks state after creation
            _, scheme = conventional.logPartitioningScheme()
            self.__logger.debug('Partition scheme:\n\n%s\n\n' % scheme)

            # wait for udev to handle all events
            run(CMD_UDEV_SETTLE)

        except Exception as e:
            self.__logger.critical("Unexpected error")
            self.__logger.critical("EXCEPTION:" + str(type(e)))
            self.__logger.critical(str(e))
            self.__logger.critical("Stacktrace:" + str(traceback.format_exc()))
            raise PKVMError("PARTITIONER", "CONVENTIONAL", "CREATE_PARTITIONS")