def prepare_bluestore(device_name, journal=None):
    """Prepare a bluestore OSD on *device_name* with ceph-volume, then activate it.

    device_name is either a raw disk or an LVM logical volume ("vg/lv").
    journal, when given, names a disk to host the external block.db.
    Returns True on success, False on failure.
    """
    logger.info('Start prepare bluestore OSD : ' + device_name)
    journal_part_name = ""
    disk_name = ""
    osd_id = ""

    # Logical volumes are consumed as-is; raw disks get a fresh
    # full-size OSD partition created on them first.
    if lvm_lib.is_lv(device_name):
        osd_part_name = device_name
        # The VG name (before the "/") encodes the OSD id after its first dot;
        # the VG's backing disk is also resolved for the activation step.
        vg_name = device_name.split("/")[0]
        osd_id = vg_name.split(".")[1]
        logger.info("OSD_ID = {}".format(osd_id))

        physical_list = lvm_lib.get_physical_disks(vg_name)
        main_devices = list(physical_list["main"])
        if main_devices:
            partition_name = ceph_disk.get_dev_name(main_devices[0])
            disk_name = ceph_disk.get_device_name(partition_name)
    else:
        disk_name = device_name
        osd_part_num = ceph_disk.create_osd_partition(device_name)
        osd_part_name = "/dev/" + ceph_disk.get_partition_name(
            device_name, osd_part_num)

    cmd = 'ceph-volume --log-path ' + CEPH_VOLUME_LOG_PATH + ' lvm prepare '
    cmd += ' --bluestore --data ' + osd_part_name

    if len(osd_id) > 0:
        cmd += ' --osd-id ' + osd_id

    if journal is not None and 0 < len(journal):
        journal_part_name = get_external_journal(journal)
        if journal_part_name is None:
            return False
        cmd += ' --block.db /dev/' + journal_part_name

    logger.info('Starting : ' + cmd)
    if not call_cmd_2(cmd):
        logger.error('Error executing : ' + cmd)
        # Release the claimed journal partition back to "available" on failure.
        if journal_part_name != "":
            set_journal_part_avail(journal_part_name)
        return False

    # Activating OSD :
    activate_osd(disk_name)
    return True
def get_disk_list():
    """Return the node's disk list as DiskInfo objects.

    Merges ceph-volume data into the ceph-disk entries and cross-links
    journal/cache disks with the OSD disks that use them.
    """
    ceph_volume_disks = get_ceph_volumes_info()
    ceph_disk_list = get_ceph_disk_list()

    # journal disk name -> list of OSD disk names whose journal lives on it
    linked_osds_to_journal = {}
    # cache disk name -> list of OSD disk names cached by it
    linked_osds_to_cache = {}

    if len(ceph_volume_disks) == 0:
        return ceph_disk_list

    # Fix: .items() instead of the Python-2-only .iteritems(), so this
    # also runs under Python 3 (identical behavior on Python 2).
    for disk, disk_info in ceph_volume_disks.items():
        if len(disk_info.linked_journal) > 0:
            linked_osds_to_journal.setdefault(
                disk_info.linked_journal, []).append(disk)

        if len(disk_info.linked_cache) > 0:
            for cache in disk_info.linked_cache:
                cache = '/dev/' + cache
                partition_name = ceph_disk.get_dev_name(cache)
                cache_name = ceph_disk.get_device_name(partition_name)
                linked_osds_to_cache.setdefault(cache_name, []).append(disk)

    for disk in ceph_disk_list:
        if disk.name in ceph_volume_disks:
            info = ceph_volume_disks[disk.name]
            disk.osd_uuid = info.osd_uuid
            disk.osd_id = info.osd_id
            disk.linked_journal = info.linked_journal
            disk.linked_journal_part_num = info.linked_journal_part_num
            disk.linked_cache = info.linked_cache
            disk.linked_cache_part_num = info.linked_cache_part_num
            disk.vg_name = info.vg_name
            disk.lv_name = info.lv_name
            # NOTE(review): usage is forced to 0 for ceph-volume-managed
            # disks; presumably the caller re-derives it — confirm.
            disk.usage = 0
        # Usage codes 3/4 below appear to mean journal/cache disks
        # (matches DiskUsage.journal/cache handling in get_ceph_disk_list).
        if disk.usage == 3 and disk.name in linked_osds_to_journal:
            for osd in linked_osds_to_journal[disk.name]:
                if osd not in disk.linked_osds:
                    disk.linked_osds.append(osd)

        if disk.usage == 4 and disk.name in linked_osds_to_cache:
            for osd in linked_osds_to_cache[disk.name]:
                if osd not in disk.linked_osds:
                    disk.linked_osds.append(osd)

    return ceph_disk_list
def prepare_filestore(device_name, journal=None):
    """Prepare a filestore OSD on *device_name* with ceph-volume, then activate it.

    device_name is either a raw disk or an LVM logical volume ("vg/lv").
    journal may be None (no external journal), an existing partition name,
    or a whole disk on which a journal partition is claimed.
    Returns True on success, False on failure.
    """
    logger.info('Start prepare filestore OSD : ' + device_name)
    journal_part_name = ""
    journal_option = ""
    disk_name = ""

    # Fix: only build the journal path when a journal was actually supplied.
    # The original computed '/dev/' + journal unconditionally, raising a
    # TypeError whenever journal was left at its default of None.
    if journal is not None and 0 < len(journal):
        journal_path = '/dev/' + journal
        if ceph_disk.is_partition(journal_path):
            # Caller passed an existing partition; use it directly.
            journal_option = ' --journal /dev/' + journal
        else:
            # Whole disk: claim an external journal partition on it.
            journal_part_name = get_external_journal(journal)
            if journal_part_name is None:
                return False
            journal_option = ' --journal /dev/' + journal_part_name

    # Check if device is physical or logical :
    lv = lvm_lib.is_lv(device_name)
    if not lv:
        # create partition with full disk size
        disk_name = device_name
        osd_part_num = ceph_disk.create_osd_partition(device_name)
        osd_part_name = ceph_disk.get_partition_name(device_name, osd_part_num)
        osd_part_name = "/dev/" + osd_part_name

    else:
        osd_part_name = device_name
        # Get the origin disk of volume group to be used in OSD activation :
        vg_name = device_name.split("/")[0]
        physical_list = lvm_lib.get_physical_disks(vg_name)
        main_devices = list(physical_list["main"])

        if len(main_devices) > 0:
            part_name = main_devices[0]
            partition_name = ceph_disk.get_dev_name(part_name)
            disk_name = ceph_disk.get_device_name(partition_name)

    cmd = 'ceph-volume --log-path ' + CEPH_VOLUME_LOG_PATH + ' lvm prepare '
    cmd += ' --filestore --data ' + osd_part_name + journal_option
    logger.info('Starting : ' + cmd)

    if not call_cmd_2(cmd):
        logger.error('Error executing : ' + cmd)
        # Release the claimed journal partition back to "available" on failure.
        if journal_part_name != "":
            set_journal_part_avail(journal_part_name)
        return False

    # Activating OSD :
    activate_osd(disk_name, True)
    return True
def get_ceph_volumes_info():
    """Build a map of physical disk name -> CephVolumeInfo from `ceph-volume lvm list`.

    Only volumes whose cluster fsid matches this node's cluster are kept.
    For each OSD volume the backing devices, linked journal/db disk and
    any write-cache/dm-cache partitions are resolved.
    Returns an empty dict when ceph-volume reports nothing.
    """
    ceph_volumes_disks = {}
    cluster_fsid = ''
    vg_name = ""
    partitions_uuid = {}
    try:
        cluster_fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
        partitions_uuid = ceph_disk.get_partitions_uuid()
    except Exception as e:
        # Best effort: continue with empty fsid / uuid maps.
        logger.error(e)
    cmd = 'ceph-volume --log-path ' + CEPH_VOLUME_LOG_PATH + ' lvm list --format json'
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        if stderr:
            logger.error(stderr)
    if len(stdout) > 0:
        ceph_volumes_info = json.loads(stdout)
        # Fix: .items() instead of the Python-2-only .iteritems(), so this
        # also runs under Python 3 (identical behavior on Python 2).
        for osd_id, osd_info in ceph_volumes_info.items():
            try:
                ceph_volume_disk_info = CephVolumeInfo()
                fsid = ''
                osd_name = ''
                for element in osd_info:
                    if element['type'] == 'block' or element['type'] == 'data':
                        fsid = element['tags']['ceph.cluster_fsid']
                        # Skip volumes belonging to a different cluster.
                        if len(fsid) > 0 and fsid != cluster_fsid:
                            continue
                        ceph_volume_disk_info.osd_id = osd_id
                        ceph_volume_disk_info.osd_uuid = element['tags'][
                            'ceph.osd_fsid']
                        if len(element['devices']) > 0:
                            for device in element['devices']:
                                part_name = ceph_disk.get_dev_name(device)
                                osd_name = ceph_disk.get_device_name(part_name)
                                ceph_volume_disk_info.devices.append(osd_name)

                        # If ceph-volume reports no physical devices, resolve
                        # them from the volume group via get_physical_disks.
                        else:
                            vg_name = element['vg_name']
                            lv_name = element['lv_name']
                            ceph_volume_disk_info.lv_name = lv_name
                            ceph_volume_disk_info.vg_name = vg_name
                            physical_list = lvm_lib.get_physical_disks(vg_name)
                            main_devices = list(physical_list["main"])
                            writecache_devices = list(
                                physical_list["writecache"])
                            cache_devices = list(physical_list["dmcache"])

                            if len(main_devices) > 0:
                                for main_dev in main_devices:
                                    main_part_name = ceph_disk.get_dev_name(
                                        main_dev)
                                    main_dev_name = ceph_disk.get_device_name(
                                        main_part_name)
                                    ceph_volume_disk_info.devices.append(
                                        main_dev_name)
                            # writecache takes precedence over dm-cache.
                            if len(writecache_devices) > 0:
                                for wcache in writecache_devices:
                                    wcache_partition_name = ceph_disk.get_dev_name(
                                        wcache)
                                    ceph_volume_disk_info.linked_cache_part_num.append(
                                        ceph_disk.get_partition_num(
                                            wcache_partition_name))
                                    ceph_volume_disk_info.linked_cache.append(
                                        wcache_partition_name)
                            elif len(cache_devices) > 0:
                                for cache in cache_devices:
                                    cache_partition_name = ceph_disk.get_dev_name(
                                        cache)
                                    ceph_volume_disk_info.linked_cache_part_num.append(
                                        ceph_disk.get_partition_num(
                                            cache_partition_name))
                                    ceph_volume_disk_info.linked_cache.append(
                                        cache_partition_name)

                        journal_path = ""
                        uuid = ""

                        # for filestore :
                        if 'ceph.journal_uuid' in element['tags']:
                            uuid = element['tags']['ceph.journal_uuid']

                        # for bluestore :
                        if 'ceph.db_uuid' in element['tags']:
                            uuid = element['tags']['ceph.db_uuid']
                        if len(uuid) > 0 and uuid in partitions_uuid:
                            journal_path = partitions_uuid[uuid]
                        if len(journal_path) > 0:
                            try:
                                if ceph_disk.is_partition(journal_path):
                                    journal_name = get_disk_by_partition(
                                        journal_path)
                                    journal_partition_name = ceph_disk.get_dev_name(
                                        journal_path)
                                    ceph_volume_disk_info.linked_journal_part_num = ceph_disk.get_partition_num(
                                        journal_partition_name)
                                    # Ignore journals co-located on the OSD's own disk.
                                    if len(osd_name
                                           ) > 0 and osd_name in journal_name:
                                        continue
                                    ceph_volume_disk_info.linked_journal = journal_name
                            except Exception as ex:
                                logger.error(ex)
                                continue
            except Exception as e:
                logger.exception(e)
                continue

            # Key the result by every backing device of this OSD.
            for device in ceph_volume_disk_info.devices:
                ceph_volumes_disks.update({device: ceph_volume_disk_info})

    return ceph_volumes_disks
def get_disk_by_partition(partition_path):
    """Return the name of the whole disk that holds *partition_path*."""
    return ceph_disk.get_device_name(ceph_disk.get_dev_name(partition_path))
def get_ceph_disk_list():
    """Scan local block devices via ceph-disk and classify each one.

    Returns a list of DiskInfo objects whose usage is set to one of
    DiskUsage.{no, osd, journal, cache, mounted, system}; journal disks
    also get their linked OSD names filled in.
    """
    disk_info_list = []

    # read fsid for our cluster from config file
    fsid = None
    try:
        fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
    except Exception as e:
        # Best effort: without fsid, OSD partitions cannot be matched below.
        pass

    # journal disk name -> list of OSD disk names keeping their journal on it
    journal_linked_osds = {}

    counter = 0

    # ceph_disk.list_devices() can fail transiently; retry once per second
    # for up to ~120 seconds, then give up with an empty list.
    while True:
        try:
            ceph_disk_list_devs = ceph_disk.list_devices()
            break
        except Exception as e:
            if counter == 120:
                return disk_info_list
            counter += 1
            logger.error(e)
            time.sleep(1)

    for device in ceph_disk_list_devs:

        no_of_partitions = 0
        no_of_availabe_partitions = 0

        path = device['path']
        name = ceph_disk.get_dev_name(path)

        # check for disk block devices (sd*, xvd*, nvme* only)
        if not name.startswith('sd') and not name.startswith(
                'xvd') and not name.startswith('nvme'):
            continue

        di = DiskInfo()
        disk_info_list.append(di)
        di.name = name
        di.usage = DiskUsage.no

        # check if disk is not partitioned
        if 'partitions' not in device:
            continue

        # old_osd marks an OSD partition from a foreign/previous cluster;
        # such disks are not classified as journal/cache below.
        old_osd = False
        # first check for OSD partitions
        for partition in device['partitions']:
            if partition['ptype'] == ptype_osd and 'whoami' in partition:
                if fsid and partition['ceph_fsid'] == fsid:
                    di.usage = DiskUsage.osd
                    di.osd_id = partition['whoami']
                    di.osd_uuid = partition['uuid']

                    # filestore: external journal device
                    if 'journal_dev' in partition:
                        journal = partition['journal_dev']
                        journal_disk = get_disk_by_partition(journal)
                        if journal_disk != name:
                            di.linked_journal = journal_disk
                            if journal_disk not in journal_linked_osds:
                                journal_linked_osds[journal_disk] = []
                            journal_linked_osds[journal_disk].append(di.name)

                    # bluestore: external block.db device
                    if 'block.db_dev' in partition:
                        journal = partition['block.db_dev']
                        journal_disk = get_disk_by_partition(journal)
                        if journal_disk != name:
                            di.linked_journal = journal_disk
                            if journal_disk not in journal_linked_osds:
                                journal_linked_osds[journal_disk] = []
                            journal_linked_osds[journal_disk].append(di.name)

                    # do not check further partitions
                    break
                else:
                    old_osd = True

        if di.usage == DiskUsage.osd:
            # go to next disk
            continue

        # check for  journal disk
        if not old_osd:
            no_of_partitions = len(device['partitions'])
            for partition in device['partitions']:
                if partition['ptype'] == ptype_journal or partition[
                        'ptype'] == ptype_blockdb or partition[
                            'ptype'] == journal_avail_ptype:
                    di.usage = DiskUsage.journal

                    if partition['ptype'] == journal_avail_ptype:
                        no_of_availabe_partitions += 1
                    """
                    if 'journal_for' in partition:
                        journal_for = partition['journal_for']
                        journal_for_disk = get_disk_by_partition(journal_for)
                        di.linked_osds.append(journal_for_disk)
                    """
                # check for cache partition (cache wins if both types present)
                if partition['ptype'] == cache_used_ptype or partition[
                        'ptype'] == cache_avail_ptype:
                    di.usage = DiskUsage.cache

                    if partition['ptype'] == cache_avail_ptype:
                        no_of_availabe_partitions += 1

        if di.usage == DiskUsage.journal or di.usage == DiskUsage.cache:
            # partition counts are only recorded for cache disks
            if di.usage == DiskUsage.cache and no_of_partitions > 0:
                di.no_of_partitions = no_of_partitions
                di.no_available_partitions = no_of_availabe_partitions
            # go to next disk
            continue

        # check for mounted partitions
        for partition in device['partitions']:
            if 'mount' in partition:
                mount_path = partition['mount']
                if mount_path is not None and 0 < len(mount_path):
                    di.usage = DiskUsage.mounted
                    # check for system disk
                    if mount_path == '/':
                        di.usage = DiskUsage.system
                        break

    # second pass: attach the collected OSD links to their journal disks
    for di in disk_info_list:
        if di.usage == DiskUsage.journal and di.name in journal_linked_osds:
            di.linked_osds = journal_linked_osds[di.name]

    return disk_info_list
# Example #7 (stray scraper artifact commented out; original lines were "Пример #7" and "0")
    def zap_disk(self, disk_name):
        """
        Any whole (raw) device passed in as input will be zapped here:
        all partitions are unmounted and wiped, the disk itself is wiped,
        and a fresh GPT label is written.
        Device example: /dev/sda (pass just the name, e.g. 'sda').
        Requirements: None.
        Returns True on success, False if a wipefs step fails.
        """
        for device in ceph_disk.list_devices():
            device_path = device['path']
            name = ceph_disk.get_dev_name(device_path)

            if name != disk_name:
                continue

            if 'partitions' in device:
                for partition in device['partitions']:
                    if 'mount' in partition:
                        mount_path = partition['mount']

                        # Unmount first; failure is logged but not fatal.
                        if mount_path is not None and 0 < len(mount_path):
                            cmd = 'umount ' + mount_path
                            logger.info('Executing : ' + cmd)
                            if not call_cmd_2(cmd):
                                logger.error('Error executing : ' + cmd)

                    if 'path' in partition:
                        partition_path = partition['path']
                        if partition_path is not None and 0 < len(
                                partition_path):
                            sleep(3)

                            # Removes the filesystem from a partition :
                            cmd = 'wipefs --all ' + partition_path
                            logger.info('Executing : ' + cmd)
                            if not call_cmd_2(cmd):
                                logger.error('Error executing : ' + cmd)
                                return False

                            #  Clears data from the start of the partition. Path should be an absolute path to partition.
                            #  20MB of zeros (bs=1M count=20) are written to the path to make sure that
                            #  there is no trace left of any previous Filesystem :
                            cmd = 'dd if=/dev/zero of={} bs=1M count=20 oflag=direct,dsync >/dev/null 2>&1'.format(
                                partition_path)
                            logger.info('Executing : ' + cmd)
                            # dd failure is logged but not fatal.
                            if not call_cmd_2(cmd):
                                logger.error('Error executing : ' + cmd)

        # Removes the filesystem from any passed device :
        cmd = 'wipefs --all ' + '/dev/' + disk_name
        logger.info('Executing : ' + cmd)
        if not call_cmd_2(cmd):
            logger.error('Error executing : ' + cmd)
            return False

        #  Clears data from the start of the passed device.
        #  20MB of zeros (bs=1M count=20) are written to make sure that there is no trace left of any previous Filesystem :
        cmd = 'dd if=/dev/zero of=/dev/{} bs=1M count=20 oflag=direct,dsync >/dev/null 2>&1'.format(
            disk_name)
        logger.info('Executing : ' + cmd)
        if not call_cmd_2(cmd):
            logger.error('Error executing : ' + cmd)

        # Write a fresh empty GPT partition table.
        cmd = 'parted -s /dev/{} mklabel gpt'.format(disk_name)
        logger.info('Executing : ' + cmd)
        if not call_cmd_2(cmd):
            logger.error('Error executing : ' + cmd)

        # Ask the kernel to re-read the (now empty) partition table.
        cmd = 'partprobe ' + '/dev/' + disk_name
        logger.info('Executing : ' + cmd)
        if not call_cmd_2(cmd):
            logger.error('Error executing : ' + cmd)

        sleep(3)
        return True