def prepare_bluestore(device_name, journal=None):
    """Prepare a bluestore OSD on *device_name* via ``ceph-volume lvm prepare``.

    *device_name* may be a physical disk (a full-size partition is created on
    it) or an LVM logical volume path ("vg/lv"), in which case the OSD id is
    recovered from the volume-group name.  *journal* optionally names an
    external device used for the block.db partition.  Returns True on success,
    False on any failure.
    """
    logger.info('Start prepare bluestore OSD : ' + device_name)
    journal_part_name = ""
    disk_name = ""
    osd_id = ""

    if lvm_lib.is_lv(device_name):
        # Logical volume: hand it to ceph-volume as-is.
        osd_part_name = device_name
        # The origin disk of the volume group is needed later for activation.
        vg_name = device_name.split("/")[0]
        # The VG name encodes the OSD id after the first '.'.
        osd_id = vg_name.split(".")[1]
        logger.info("OSD_ID = {}".format(osd_id))
        backing = list(lvm_lib.get_physical_disks(vg_name)["main"])
        if backing:
            first_partition = ceph_disk.get_dev_name(backing[0])
            disk_name = ceph_disk.get_device_name(first_partition)
    else:
        # Physical disk: create a single partition spanning the whole disk.
        disk_name = device_name
        new_part_num = ceph_disk.create_osd_partition(device_name)
        osd_part_name = "/dev/" + ceph_disk.get_partition_name(device_name, new_part_num)

    cmd = 'ceph-volume --log-path ' + CEPH_VOLUME_LOG_PATH + ' lvm prepare '
    cmd += ' --bluestore --data ' + osd_part_name
    if len(osd_id) > 0:
        cmd += ' --osd-id ' + osd_id
    if journal is not None and 0 < len(journal):
        # Allocate a block.db partition on the external journal device.
        journal_part_name = get_external_journal(journal)
        if journal_part_name is None:
            return False
        cmd += ' --block.db /dev/' + journal_part_name

    logger.info('Starting : ' + cmd)
    if not call_cmd_2(cmd):
        logger.error('Error executing : ' + cmd)
        # After the failure of adding OSD , change ptype of journal partition to "journal_avail_ptype" :
        if journal_part_name != "":
            set_journal_part_avail(journal_part_name)
        return False

    # Activating OSD :
    activate_osd(disk_name)
    return True
def get_disk_list():
    """Return the node's disk inventory enriched with ceph-volume OSD data.

    Merges the per-disk OSD information reported by ceph-volume
    (get_ceph_volumes_info) into the plain disk list (get_ceph_disk_list),
    and back-links journal/cache disks to the OSD disks that use them.
    """
    ceph_volume_disks = get_ceph_volumes_info()
    ceph_disk_list = get_ceph_disk_list()
    # Reverse maps: journal (resp. cache) disk name -> OSD disk names using it.
    linked_osds_to_journal = {}
    linked_osds_to_cache = {}
    # No ceph-volume data: return the bare inventory unchanged.
    if len(ceph_volume_disks) == 0:
        return ceph_disk_list
    for disk, disk_info in ceph_volume_disks.iteritems():  # Python 2 dict iteration
        if len(disk_info.linked_journal) > 0:
            if disk_info.linked_journal in linked_osds_to_journal.keys():
                linked_osds_to_journal[disk_info.linked_journal].append(disk)
            else:
                linked_osds_to_journal.update(
                    {disk_info.linked_journal: [disk]})
        if len(disk_info.linked_cache) > 0:
            for cache in disk_info.linked_cache:
                # linked_cache stores partition names; resolve to the parent disk.
                cache = '/dev/' + cache
                partition_name = ceph_disk.get_dev_name(cache)
                cache_name = ceph_disk.get_device_name(partition_name)
                if cache_name in linked_osds_to_cache.keys():
                    linked_osds_to_cache[cache_name].append(disk)
                else:
                    linked_osds_to_cache.update({cache_name: [disk]})
    for disk in ceph_disk_list:
        if disk.name in ceph_volume_disks.keys():
            # Copy the ceph-volume view of this disk onto the inventory entry.
            disk.osd_uuid = ceph_volume_disks[disk.name].osd_uuid
            disk.osd_id = ceph_volume_disks[disk.name].osd_id
            disk.linked_journal = ceph_volume_disks[disk.name].linked_journal
            disk.linked_journal_part_num = ceph_volume_disks[
                disk.name].linked_journal_part_num
            disk.linked_cache = ceph_volume_disks[disk.name].linked_cache
            disk.linked_cache_part_num = ceph_volume_disks[
                disk.name].linked_cache_part_num
            disk.vg_name = ceph_volume_disks[disk.name].vg_name
            disk.lv_name = ceph_volume_disks[disk.name].lv_name
            # NOTE(review): presumably 0 marks this disk's usage as "OSD" in the
            # project's usage enum -- confirm against the DiskUsage definition.
            disk.usage = 0
        # usage values 3/4 appear to denote journal/cache disks (set by
        # get_ceph_disk_list) -- TODO confirm. Attach the OSDs that use them.
        if disk.usage == 3 and disk.name in linked_osds_to_journal.keys():
            for osd in linked_osds_to_journal[disk.name]:
                if osd not in disk.linked_osds:
                    disk.linked_osds.append(osd)
        if disk.usage == 4 and disk.name in linked_osds_to_cache.keys():
            for osd in linked_osds_to_cache[disk.name]:
                if osd not in disk.linked_osds:
                    disk.linked_osds.append(osd)
    disk_list = ceph_disk_list
    return disk_list
def set_partition_uuid(part_name, part_uuid):
    """Stamp *part_uuid* onto partition *part_name* using sgdisk.

    Returns True when the sgdisk command succeeds (after re-probing the
    partition table), False otherwise.
    """
    logger.info('Start setting partition uuid for ' + part_name)
    device = ceph_disk.get_device_name(part_name)
    partition_number = ceph_disk.get_partition_num(part_name)
    sgdisk_cmd = ('sgdisk -u ' + partition_number + ':' + str(part_uuid)
                  + ' /dev/' + device)
    logger.info('Starting ' + sgdisk_cmd)
    if call_cmd_2(sgdisk_cmd):
        # Re-read the partition table so the kernel sees the new uuid.
        ceph_disk.probe_part(device)
        return True
    logger.error('Error executing ' + sgdisk_cmd)
    return False
def prepare_filestore(device_name, journal=None):
    """Prepare a filestore OSD on *device_name* via ``ceph-volume lvm prepare``.

    *device_name* may be a physical disk (a full-size partition is created on
    it) or an LVM logical volume path ("vg/lv").  *journal* optionally names
    either an existing journal partition or a whole journal device on which a
    partition is allocated.  Returns True on success, False on any failure.

    Fix: the original built ``'/dev/' + journal`` unconditionally, which
    raises TypeError when the function is called with its default
    ``journal=None``; the None/empty guard now comes first (matching
    prepare_bluestore).
    """
    logger.info('Start prepare filestore OSD : ' + device_name)
    journal_part_name = ""
    journal_option = ""
    disk_name = ""

    # Resolve the journal option before touching the device path so that a
    # missing/empty journal simply means "no --journal flag".
    if journal is not None and 0 < len(journal):
        journal_path = '/dev/' + journal
        if ceph_disk.is_partition(journal_path):
            # Caller passed an existing partition: use it as-is.
            journal_option = ' --journal /dev/' + journal
        else:
            # Caller passed a whole journal device: allocate a partition on it.
            journal_part_name = get_external_journal(journal)
            if journal_part_name is None:
                return False
            journal_option = ' --journal /dev/' + journal_part_name

    # Check if device is physical or logical :
    lv = lvm_lib.is_lv(device_name)
    if not lv:
        # create partition with full disk size
        disk_name = device_name
        osd_part_num = ceph_disk.create_osd_partition(device_name)
        osd_part_name = ceph_disk.get_partition_name(device_name, osd_part_num)
        osd_part_name = "/dev/" + osd_part_name
    else:
        osd_part_name = device_name
        # Get the origin disk of volume group to be used in OSD activation :
        vg_name = device_name.split("/")[0]
        physical_list = lvm_lib.get_physical_disks(vg_name)
        main_devices = list(physical_list["main"])
        if len(main_devices) > 0:
            part_name = main_devices[0]
            partition_name = ceph_disk.get_dev_name(part_name)
            disk_name = ceph_disk.get_device_name(partition_name)

    cmd = 'ceph-volume --log-path ' + CEPH_VOLUME_LOG_PATH + ' lvm prepare '
    cmd += ' --filestore --data ' + osd_part_name + journal_option
    logger.info('Starting : ' + cmd)
    if not call_cmd_2(cmd):
        logger.error('Error executing : ' + cmd)
        # after add osd fail change ptype of journal partition to journal_avail_ptype
        if journal_part_name != "":
            set_journal_part_avail(journal_part_name)
        return False
    # Activating OSD :
    activate_osd(disk_name, True)
    return True
def get_ceph_volumes_info():
    """Collect OSD info from ``ceph-volume lvm list`` for this cluster.

    Returns a dict mapping each backing disk name to a CephVolumeInfo
    describing the OSD on it (id, uuid, vg/lv, linked journal and cache
    devices).  OSDs whose cluster fsid does not match the local cluster are
    skipped.  Returns an empty dict when ceph-volume reports nothing.
    """
    ceph_volumes_disks = {}
    cluster_fsid = ''
    vg_name = ""
    partitions_uuid = {}
    try:
        # Best-effort: fall back to empty fsid/uuid maps if ceph config is
        # unreadable, so the listing below still runs.
        cluster_fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
        partitions_uuid = ceph_disk.get_partitions_uuid()
    except Exception as e:
        logger.error(e)
        pass
    cmd = 'ceph-volume --log-path ' + CEPH_VOLUME_LOG_PATH + ' lvm list --format json'
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        if stderr:
            logger.error(stderr)
    if len(stdout) > 0:
        ceph_volumes_info = json.loads(stdout)
        # JSON shape: {osd_id: [element, ...]} where each element describes one
        # lv/partition of the OSD (type 'block'/'data'/'db'/...).
        for osd_id, osd_info in ceph_volumes_info.iteritems():  # Python 2 dict iteration
            try:
                ceph_volume_disk_info = CephVolumeInfo()
                fsid = ''
                osd_name = ''
                for element in osd_info:
                    # Only the main data element carries the info we need.
                    if element['type'] == 'block' or element['type'] == 'data':
                        fsid = element['tags']['ceph.cluster_fsid']
                        # Skip OSDs belonging to a different cluster.
                        if len(fsid) > 0 and fsid != cluster_fsid:
                            continue
                        # if not ['tags']['ceph.cluster_fsid'] or element['tags']['ceph.cluster_fsid'] != cluster_fsid:
                        ceph_volume_disk_info.osd_id = osd_id
                        ceph_volume_disk_info.osd_uuid = element['tags'][
                            'ceph.osd_fsid']
                        if len(element['devices']) > 0:
                            for device in element['devices']:
                                part_name = ceph_disk.get_dev_name(device)
                                osd_name = ceph_disk.get_device_name(part_name)
                                ceph_volume_disk_info.devices.append(osd_name)
                        # if there is no devices (physical disks) exists
                        # get them from get_physical_disks function by volume group name
                        else:
                            vg_name = element['vg_name']
                            lv_name = element['lv_name']
                            ceph_volume_disk_info.lv_name = lv_name
                            ceph_volume_disk_info.vg_name = vg_name
                            physical_list = lvm_lib.get_physical_disks(vg_name)
                            main_devices = list(physical_list["main"])
                            writecache_devices = list(
                                physical_list["writecache"])
                            cache_devices = list(physical_list["dmcache"])
                            if len(main_devices) > 0:
                                for main_dev in main_devices:
                                    main_part_name = ceph_disk.get_dev_name(
                                        main_dev)
                                    main_dev_name = ceph_disk.get_device_name(
                                        main_part_name)
                                    ceph_volume_disk_info.devices.append(
                                        main_dev_name)
                            # A VG has either a writecache or a dmcache layer,
                            # not both; record its partitions as linked cache.
                            if len(writecache_devices) > 0:
                                for wcache in writecache_devices:
                                    wcache_partition_name = ceph_disk.get_dev_name(
                                        wcache)
                                    ceph_volume_disk_info.linked_cache_part_num.append(
                                        ceph_disk.get_partition_num(
                                            wcache_partition_name))
                                    ceph_volume_disk_info.linked_cache.append(
                                        wcache_partition_name)
                            elif len(cache_devices) > 0:
                                for cache in cache_devices:
                                    cache_partition_name = ceph_disk.get_dev_name(
                                        cache)
                                    ceph_volume_disk_info.linked_cache_part_num.append(
                                        ceph_disk.get_partition_num(
                                            cache_partition_name))
                                    ceph_volume_disk_info.linked_cache.append(
                                        cache_partition_name)
                        journal_path = ""
                        # if 'ceph.journal_device' in element['tags']:
                        # journal_path = element['tags']['ceph.journal_device']
                        # if 'ceph.db_device' in element['tags']:
                        # journal_path = element['tags']['ceph.db_device']
                        uuid = ""
                        # for filestore :
                        if 'ceph.journal_uuid' in element['tags']:
                            uuid = element['tags']['ceph.journal_uuid']
                        # for bluestore :
                        if 'ceph.db_uuid' in element['tags']:
                            uuid = element['tags']['ceph.db_uuid']
                        # Resolve the journal/db partition path from its uuid.
                        if len(uuid) > 0 and uuid in partitions_uuid:
                            journal_path = partitions_uuid[uuid]
                        if len(journal_path) > 0:
                            try:
                                if ceph_disk.is_partition(journal_path):
                                    journal_name = get_disk_by_partition(
                                        journal_path)
                                    journal_partition_name = ceph_disk.get_dev_name(
                                        journal_path)
                                    ceph_volume_disk_info.linked_journal_part_num = ceph_disk.get_partition_num(
                                        journal_partition_name)
                                    # Journal on the OSD's own disk is not an
                                    # external journal; skip linking it.
                                    if len(osd_name
                                           ) > 0 and osd_name in journal_name:
                                        continue
                                    ceph_volume_disk_info.linked_journal = journal_name
                            except Exception as ex:
                                logger.error(ex)
                                continue
            except Exception as e:
                logger.exception(e)
                continue
            # One CephVolumeInfo may be shared by several backing disks.
            for device in ceph_volume_disk_info.devices:
                ceph_volumes_disks.update({device: ceph_volume_disk_info})
    return ceph_volumes_disks
def get_disk_by_partition(partition_path):
    """Return the name of the disk that owns the partition at *partition_path*."""
    return ceph_disk.get_device_name(ceph_disk.get_dev_name(partition_path))