def full_report(self):
    """
    Generate a report for all the logical volumes and associated devices
    that have been previously prepared by Ceph.

    :return: dict mapping an OSD id (string) to a list of dicts, one per
             logical volume plus one per plain journal device (if any)
    """
    lvs = api.Volumes()
    report = {}
    for lv in lvs:
        try:
            _id = lv.tags['ceph.osd_id']
        except KeyError:
            # only consider ceph-based logical volumes, everything else
            # will get ignored
            continue
        report.setdefault(_id, [])
        report[_id].append(lv.as_dict())
        # a ceph LV without a journal tag (e.g. not a filestore data LV)
        # previously raised KeyError here; skip the journal lookup instead
        journal_uuid = lv.tags.get('ceph.journal_uuid')
        if not journal_uuid:
            continue
        if not api.get_lv(lv_uuid=journal_uuid):
            # means we have a regular device, so query blkid
            journal_device = disk.get_device_from_partuuid(journal_uuid)
            if journal_device:
                report[_id].append(
                    {
                        'tags': {'PARTUUID': journal_uuid},
                        'type': 'journal',
                        'path': journal_device,
                    }
                )
    return report
def test_single_lv_is_matched(self, volumes, monkeypatch):
    """get_lv(lv_name=...) returns the single matching volume."""
    matching = api.Volume(
        lv_name='foo', lv_path='/dev/vg/foo', lv_tags="ceph.type=data")
    volumes.append(matching)
    monkeypatch.setattr(api, 'Volumes', lambda: volumes)
    result = api.get_lv(lv_name='foo')
    assert result == matching
def full_report(self):
    """
    Generate a report for all the logical volumes and associated devices
    that have been previously prepared by Ceph.

    :return: dict mapping an OSD id (string) to a list of dicts, one per
             logical volume plus one per plain journal device (if any)
    """
    lvs = api.Volumes()
    report = {}
    for lv in lvs:
        try:
            _id = lv.tags['ceph.osd_id']
        except KeyError:
            # only consider ceph-based logical volumes, everything else
            # will get ignored
            continue
        report.setdefault(_id, [])
        report[_id].append(lv.as_dict())
        # use .get so that a ceph LV missing the journal tag does not
        # raise KeyError and abort the whole report
        journal_uuid = lv.tags.get('ceph.journal_uuid')
        if not journal_uuid:
            continue
        if not api.get_lv(lv_uuid=journal_uuid):
            # means we have a regular device, so query blkid
            journal_device = disk.get_device_from_partuuid(journal_uuid)
            if journal_device:
                report[_id].append({
                    'tags': {'PARTUUID': journal_uuid},
                    'type': 'journal',
                    'path': journal_device,
                })
    return report
def zap_lv(self, device):
    """
    Zap a logical volume: unmount it, wipe its data, and either destroy
    it (``--destroy``) or strip its lvm metadata tags.

    Device examples: vg-name/lv-name, /dev/vg-name/lv-name
    Requirements: Must be a logical volume (LV)
    """
    lv = api.get_lv(lv_name=device.lv_name, vg_name=device.vg_name)
    self.unmount_lv(lv)

    wipefs(device.abspath)
    zap_data(device.abspath)

    if not self.args.destroy:
        if lv:
            # just remove all lvm metadata, leaving the LV around
            lv.clear_tags()
        return

    remaining = api.Volumes()
    remaining.filter(vg_name=device.vg_name)
    if len(remaining) > 1:
        mlogger.info(
            'More than 1 LV left in VG, will proceed to destroy LV only'
        )
        mlogger.info('Removing LV because --destroy was given: %s',
                     device.abspath)
        api.remove_lv(device.abspath)
    else:
        # destroying the last LV leaves an empty VG behind, remove it too
        mlogger.info(
            'Only 1 LV left in VG, will proceed to destroy volume group %s',
            device.vg_name)
        api.remove_vg(device.vg_name)
def test_single_lv_is_matched_by_uuid(self, volumes, monkeypatch):
    """get_lv(lv_uuid=...) returns the single volume with that uuid."""
    matching = api.Volume(
        lv_name='foo',
        lv_path='/dev/vg/foo',
        lv_uuid='1111',
        lv_tags="ceph.type=data",
    )
    volumes.append(matching)
    monkeypatch.setattr(api, 'Volumes', lambda: volumes)
    result = api.get_lv(lv_uuid='1111')
    assert result == matching
def _set_lvm_membership(self):
    """
    Compute (once) whether this device is an LVM member and cache the
    answer in ``self._is_lvm_member``, populating ``self.vgs``,
    ``self.vg_name``, ``self.pvs_api`` and ``self.lvs`` as a side effect.
    """
    if self._is_lvm_member is not None:
        return self._is_lvm_member

    # this is contentious, if a PV is recognized by LVM but has no
    # VGs, should we consider it as part of LVM? We choose not to
    # here, because most likely, we need to use VGs from this PV.
    self._is_lvm_member = False
    for path in self._get_pv_paths():
        # check if there was a pv created with the
        # name of device
        pvs = lvm.PVolumes()
        pvs.filter(pv_name=path)
        vg_names = [pv.vg_name for pv in pvs if pv.vg_name]
        if not vg_names:
            # NOTE(review): this resets vgs found for an earlier path;
            # presumably intentional, but verify against callers
            self.vgs = []
            continue
        self.vgs = list(set(vg_names))
        # a pv can only be in one vg, so this should be safe
        self.vg_name = vg_names[0]
        self._is_lvm_member = True
        self.pvs_api = pvs
        for pv in pvs:
            if pv.vg_name and pv.lv_uuid:
                lv = lvm.get_lv(vg_name=pv.vg_name, lv_uuid=pv.lv_uuid)
                if lv:
                    self.lvs.append(lv)
    return self._is_lvm_member
def zap(self, args):
    """
    Zap a single device (an LV path or a partition): close any dmcrypt
    mapping, unmount, wipe filesystem signatures and data, and with
    ``--destroy`` also remove the backing VG and PV.
    """
    device = args.device
    lv = api.get_lv_from_argument(device)
    if lv:
        # we are zapping a logical volume
        path = lv.lv_path
    else:
        # we are zapping a partition
        #TODO: ensure device is a partition
        path = device

    mlogger.info("Zapping: %s", path)

    # check if there was a pv created with the
    # name of device
    pv = api.get_pv(pv_name=device)
    if pv:
        lv = api.get_lv(vg_name=pv.vg_name)

    dmcrypt = False
    dmcrypt_uuid = None
    if lv:
        osd_path = "/var/lib/ceph/osd/{}-{}".format(lv.tags['ceph.cluster_name'], lv.tags['ceph.osd_id'])
        dmcrypt_uuid = lv.lv_uuid
        dmcrypt = lv.encrypted
        if system.path_is_mounted(osd_path):
            mlogger.info("Unmounting %s", osd_path)
            system.unmount(osd_path)
    else:
        # we're most likely dealing with a partition here, check to
        # see if it was encrypted
        partuuid = disk.get_partuuid(device)
        if encryption.status("/dev/mapper/{}".format(partuuid)):
            dmcrypt_uuid = partuuid
            dmcrypt = True

    if dmcrypt and dmcrypt_uuid:
        dmcrypt_path = "/dev/mapper/{}".format(dmcrypt_uuid)
        mlogger.info("Closing encrypted path %s", dmcrypt_path)
        encryption.dmcrypt_close(dmcrypt_path)

    if args.destroy:
        if pv:
            logger.info("Found a physical volume created from %s, will destroy all it's vgs and lvs", device)
            vg_name = pv.vg_name
            mlogger.info("Destroying volume group %s because --destroy was given", vg_name)
            api.remove_vg(vg_name)
            mlogger.info("Destroying physical volume %s because --destroy was given", device)
            api.remove_pv(device)
        else:
            mlogger.info("Skipping --destroy because no associated physical volumes are found for %s", device)

    wipefs(path)
    zap_data(path)

    if lv and not pv:
        # remove all lvm metadata
        lv.clear_tags()

    terminal.success("Zapping successful for: %s" % path)
def zap(self, args):
    """
    Zap every device in ``args.devices``: refuse mapper devices, close
    dmcrypt mappings on partitions, unmount any LVs found via the
    device's PVs, wipe the data, and with ``--destroy`` remove the
    associated VGs and the PV itself.
    """
    for device in args.devices:
        if disk.is_mapper_device(device):
            terminal.error(
                "Refusing to zap the mapper device: {}".format(device))
            raise SystemExit(1)

        lv = api.get_lv_from_argument(device)
        if lv:
            # we are zapping a logical volume
            path = lv.lv_path
            self.unmount_lv(lv)
        else:
            # we are zapping a partition
            #TODO: ensure device is a partition
            path = device
            # check to if it is encrypted to close
            partuuid = disk.get_partuuid(device)
            if encryption.status("/dev/mapper/{}".format(partuuid)):
                dmcrypt_uuid = partuuid
                self.dmcrypt_close(dmcrypt_uuid)

        mlogger.info("Zapping: %s", path)

        # check if there was a pv created with the
        # name of device
        pvs = api.PVolumes()
        pvs.filter(pv_name=device)
        vgs = {pv.vg_name for pv in pvs}
        for pv in pvs:
            lv = None
            if pv.lv_uuid:
                lv = api.get_lv(vg_name=pv.vg_name, lv_uuid=pv.lv_uuid)
            if lv:
                self.unmount_lv(lv)

        if args.destroy:
            for vg_name in vgs:
                mlogger.info(
                    "Destroying volume group %s because --destroy was given",
                    vg_name)
                api.remove_vg(vg_name)
            mlogger.info(
                "Destroying physical volume %s because --destroy was given",
                device)
            api.remove_pv(device)

        wipefs(path)
        zap_data(path)

        if lv and not pvs:
            # remove all lvm metadata
            lv.clear_tags()

    terminal.success("Zapping successful for: %s" % ", ".join(args.devices))
def single_report(self, device, lvs=None):
    """
    Generate a report for a single device. This can be either a logical
    volume in the form of vg/lv or a device with an absolute path like
    /dev/sda1 or /dev/sda

    :param device: vg/lv string or an absolute device path
    :param lvs: optional pre-built api.Volumes collection (fetched fresh
                when None)
    :return: dict mapping OSD id to a list of report dicts (possibly empty)
    """
    if lvs is None:
        lvs = api.Volumes()
    report = {}
    lv = api.get_lv_from_argument(device)
    # check if there was a pv created with the
    # name of device
    pv = api.get_pv(pv_name=device)

    if pv and not lv:
        try:
            lv = api.get_lv(vg_name=pv.vg_name)
        except MultipleLVsError:
            # several LVs share the VG: fall back to a full report of them
            lvs.filter(vg_name=pv.vg_name)
            return self.full_report(lvs=lvs)

    if lv:
        try:
            _id = lv.tags['ceph.osd_id']
        except KeyError:
            logger.warning('device is not part of ceph: %s', device)
            return report
        lv_report = lv.as_dict()
        lv_report['devices'] = self.match_devices(lv.lv_uuid)
        report.setdefault(_id, []).append(lv_report)
        return report

    # this has to be a journal/wal/db device (not a logical volume) so try
    # to find the PARTUUID that should be stored in the OSD logical
    # volume
    for device_type in ('journal', 'block', 'wal', 'db'):
        device_tag_name = 'ceph.%s_device' % device_type
        device_tag_uuid = 'ceph.%s_uuid' % device_type
        associated_lv = lvs.get(lv_tags={device_tag_name: device})
        if not associated_lv:
            continue
        _id = associated_lv.tags['ceph.osd_id']
        uuid = associated_lv.tags[device_tag_uuid]
        report.setdefault(_id, []).append({
            'tags': {'PARTUUID': uuid},
            'type': device_type,
            'path': device,
        })
    return report
def get_lvm_osd_info(final):
    """
    Populate *final* in place with lvm-sourced OSD device information.

    Looks up the PV backing ``final['osd_partition']``; when its VG holds
    an LV, copies the ceph tag values (block/wal/db/data devices and OSD
    id) into *final*, defaulting each to ''. Returns None.
    """
    pv = api.get_pv(pv_name=final['osd_partition'])
    if not pv:
        return
    lv = api.get_lv(vg_name=pv.vg_name)
    if not lv:
        return
    tags = lv.tags
    final['osd_data_block'] = tags.get('ceph.block_device', '')
    final['osd_data_wal'] = tags.get('ceph.wal_device', '')
    final['osd_data_db'] = tags.get('ceph.db_device', '')
    final['osd_data_osd'] = tags.get('ceph.data_device', '')
    final['osd_data_id'] = tags.get('ceph.osd_id', '')
def get_journal_lv(self, argument):
    """
    Perform some parsing of the value of ``--journal`` so that the process
    can determine correctly if it got a device path or an lv.

    :param argument: The value of ``--journal``, that will need to be split
                     to retrieve the actual lv
    :return: the matching lv object, or None when *argument* is not a
             ``vg/lv`` string
    """
    try:
        pieces = argument.split('/')
    except AttributeError:
        # not a string at all (e.g. None)
        return None
    if len(pieces) != 2:
        # a plain device path or malformed value, not vg/lv
        return None
    vg_name, lv_name = pieces
    return api.get_lv(lv_name=lv_name, vg_name=vg_name)
def zap(self, args):
    """
    Zap a single device (LV path or partition): unmount any mounted OSD
    path, wipe filesystem signatures and data, and with ``--destroy``
    remove the backing VG and PV as well.
    """
    device = args.device
    lv = api.get_lv_from_argument(device)
    if lv:
        # we are zapping a logical volume
        path = lv.lv_path
    else:
        # we are zapping a partition
        #TODO: ensure device is a partition
        path = device

    mlogger.info("Zapping: %s", path)

    # check if there was a pv created with the
    # name of device
    pv = api.get_pv(pv_name=device)
    if pv:
        lv = api.get_lv(vg_name=pv.vg_name)

    if lv:
        osd_path = "/var/lib/ceph/osd/{}-{}".format(
            lv.tags['ceph.cluster_name'], lv.tags['ceph.osd_id'])
        if system.path_is_mounted(osd_path):
            mlogger.info("Unmounting %s", osd_path)
            system.unmount(osd_path)

    if args.destroy:
        if pv:
            logger.info(
                "Found a physical volume created from %s, will destroy all it's vgs and lvs", device)
            vg_name = pv.vg_name
            mlogger.info(
                "Destroying volume group %s because --destroy was given", vg_name)
            api.remove_vg(vg_name)
            mlogger.info(
                "Destroying physical volume %s because --destroy was given", device)
            api.remove_pv(device)
        else:
            mlogger.info(
                "Skipping --destroy because no associated physical volumes are found for %s", device)

    wipefs(path)
    zap_data(path)

    if lv and not pv:
        # remove all lvm metadata
        lv.clear_tags()

    terminal.success("Zapping successful for: %s" % path)
def single_report(self, device):
    """
    Generate a report for a single device. This can be either a logical
    volume in the form of vg/lv or a device with an absolute path like
    /dev/sda1 or /dev/sda

    :param device: vg/lv string or an absolute device path
    :return: dict mapping OSD id to a list of report dicts (possibly empty)
    """
    lvs = api.Volumes()
    report = {}
    lv = api.get_lv_from_argument(device)
    # check if there was a pv created with the
    # name of device
    pv = api.get_pv(pv_name=device)

    if pv and not lv:
        try:
            lv = api.get_lv(vg_name=pv.vg_name)
        except MultipleLVsError:
            # several LVs share the VG: fall back to a full report of them
            lvs.filter(vg_name=pv.vg_name)
            return self.full_report(lvs=lvs)

    if lv:
        try:
            _id = lv.tags['ceph.osd_id']
        except KeyError:
            logger.warning('device is not part of ceph: %s', device)
            return report
        report.setdefault(_id, []).append(lv.as_dict())
        return report

    # this has to be a journal/wal/db device (not a logical volume) so try
    # to find the PARTUUID that should be stored in the OSD logical
    # volume
    for device_type in ('journal', 'block', 'wal', 'db'):
        device_tag_name = 'ceph.%s_device' % device_type
        device_tag_uuid = 'ceph.%s_uuid' % device_type
        associated_lv = lvs.get(lv_tags={device_tag_name: device})
        if not associated_lv:
            continue
        _id = associated_lv.tags['ceph.osd_id']
        uuid = associated_lv.tags[device_tag_uuid]
        report.setdefault(_id, []).append({
            'tags': {'PARTUUID': uuid},
            'type': device_type,
            'path': device,
        })
    return report
def zap(self, args):
    """
    Zap every device in ``args.devices``: refuse mapper devices, close
    dmcrypt mappings on partitions, unmount LVs reached via the device's
    PVs, wipe the data, and with ``--destroy`` remove the VGs (and the
    PV itself when no LV was found).
    """
    for device in args.devices:
        if disk.is_mapper_device(device):
            terminal.error("Refusing to zap the mapper device: {}".format(device))
            raise SystemExit(1)

        lv = api.get_lv_from_argument(device)
        if lv:
            # we are zapping a logical volume
            path = lv.lv_path
            self.unmount_lv(lv)
        else:
            # we are zapping a partition
            #TODO: ensure device is a partition
            path = device
            # check to if it is encrypted to close
            partuuid = disk.get_partuuid(device)
            if encryption.status("/dev/mapper/{}".format(partuuid)):
                dmcrypt_uuid = partuuid
                self.dmcrypt_close(dmcrypt_uuid)

        mlogger.info("Zapping: %s", path)

        # check if there was a pv created with the
        # name of device
        pvs = api.PVolumes()
        pvs.filter(pv_name=device)
        vgs = {pv.vg_name for pv in pvs}
        for pv in pvs:
            lv = None
            if pv.lv_uuid:
                lv = api.get_lv(vg_name=pv.vg_name, lv_uuid=pv.lv_uuid)
            if lv:
                self.unmount_lv(lv)

        if args.destroy:
            for vg_name in vgs:
                mlogger.info("Destroying volume group %s because --destroy was given", vg_name)
                api.remove_vg(vg_name)
            if not lv:
                mlogger.info("Destroying physical volume %s because --destroy was given", device)
                api.remove_pv(device)

        wipefs(path)
        zap_data(path)

        if lv and not pvs:
            # remove all lvm metadata
            lv.clear_tags()

    terminal.success("Zapping successful for: %s" % ", ".join(args.devices))
def get_lv(self, argument):
    """
    Perform some parsing of the command-line value so that the process
    can determine correctly if it got a device path or an lv.

    :param argument: The command-line value that will need to be split
                     to retrieve the actual lv
    :return: the matching lv object, or None when *argument* is not a
             ``vg/lv`` string
    """
    #TODO is this efficient?
    try:
        pieces = argument.split('/')
    except AttributeError:
        # not a string at all (e.g. None)
        return None
    if len(pieces) != 2:
        # a plain device path or malformed value, not vg/lv
        return None
    vg_name, lv_name = pieces
    return api.get_lv(lv_name=lv_name, vg_name=vg_name)
def zap(self, args):
    """
    Zap a single device (LV path or partition): unmount a mounted OSD
    path if found, wipe filesystem signatures and data, and with
    ``--destroy`` also remove the backing VG and PV.
    """
    device = args.device
    lv = api.get_lv_from_argument(device)
    if lv:
        # we are zapping a logical volume
        path = lv.lv_path
    else:
        # we are zapping a partition
        #TODO: ensure device is a partition
        path = device

    mlogger.info("Zapping: %s", path)

    # check if there was a pv created with the
    # name of device
    pv = api.get_pv(pv_name=device)
    if pv:
        lv = api.get_lv(vg_name=pv.vg_name)

    if lv:
        osd_path = "/var/lib/ceph/osd/{}-{}".format(lv.tags['ceph.cluster_name'], lv.tags['ceph.osd_id'])
        if system.path_is_mounted(osd_path):
            mlogger.info("Unmounting %s", osd_path)
            system.unmount(osd_path)

    if args.destroy:
        if pv:
            logger.info("Found a physical volume created from %s, will destroy all it's vgs and lvs", device)
            vg_name = pv.vg_name
            mlogger.info("Destroying volume group %s because --destroy was given", vg_name)
            api.remove_vg(vg_name)
            mlogger.info("Destroying physical volume %s because --destroy was given", device)
            api.remove_pv(device)
        else:
            mlogger.info("Skipping --destroy because no associated physical volumes are found for %s", device)

    wipefs(path)
    zap_data(path)

    if lv and not pv:
        # remove all lvm metadata
        lv.clear_tags()

    terminal.success("Zapping successful for: %s" % path)
def full_report(self, lvs=None):
    """
    Generate a report for all the logical volumes and associated devices
    that have been previously prepared by Ceph

    :param lvs: optional pre-built api.Volumes collection (fetched fresh
                when None)
    :return: dict mapping OSD id to a list of report dicts
    """
    if lvs is None:
        lvs = api.Volumes()
    report = {}
    for lv in lvs:
        try:
            _id = lv.tags['ceph.osd_id']
        except KeyError:
            # only consider ceph-based logical volumes, everything else
            # will get ignored
            continue
        lv_report = lv.as_dict()
        lv_report['devices'] = self.match_devices(lv.lv_uuid)
        report.setdefault(_id, []).append(lv_report)

        for device_type in ('journal', 'block', 'wal', 'db'):
            device_uuid = lv.tags.get('ceph.%s_uuid' % device_type)
            if not device_uuid:
                # bluestore will not have a journal, filestore will not have
                # a block/wal/db, so we must skip if not present
                continue
            if api.get_lv(lv_uuid=device_uuid, lvs=lvs):
                continue
            # means we have a regular device, so query blkid
            disk_device = disk.get_device_from_partuuid(device_uuid)
            if disk_device:
                report[_id].append({
                    'tags': {'PARTUUID': device_uuid},
                    'type': device_type,
                    'path': disk_device,
                })
    return report
def full_report(self, lvs=None):
    """
    Generate a report for all the logical volumes and associated devices
    that have been previously prepared by Ceph

    :param lvs: optional pre-built api.Volumes collection (fetched fresh
                when None)
    :return: dict mapping OSD id to a list of report dicts
    """
    if lvs is None:
        lvs = api.Volumes()
    report = {}
    for lv in lvs:
        try:
            _id = lv.tags['ceph.osd_id']
        except KeyError:
            # only consider ceph-based logical volumes, everything else
            # will get ignored
            continue
        report.setdefault(_id, []).append(lv.as_dict())

        for device_type in ('journal', 'block', 'wal', 'db'):
            device_uuid = lv.tags.get('ceph.%s_uuid' % device_type)
            if not device_uuid:
                # bluestore will not have a journal, filestore will not have
                # a block/wal/db, so we must skip if not present
                continue
            if api.get_lv(lv_uuid=device_uuid):
                continue
            # means we have a regular device, so query blkid
            disk_device = disk.get_device_from_partuuid(device_uuid)
            if disk_device:
                report[_id].append({
                    'tags': {'PARTUUID': device_uuid},
                    'type': device_type,
                    'path': disk_device,
                })
    return report
def zap_lv(self, device):
    """
    Zap a logical volume: unmount, wipe signatures and data, then either
    destroy it (``--destroy``) or strip its lvm metadata tags.

    Device examples: vg-name/lv-name, /dev/vg-name/lv-name
    Requirements: Must be a logical volume (LV)
    """
    lv = api.get_lv(lv_name=device.lv_name, vg_name=device.vg_name)
    self.unmount_lv(lv)

    wipefs(device.abspath)
    zap_data(device.abspath)

    if self.args.destroy:
        remaining = api.Volumes()
        remaining.filter(vg_name=device.vg_name)
        last_lv_in_vg = len(remaining) <= 1
        if last_lv_in_vg:
            # removing the VG also takes the last LV with it
            mlogger.info('Only 1 LV left in VG, will proceed to destroy volume group %s', device.vg_name)
            api.remove_vg(device.vg_name)
        else:
            mlogger.info('More than 1 LV left in VG, will proceed to destroy LV only')
            mlogger.info('Removing LV because --destroy was given: %s', device.abspath)
            api.remove_lv(device.abspath)
    elif lv:
        # just remove all lvm metadata, leaving the LV around
        lv.clear_tags()
def test_nothing_is_passed_in(self):
    # so we return a None
    result = api.get_lv()
    assert result is None
def prepare(self, args):
    """
    Prepare an OSD on the ``vg/lv`` given via ``--data``: generate cephx
    secrets, allocate (or reuse) an OSD id, tag the data and journal
    volumes, and hand off to filestore or bluestore preparation.

    :raises RuntimeError: when the data LV does not exist or ``--journal``
                          is missing in filestore mode
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}

    cluster_fsid = conf.ceph.get('global', 'fsid')
    fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = args.osd_id or prepare_utils.create_id(fsid, json.dumps(secrets))
    vg_name, lv_name = args.data.split('/')

    if args.filestore:
        data_lv = api.get_lv(lv_name=lv_name, vg_name=vg_name)

        # we must have either an existing data_lv or a newly created, so lets make
        # sure that the tags are correct
        if not data_lv:
            raise RuntimeError('no data logical volume found with: %s' % args.data)
        if not args.journal:
            raise RuntimeError('--journal is required when using --filestore')

        journal_lv = self.get_journal_lv(args.journal)
        if journal_lv:
            journal_device = journal_lv.lv_path
            journal_uuid = journal_lv.lv_uuid
            # we can only set tags on an lv, the pv (if any) can't as we
            # aren't making it part of an lvm group (vg)
            journal_lv.set_tags({
                'ceph.type': 'journal',
                'ceph.osd_fsid': fsid,
                'ceph.osd_id': osd_id,
                'ceph.cluster_fsid': cluster_fsid,
                'ceph.journal_device': journal_device,
                'ceph.journal_uuid': journal_uuid,
                'ceph.data_device': data_lv.lv_path,
                'ceph.data_uuid': data_lv.lv_uuid,
            })
        # allow a file
        elif os.path.isfile(args.journal):
            journal_uuid = ''
            journal_device = args.journal
        # otherwise assume this is a regular disk partition
        else:
            journal_uuid = self.get_journal_ptuuid(args.journal)
            journal_device = args.journal

        data_lv.set_tags({
            'ceph.type': 'data',
            'ceph.osd_fsid': fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.journal_device': journal_device,
            'ceph.journal_uuid': journal_uuid,
            'ceph.data_device': data_lv.lv_path,
            'ceph.data_uuid': data_lv.lv_uuid,
        })

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=osd_id,
            fsid=fsid,
        )
    elif args.bluestore:
        prepare_bluestore(args)
def prepare(self, args):
    """
    Prepare an OSD from the ``vg/lv`` passed as ``--data``.

    Creates cephx secrets, picks (or reuses) the OSD id, applies the
    ceph lvm tags to the data LV (and journal LV when one is used), then
    delegates to the filestore or bluestore preparation routine.

    :raises RuntimeError: if the data LV is missing, or if filestore mode
                          was requested without ``--journal``
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}

    cluster_fsid = conf.ceph.get('global', 'fsid')
    fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = args.osd_id or prepare_utils.create_id(fsid, json.dumps(secrets))
    vg_name, lv_name = args.data.split('/')

    if args.filestore:
        data_lv = api.get_lv(lv_name=lv_name, vg_name=vg_name)

        # we must have either an existing data_lv or a newly created, so lets make
        # sure that the tags are correct
        if not data_lv:
            raise RuntimeError('no data logical volume found with: %s' % args.data)
        if not args.journal:
            raise RuntimeError('--journal is required when using --filestore')

        journal_lv = self.get_journal_lv(args.journal)
        if journal_lv:
            journal_device, journal_uuid = journal_lv.lv_path, journal_lv.lv_uuid
            # we can only set tags on an lv, the pv (if any) can't as we
            # aren't making it part of an lvm group (vg)
            journal_lv.set_tags({
                'ceph.type': 'journal',
                'ceph.osd_fsid': fsid,
                'ceph.osd_id': osd_id,
                'ceph.cluster_fsid': cluster_fsid,
                'ceph.journal_device': journal_device,
                'ceph.journal_uuid': journal_uuid,
                'ceph.data_device': data_lv.lv_path,
                'ceph.data_uuid': data_lv.lv_uuid,
            })
        # allow a file
        elif os.path.isfile(args.journal):
            journal_device, journal_uuid = args.journal, ''
        # otherwise assume this is a regular disk partition
        else:
            journal_device = args.journal
            journal_uuid = self.get_journal_ptuuid(args.journal)

        data_lv.set_tags({
            'ceph.type': 'data',
            'ceph.osd_fsid': fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.journal_device': journal_device,
            'ceph.journal_uuid': journal_uuid,
            'ceph.data_device': data_lv.lv_path,
            'ceph.data_uuid': data_lv.lv_uuid,
        })

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=osd_id,
            fsid=fsid,
        )
    elif args.bluestore:
        prepare_bluestore(args)
def zap(self, args):
    """
    Zap a single device (LV path or partition): refuse mapper devices,
    close any dmcrypt mapping, unmount a mounted path, wipe filesystem
    signatures and data, and with ``--destroy`` remove the backing VG
    and PV.
    """
    device = args.device
    if disk.is_mapper_device(device):
        terminal.error(
            "Refusing to zap the mapper device: {}".format(device))
        raise SystemExit(1)

    lv = api.get_lv_from_argument(device)
    if lv:
        # we are zapping a logical volume
        path = lv.lv_path
    else:
        # we are zapping a partition
        #TODO: ensure device is a partition
        path = device

    mlogger.info("Zapping: %s", path)

    # check if there was a pv created with the
    # name of device
    pv = api.get_pv(pv_name=device)
    if pv:
        lv = api.get_lv(vg_name=pv.vg_name)

    dmcrypt = False
    dmcrypt_uuid = None
    if lv:
        if lv.tags.get('ceph.cluster_name') and lv.tags.get('ceph.osd_id'):
            lv_path = "/var/lib/ceph/osd/{}-{}".format(
                lv.tags['ceph.cluster_name'], lv.tags['ceph.osd_id'])
        else:
            # NOTE(review): `lv.path` is used here while the rest of the
            # file uses `lv.lv_path` — confirm the attribute exists
            lv_path = lv.path
        dmcrypt_uuid = lv.lv_uuid
        dmcrypt = lv.encrypted
        if system.path_is_mounted(lv_path):
            mlogger.info("Unmounting %s", lv_path)
            system.unmount(lv_path)
    else:
        # we're most likely dealing with a partition here, check to
        # see if it was encrypted
        partuuid = disk.get_partuuid(device)
        if encryption.status("/dev/mapper/{}".format(partuuid)):
            dmcrypt_uuid = partuuid
            dmcrypt = True

    if dmcrypt and dmcrypt_uuid:
        dmcrypt_path = "/dev/mapper/{}".format(dmcrypt_uuid)
        mlogger.info("Closing encrypted path %s", dmcrypt_path)
        encryption.dmcrypt_close(dmcrypt_path)

    if args.destroy:
        if pv:
            logger.info(
                "Found a physical volume created from %s, will destroy all it's vgs and lvs", device)
            vg_name = pv.vg_name
            mlogger.info(
                "Destroying volume group %s because --destroy was given", vg_name)
            api.remove_vg(vg_name)
            mlogger.info(
                "Destroying physical volume %s because --destroy was given", device)
            api.remove_pv(device)
        else:
            mlogger.info(
                "Skipping --destroy because no associated physical volumes are found for %s", device)

    wipefs(path)
    zap_data(path)

    if lv and not pv:
        # remove all lvm metadata
        lv.clear_tags()

    terminal.success("Zapping successful for: %s" % path)