def activate(self, args, osd_id=None, osd_fsid=None):
    """
    Activate the OSD backed by the matching logical volumes.

    :param args: The parsed arguments coming from the CLI
    :param osd_id: When activating all, this gets populated with an
                   existing OSD ID
    :param osd_fsid: When activating all, this gets populated with an
                     existing OSD FSID
    """
    if osd_id is None:
        osd_id = args.osd_id
    if osd_fsid is None:
        osd_fsid = args.osd_fsid
    lvs = api.Volumes()
    # narrow the LVs down to the OSD we were asked to activate; the fsid
    # is required for filtering, the id only sharpens the match
    if osd_fsid:
        wanted_tags = {'ceph.osd_fsid': osd_fsid}
        if osd_id:
            wanted_tags['ceph.osd_id'] = osd_id
        lvs.filter(lv_tags=wanted_tags)
    if not lvs:
        raise RuntimeError('could not find osd.%s with fsid %s' % (osd_id, osd_fsid))
    # This argument is only available when passed in directly or via
    # systemd, not when ``create`` is being used
    if getattr(args, 'auto_detect_objectstore', False):
        logger.info('auto detecting objectstore')
        # may get multiple lvs, so can't do lvs.get() calls here
        for lv in lvs:
            if lv.tags.get('ceph.journal_uuid'):
                logger.info('found a journal associated with the OSD, assuming filestore')
                return activate_filestore(lvs, no_systemd=args.no_systemd)
        logger.info('unable to find a journal associated with the OSD, assuming bluestore')
        return activate_bluestore(lvs, no_systemd=args.no_systemd)
    if args.bluestore:
        activate_bluestore(lvs, no_systemd=args.no_systemd)
    elif args.filestore:
        activate_filestore(lvs, no_systemd=args.no_systemd)
def update(self):
    """
    Ensure all journal devices are up to date if they aren't a logical
    volume
    """
    for lv in api.Volumes():
        # only consider ceph-based logical volumes, everything else
        # will get ignored
        if 'ceph.osd_id' not in lv.tags:
            continue
        for kind in ('journal', 'block', 'wal', 'db'):
            part_uuid = lv.tags.get('ceph.%s_uuid' % kind)
            if not part_uuid:
                # bluestore will not have a journal, filestore will not
                # have a block/wal/db, so we must skip if not present
                continue
            device_tag = 'ceph.%s_device' % kind
            resolved = disk.get_device_from_partuuid(part_uuid)
            # the device node may have moved; rewrite the tag only when
            # the resolved path no longer matches what is recorded
            if resolved and lv.tags[device_tag] != resolved:
                lv.set_tags({device_tag: resolved})
def activate(self, args):
    """
    Locate the logical volumes tagged for the requested OSD and activate
    it as either filestore or bluestore, reporting success on the terminal.
    """
    lvs = api.Volumes()
    # narrow the LVs down to the OSD we were asked to activate; the fsid
    # is required for filtering, the id only sharpens the match
    if args.osd_fsid:
        wanted_tags = {'ceph.osd_fsid': args.osd_fsid}
        if args.osd_id:
            wanted_tags['ceph.osd_id'] = args.osd_id
        lvs.filter(lv_tags=wanted_tags)
    if not lvs:
        raise RuntimeError('could not find osd.%s with fsid %s' % (args.osd_id, args.osd_fsid))
    # This argument is only available when passed in directly or via
    # systemd, not when ``create`` is being used
    if getattr(args, 'auto_detect_objectstore', False):
        logger.info('auto detecting objectstore')
        # may get multiple lvs, so can't do lvs.get() calls here
        for lv in lvs:
            if lv.tags.get('ceph.journal_uuid'):
                logger.info('found a journal associated with the OSD, assuming filestore')
                return activate_filestore(lvs)
        logger.info('unable to find a journal associated with the OSD, assuming bluestore')
        return activate_bluestore(lvs)
    if args.bluestore:
        activate_bluestore(lvs)
    elif args.filestore:
        activate_filestore(lvs)
    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % args.osd_id)
def zap_lv(self, device):
    """
    Wipe a single logical volume, and destroy it when --destroy was given.

    Device examples: vg-name/lv-name, /dev/vg-name/lv-name
    Requirements: Must be a logical volume (LV)
    """
    lv = api.get_lv(lv_name=device.lv_name, vg_name=device.vg_name)
    self.unmount_lv(lv)
    path = device.abspath
    wipefs(path)
    zap_data(path)
    if not self.args.destroy:
        if lv:
            # just remove all lvm metadata, leaving the LV around
            lv.clear_tags()
        return
    remaining = api.Volumes()
    remaining.filter(vg_name=device.vg_name)
    if len(remaining) <= 1:
        # this LV is the last one in its VG, so take the whole group down
        mlogger.info(
            'Only 1 LV left in VG, will proceed to destroy volume group %s',
            device.vg_name)
        api.remove_vg(device.vg_name)
    else:
        mlogger.info(
            'More than 1 LV left in VG, will proceed to destroy LV only'
        )
        mlogger.info('Removing LV because --destroy was given: %s', path)
        api.remove_lv(path)
def full_report(self):
    """
    Generate a report for all the logical volumes and associated devices
    that have been previously prepared by Ceph

    :returns: dict mapping OSD id -> list of device dictionaries
    """
    lvs = api.Volumes()
    report = {}
    for lv in lvs:
        try:
            _id = lv.tags['ceph.osd_id']
        except KeyError:
            # only consider ceph-based logical volumes, everything else
            # will get ignored
            continue
        report.setdefault(_id, [])
        report[_id].append(lv.as_dict())
        # use .get() here: an LV without a journal tag (e.g. bluestore)
        # would otherwise raise KeyError and abort the entire report
        journal_uuid = lv.tags.get('ceph.journal_uuid')
        if journal_uuid and not api.get_lv(lv_uuid=journal_uuid):
            # means we have a regular device, so query blkid
            journal_device = disk.get_device_from_partuuid(journal_uuid)
            if journal_device:
                report[_id].append({
                    'tags': {'PARTUUID': journal_uuid},
                    'type': 'journal',
                    'path': journal_device,
                })
    return report
def volumes(monkeypatch):
    """Pytest fixture providing an empty Volumes collection with lvm stubbed."""
    # stub out process.call so no real lvm commands get executed
    monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
    fixture_volumes = api.Volumes()
    fixture_volumes._purge()
    # also patch api.Volumes so that when it is called, it will use the newly
    # created fixture, with whatever the test method wants to append to it
    monkeypatch.setattr(api, 'Volumes', lambda: fixture_volumes)
    return fixture_volumes
def single_report(self, device, lvs=None):
    """
    Generate a report for a single device. This can be either a logical
    volume in the form of vg/lv or a device with an absolute path like
    /dev/sda1 or /dev/sda

    :param device: device path or vg/lv spec
    :param lvs: optional pre-built ``api.Volumes`` to search in
    :returns: dict mapping OSD id -> list of device dictionaries
    """
    if lvs is None:
        lvs = api.Volumes()
    report = {}
    lv = api.get_lv_from_argument(device)
    # the device might instead be a PV backing one or many ceph LVs
    pv = api.get_pv(pv_name=device)
    if pv and not lv:
        try:
            lv = api.get_lv(vg_name=pv.vg_name)
        except MultipleLVsError:
            # whole-disk/VG device: report everything the VG holds
            lvs.filter(vg_name=pv.vg_name)
            return self.full_report(lvs=lvs)
    if lv:
        osd_id = lv.tags.get('ceph.osd_id')
        if osd_id is None:
            logger.warning('device is not part of ceph: %s', device)
            return report
        lv_report = lv.as_dict()
        lv_report['devices'] = self.match_devices(lv.lv_uuid)
        report.setdefault(osd_id, []).append(lv_report)
        return report
    # this has to be a journal/wal/db device (not a logical volume) so try
    # to find the PARTUUID that should be stored in the OSD logical volume
    for kind in ('journal', 'block', 'wal', 'db'):
        owner = lvs.get(lv_tags={'ceph.%s_device' % kind: device})
        if owner:
            osd_id = owner.tags['ceph.osd_id']
            part_uuid = owner.tags['ceph.%s_uuid' % kind]
            report.setdefault(osd_id, []).append(
                {
                    'tags': {'PARTUUID': part_uuid},
                    'type': kind,
                    'path': device,
                }
            )
    return report
def activate(self, args):
    """
    Find the logical volumes tagged for the requested OSD and activate it
    as filestore.
    """
    lvs = api.Volumes()
    # narrow the LVs down to the OSD we were asked to activate; the fsid
    # is required for filtering, the id only sharpens the match
    if args.osd_fsid:
        wanted_tags = {'ceph.osd_fsid': args.osd_fsid}
        if args.osd_id:
            wanted_tags['ceph.osd_id'] = args.osd_id
        lvs.filter(lv_tags=wanted_tags)
    if not lvs:
        raise RuntimeError('could not find osd.%s with fsid %s' % (args.osd_id, args.osd_fsid))
    activate_filestore(lvs)
def find_associated_devices(osd_id=None, osd_fsid=None):
    """
    From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the
    system that match those tag values, further detect if any partitions are
    part of the OSD, and then return the set of LVs and partitions (if any).

    :param osd_id: OSD id to look for (optional)
    :param osd_fsid: OSD fsid to look for (optional)
    :returns: list of ``Device`` objects to operate on
    :raises RuntimeError: when no LV matches the given identifiers
    """
    lv_tags = {}
    if osd_id:
        lv_tags['ceph.osd_id'] = osd_id
    if osd_fsid:
        lv_tags['ceph.osd_fsid'] = osd_fsid
    lvs = api.Volumes()
    lvs.filter(lv_tags=lv_tags)
    if not lvs:
        # parenthesize the fallback: ``%`` binds tighter than ``or``, so the
        # unparenthesized form always formatted osd_id and printed
        # "OSD: None" when only a fsid was given
        raise RuntimeError('Unable to find any LV for zapping OSD: %s' %
                           (osd_id or osd_fsid))
    devices_to_zap = ensure_associated_lvs(lvs)
    return [Device(path) for path in set(devices_to_zap) if path]
def full_report(self, lvs=None):
    """
    Generate a report for all the logical volumes and associated devices
    that have been previously prepared by Ceph

    :param lvs: optional pre-built ``api.Volumes``; queried fresh when None
    :returns: dict mapping OSD id -> list of device dictionaries
    """
    if lvs is None:
        lvs = api.Volumes()
    report = {}
    for lv in lvs:
        osd_id = lv.tags.get('ceph.osd_id')
        if osd_id is None:
            # only consider ceph-based logical volumes, everything else
            # will get ignored
            continue
        entry = lv.as_dict()
        entry['devices'] = self.match_devices(lv.lv_uuid)
        report.setdefault(osd_id, []).append(entry)
        for kind in ('journal', 'block', 'wal', 'db'):
            part_uuid = lv.tags.get('ceph.%s_uuid' % kind)
            if not part_uuid:
                # bluestore will not have a journal, filestore will not have
                # a block/wal/db, so we must skip if not present
                continue
            if api.get_lv(lv_uuid=part_uuid, lvs=lvs):
                # the uuid belongs to another LV, nothing extra to resolve
                continue
            # means we have a regular device, so query blkid
            partition = disk.get_device_from_partuuid(part_uuid)
            if partition:
                report[osd_id].append({
                    'tags': {'PARTUUID': part_uuid},
                    'type': kind,
                    'path': partition,
                })
    return report
def single_report(self, device):
    """
    Generate a report for a single device. This can be either a logical
    volume in the form of vg/lv or a device with an absolute path like
    /dev/sda1

    :param device: device path or vg/lv spec
    :returns: dict mapping OSD id -> list of device dictionaries
    """
    lvs = api.Volumes()
    report = {}
    lv = api.get_lv_from_argument(device)
    if lv:
        osd_id = lv.tags.get('ceph.osd_id')
        if osd_id is None:
            logger.warning('device is not part of ceph: %s', device)
            return report
        report.setdefault(osd_id, []).append(lv.as_dict())
        return report
    # this has to be a journal/wal/db device (not a logical volume) so try
    # to find the PARTUUID that should be stored in the OSD logical volume
    for kind in ('journal', 'block', 'wal', 'db'):
        owner = lvs.get(lv_tags={'ceph.%s_device' % kind: device})
        if owner:
            osd_id = owner.tags['ceph.osd_id']
            part_uuid = owner.tags['ceph.%s_uuid' % kind]
            report.setdefault(osd_id, []).append({
                'tags': {'PARTUUID': part_uuid},
                'type': kind,
                'path': device,
            })
    return report
def update(self):
    """
    Ensure all journal devices are up to date if they aren't a logical
    volume
    """
    lvs = api.Volumes()
    for lv in lvs:
        try:
            lv.tags['ceph.osd_id']
        except KeyError:
            # only consider ceph-based logical volumes, everything else
            # will get ignored
            continue
        # use .get() here: an LV without a journal tag (e.g. bluestore)
        # would otherwise raise KeyError and abort the whole update
        journal_uuid = lv.tags.get('ceph.journal_uuid')
        if not journal_uuid:
            continue
        # query blkid for the UUID, if it matches we have a physical device
        # which needs to be compared to ensure it is up to date
        journal_device = disk.get_device_from_partuuid(journal_uuid)
        if journal_device:
            if lv.tags.get('ceph.journal_device') != journal_device:
                # this means that the device has changed, so it must be updated
                # on the API to reflect this
                lv.set_tags({'ceph.journal_device': journal_device})
def volumes(monkeypatch):
    """Pytest fixture providing an empty Volumes collection with lvm stubbed."""
    # stub out the subprocess layer so no real lvm commands run
    monkeypatch.setattr('ceph_volume.process.call', lambda x: ('', '', 0))
    fixture_volumes = lvm_api.Volumes()
    fixture_volumes._purge()
    return fixture_volumes
def zap(self, args):
    """
    Wipe every device in ``args.devices``: remove filesystem signatures and
    ceph data, and when ``--destroy`` was given also remove the backing
    LVM metadata (LVs, VGs, PVs).

    :param args: parsed CLI arguments; reads ``args.devices`` and
                 ``args.destroy``
    :raises SystemExit: when a device is a mapper device (refused)
    """
    for device in args.devices:
        # refuse mapper devices outright: zapping them is not supported
        if disk.is_mapper_device(device):
            terminal.error(
                "Refusing to zap the mapper device: {}".format(device))
            raise SystemExit(1)
        lv = api.get_lv_from_argument(device)
        if lv:
            # we are zapping a logical volume
            path = lv.lv_path
            self.unmount_lv(lv)
        else:
            # we are zapping a partition
            #TODO: ensure device is a partition
            path = device
            # check to if it is encrypted to close
            partuuid = disk.get_partuuid(device)
            if encryption.status("/dev/mapper/{}".format(partuuid)):
                dmcrypt_uuid = partuuid
                self.dmcrypt_close(dmcrypt_uuid)
        mlogger.info("Zapping: %s", path)
        # check if there was a pv created with the
        # name of device
        pvs = api.PVolumes()
        pvs.filter(pv_name=device)
        # collect the VG names so they can be destroyed below if requested
        vgs = set([pv.vg_name for pv in pvs])
        for pv in pvs:
            vg_name = pv.vg_name
            lv = None
            if pv.lv_uuid:
                lv = api.get_lv(vg_name=vg_name, lv_uuid=pv.lv_uuid)
            if lv:
                # unmount any LV that sits on top of this PV before wiping
                self.unmount_lv(lv)
        if args.destroy:
            for vg_name in vgs:
                mlogger.info(
                    "Destroying volume group %s because --destroy was given",
                    vg_name)
                api.remove_vg(vg_name)
            if not lv:
                mlogger.info(
                    "Destroying physical volume %s because --destroy was given",
                    device)
                api.remove_pv(device)
        wipefs(path)
        zap_data(path)
        # NOTE(review): this branch only runs when the device resolved to an
        # LV and no PV matched the device name
        if lv and not pvs:
            if args.destroy:
                lvs = api.Volumes()
                lvs.filter(vg_name=lv.vg_name)
                if len(lvs) <= 1:
                    mlogger.info(
                        'Only 1 LV left in VG, will proceed to destroy volume group %s',
                        lv.vg_name)
                    api.remove_vg(lv.vg_name)
                else:
                    mlogger.info(
                        'More than 1 LV left in VG, will proceed to destroy LV only'
                    )
                    mlogger.info(
                        'Removing LV because --destroy was given: %s', lv)
                    api.remove_lv(lv)
            else:
                # just remove all lvm metadata, leaving the LV around
                lv.clear_tags()
    terminal.success("Zapping successful for: %s" % ", ".join(args.devices))
def volumes(monkeypatch):
    """Pytest fixture providing an empty Volumes collection with lvm stubbed."""
    # stub out the subprocess layer so no real lvm commands run
    monkeypatch.setattr(process, 'call', lambda x: ('', '', 0))
    fixture_volumes = api.Volumes()
    fixture_volumes._purge()
    return fixture_volumes