def _set_lvm_membership(self):
    """Detect (once) whether this device hosts LVM PVs that belong to VGs.

    Scans the device's PV paths, caching the boolean in
    ``self._is_lvm_member`` and populating ``self.vgs``, ``self.lvs`` and
    ``self.vg_name`` along the way. Subsequent calls return the cached
    answer without rescanning.
    """
    if self._is_lvm_member is not None:
        # already computed on a previous call — return the cached result
        return self._is_lvm_member
    # this is contentious, if a PV is recognized by LVM but has no
    # VGs, should we consider it as part of LVM? We choose not to
    # here, because most likely, we need to use VGs from this PV.
    self._is_lvm_member = False
    for pv_path in self._get_pv_paths():
        device_vgs = lvm.get_device_vgs(pv_path)
        if not device_vgs:
            continue
        self.vgs.extend(device_vgs)
        # a pv can only be in one vg, so this should be safe
        # FIXME: While the above assumption holds, sda1 and sda2
        # can each host a PV and VG. I think the vg_name property is
        # actually unused (not 100% sure) and can simply be removed
        self.vg_name = device_vgs[0]
        self._is_lvm_member = True
        self.lvs.extend(lvm.get_device_lvs(pv_path))
    return self._is_lvm_member
def has_common_vg(ssd_devices):
    """ Ensure that devices have a common VG between them """
    msg = 'Could not find a common VG between devices: %s'
    # map VG name -> list of device paths whose PVs live in that VG
    vg_to_devices = {}
    for device in ssd_devices:
        device_vgs = lvm.get_device_vgs(device.abspath)
        if not device_vgs:
            continue
        for vg in device_vgs:
            vg_to_devices.setdefault(vg.name, []).append(device.abspath)
    # len of 1 means they all have a common vg, and len of 0 means that these
    # are blank
    if len(vg_to_devices) <= 1:
        return
    raise RuntimeError(msg % ', '.join(vg_to_devices.keys()))
def test_get_device_vgs_with_empty_pv(self, patched_output_parser, pcall):
    # A parsed entry whose vg_name is empty means the PV belongs to no VG;
    # such entries must be filtered out of the returned list.
    pcall.return_value = ('', '', '')
    patched_output_parser.return_value = [{'vg_name': ''}]
    result = api.get_device_vgs('/dev/foo')
    assert result == []
def get_common_vg(self, devs):
    """Return the first VG found on any of the given devices.

    Walks ``devs`` in order and returns the first VG reported for a
    device's PVs; implicitly returns None when no device has any VGs.
    NOTE(review): despite the name, this does not verify the VG is
    actually common to all devices — it returns the first one found.
    """
    # find all the vgs associated with the current device
    for device in devs:
        device_vgs = get_device_vgs(device.abspath)
        if device_vgs:
            return device_vgs[0]