def test_report_a_ceph_lv_with_multiple_pvs_of_same_name(self, pvolumes, monkeypatch):
    """A data LV backed by two PVs that share a name is reported once."""
    tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
    lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
                    lv_path='/dev/VolGroup/lv', lv_tags=tags)
    monkeypatch.setattr(api, 'get_lv_from_argument', lambda device: None)
    monkeypatch.setattr(api, 'get_lv', lambda vg_name: lv)
    # two PVs with the same pv_name, only the first carries the lv_uuid
    pv_primary = api.PVolume(vg_name="vg", pv_name='/dev/sda',
                             pv_uuid="0000", pv_tags={}, lv_uuid="aaaa")
    pv_duplicate = api.PVolume(vg_name="vg", pv_name='/dev/sda',
                               pv_uuid="0000", pv_tags={})
    pvolumes.append(pv_primary)
    pvolumes.append(pv_duplicate)
    monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes)
    result = lvm.listing.List([]).single_report('/dev/sda')
    assert result['0'][0]['name'] == 'lv'
    assert result['0'][0]['lv_tags'] == tags
    assert result['0'][0]['path'] == '/dev/VolGroup/lv'
    # the duplicate PV must not produce a second report entry
    assert len(result) == 1
def test_multiple_pvs_is_matched_by_name(self, pvolumes, monkeypatch):
    """With two same-named PVs present, get_pv returns the first match."""
    matching_pv = api.PVolume(vg_name="vg", pv_name='/dev/sda',
                              pv_uuid="0000", pv_tags={}, lv_uuid="0000000")
    other_pv = api.PVolume(vg_name="vg", pv_name='/dev/sda',
                           pv_uuid="0000", pv_tags={})
    pvolumes.append(matching_pv)
    pvolumes.append(other_pv)
    monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes)
    assert api.get_pv(pv_name='/dev/sda') == matching_pv
def test_no_common_vg(self, volumes, stub_vgs, fakedevice, factory, conf_ceph):
    """Two SSDs belonging to different VGs cannot share a fast VG: raise."""
    # each SSD is an LVM member of its own, distinct VG
    pv_fast1 = lvm.PVolume(vg_name='fast1', pv_name='/dev/sda', pv_tags='')
    pv_fast2 = lvm.PVolume(vg_name='fast2', pv_name='/dev/sdb', pv_tags='')
    ssd1 = fakedevice(used_by_ceph=False, is_lvm_member=True,
                      rotational=False, sys_api=dict(size=6073740000),
                      pvs_api=[pv_fast1])
    ssd2 = fakedevice(used_by_ceph=False, is_lvm_member=True,
                      rotational=False, sys_api=dict(size=6073740000),
                      pvs_api=[pv_fast2])
    hdd = fakedevice(used_by_ceph=False, is_lvm_member=False,
                     rotational=True, sys_api=dict(size=6073740000))
    # get_api_vgs() will report one VG per SSD, with no VG in common
    stub_vgs([
        dict(vg_free='7g', vg_name='fast1', lv_name='foo',
             lv_path='/dev/vg/fast1', lv_tags="ceph.type=data"),
        dict(vg_free='7g', vg_name='fast2', lv_name='foo',
             lv_path='/dev/vg/fast2', lv_tags="ceph.type=data"),
    ])
    conf_ceph(get_safe=lambda *a: '5120')
    args = factory(filtered_devices=[], osds_per_device=1,
                   journal_size=None, osd_ids=[])
    devices = [ssd1, ssd2, hdd]
    with pytest.raises(RuntimeError) as error:
        filestore.MixedType.with_auto_devices(args, devices)
    assert 'Could not find a common VG between devices' in str(error.value)
def test_report_a_ceph_lv_with_devices(self, monkeypatch):
    """The report for a VG/LV argument lists every backing device."""
    tags = 'ceph.osd_id=0,ceph.type=data'
    pvolumes = [
        api.PVolume(vg_name="VolGroup", pv_name='/dev/sda1', pv_uuid='',
                    pv_tags={}, lv_uuid="aaaa"),
        api.PVolume(vg_name="VolGroup", pv_name='/dev/sdb1', pv_uuid='',
                    pv_tags={}, lv_uuid="aaaa"),
    ]
    volumes = [
        api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
                   lv_path='/dev/VolGroup/lv', lv_tags=tags),
    ]
    monkeypatch.setattr(lvm.listing.api, 'get_pvs', lambda **kwargs: pvolumes)
    monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: volumes)
    listing = lvm.listing.List([])
    # seed the cached PV view the listing code consults for device lookup
    listing._pvs = [
        {'lv_uuid': 'aaaa', 'pv_name': '/dev/sda1', 'pv_tags': '',
         'pv_uuid': ''},
        {'lv_uuid': 'aaaa', 'pv_name': '/dev/sdb1', 'pv_tags': '',
         'pv_uuid': ''},
    ]
    result = listing.single_report('VolGroup/lv')
    assert result['0'][0]['name'] == 'lv'
    assert result['0'][0]['lv_tags'] == tags
    assert result['0'][0]['path'] == '/dev/VolGroup/lv'
    assert result['0'][0]['devices'] == ['/dev/sda1', '/dev/sdb1']
def test_multiple_pvs_is_matched_by_tags(self, pvolumes, monkeypatch):
    """A tag query matching more than one PV is ambiguous and must raise."""
    tagged_pv = api.PVolume(vg_name="vg1", pv_name='/dev/sdc',
                            pv_uuid="1000", pv_tags="ceph.foo=bar",
                            lv_uuid="0000000")
    other_tagged_pv = api.PVolume(vg_name="vg", pv_name='/dev/sda',
                                  pv_uuid="0000", pv_tags="ceph.foo=bar")
    pvolumes.append(tagged_pv)
    pvolumes.append(other_tagged_pv)
    monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes)
    with pytest.raises(exceptions.MultiplePVsError):
        api.get_pv(pv_tags={"ceph.foo": "bar"})
def test_get_single_pv_multiple_matches_raises_runtimeerror(self, m_get_pvs):
    """get_single_pv cannot choose between two host PVs: RuntimeError."""
    m_get_pvs.return_value = [
        api.PVolume(pv_name='/dev/sda', pv_tags={}),
        api.PVolume(pv_name='/dev/sdb', pv_tags={}),
    ]
    with pytest.raises(RuntimeError) as e:
        api.get_single_pv()
    assert "matched more than 1 PV present on this host." in str(e.value)
def test_get_first_pv(self, monkeypatch):
    """get_first_pv returns the first PVolume parsed from pvs output."""
    first = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
                        vg_name='vg1')
    second = api.PVolume(pv_name='/dev/sdb', pv_uuid='0001', pv_tags={},
                         vg_name='vg2')
    # fake the semicolon-separated stdout the lvm pvs call would produce
    stdout = [
        '{};{};{};{};;'.format(pv.pv_name, pv.pv_tags, pv.pv_uuid, pv.vg_name)
        for pv in (first, second)
    ]
    monkeypatch.setattr(api.process, 'call',
                        lambda x, **kw: (stdout, '', 0))
    result = api.get_first_pv()
    assert isinstance(result, api.PVolume)
    assert result.pv_name == first.pv_name
def test_get_pvs(self, monkeypatch):
    """get_pvs parses one PVolume per line of pvs output."""
    expected = [
        api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
                    vg_name='vg1'),
        api.PVolume(pv_name='/dev/sdb', pv_uuid='0001', pv_tags={},
                    vg_name='vg2'),
    ]
    stdout = [
        '{};{};{};{};;'.format(pv.pv_name, pv.pv_tags, pv.pv_uuid, pv.vg_name)
        for pv in expected
    ]
    monkeypatch.setattr(api.process, 'call',
                        lambda x, **kw: (stdout, '', 0))
    parsed = api.get_pvs()
    assert len(parsed) == len(expected)
    for want, got in zip(expected, parsed):
        assert got.pv_name == want.pv_name
def test_ceph_journal_lv_reported(self, pvolumes, volumes, monkeypatch):
    """Both the data LV and its journal LV appear in the full report."""
    data_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
    journal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=journal'
    pvolumes.append(api.PVolume(pv_name='/dev/sda1', pv_tags={},
                                pv_uuid="0000", vg_name="VolGroup",
                                lv_uuid="aaaa"))
    volumes.append(api.Volume(lv_name='volume1', lv_uuid='y',
                              lv_tags=data_tags,
                              lv_path='/dev/VolGroup/lv',
                              vg_name='VolGroup'))
    volumes.append(api.Volume(lv_name='journal', lv_uuid='x',
                              lv_tags=journal_tags,
                              lv_path='/dev/VolGroup/journal',
                              vg_name='VolGroup'))
    monkeypatch.setattr(lvm.listing.api, 'get_pvs', lambda **kwargs: pvolumes)
    monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: volumes)
    result = lvm.listing.List([]).full_report()
    assert result['0'][0]['name'] == 'volume1'
    assert result['0'][1]['name'] == 'journal'
def test_used_by_ceph(self, device_info, pvolumes, pvolumes_empty, monkeypatch, ceph_type):
    """A device whose LV carries a ceph data/journal tag is used by ceph."""
    pv = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000",
                     pv_tags={}, vg_name="vg")
    pvolumes.append(pv)
    monkeypatch.setattr(
        api, 'PVolumes',
        lambda populate=True: pvolumes if populate else pvolumes_empty)
    lv_data = {
        "lv_path": "vg/lv",
        "vg_name": "vg",
        "lv_uuid": "0000",
        "tags": {"ceph.osd_id": 0, "ceph.type": ceph_type},
    }
    device_info(devices={"/dev/sda": {"foo": "bar"}},
                lsblk={"TYPE": "part"}, lv=lv_data)
    vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
                         vg_extent_size=1073741824)
    monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
    disk = device.Device("/dev/sda")
    assert disk.used_by_ceph
def test_report_a_ceph_journal_device(self, volumes, pvolumes, monkeypatch):
    """A raw journal device is detected via the tags on its ceph LV."""
    # ceph lvs are detected by looking into their tags
    tags = ('ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=journal,'
            'ceph.journal_device=/dev/sda1')
    pvolumes.append(api.PVolume(pv_name='/dev/sda1', pv_uuid="0000",
                                pv_tags={}, vg_name="VolGroup",
                                lv_uuid="aaaa"))
    volumes.append(api.Volume(lv_name='lv', lv_uuid='aaa', lv_tags=tags,
                              lv_path='/dev/VolGroup/lv',
                              vg_name='VolGroup'))
    monkeypatch.setattr(lvm.listing.api, 'get_pvs', lambda **kwargs: pvolumes)
    monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: volumes)
    result = lvm.listing.List([]).single_report('/dev/sda1')
    assert result['0'][0]['tags'] == {'PARTUUID': 'x'}
    assert result['0'][0]['type'] == 'journal'
    assert result['0'][0]['path'] == '/dev/sda1'
def test_ssd_is_lvm_member_doesnt_fail(self, volumes, stub_vgs, fakedevice, factory, conf_ceph):
    """An SSD already in a VG is reused for the journal instead of failing."""
    # fast PV, because the ssd is an LVM member of the 'fast' VG
    ceph_pv = lvm.PVolume(vg_name='fast', pv_name='/dev/sda', pv_tags='')
    ssd = fakedevice(used_by_ceph=False, is_lvm_member=True,
                     rotational=False, sys_api=dict(size=6073740000),
                     pvs_api=[ceph_pv])
    hdd = fakedevice(used_by_ceph=False, is_lvm_member=False,
                     rotational=True, sys_api=dict(size=6073740000))
    # when get_api_vgs() gets called, it will return this one VG
    stub_vgs([
        dict(vg_free='7g', vg_name='fast', lv_name='foo',
             lv_path='/dev/vg/foo', lv_tags="ceph.type=data")
    ])
    conf_ceph(get_safe=lambda *a: '5120')
    args = factory(filtered_devices=[], osds_per_device=1,
                   journal_size=None)
    devices = [ssd, hdd]
    result = filestore.MixedType(devices, args).computed['osds'][0]
    assert result['journal']['path'] == 'vg: fast'
    assert result['journal']['percentage'] == 71
    assert result['journal']['human_readable_size'] == '5.00 GB'
def test_single_pv_is_matched_by_uuid(self, pvolumes, monkeypatch):
    """A lone PV is retrievable through its pv_uuid."""
    pv = api.PVolume(pv_name='/dev/vg/foo', pv_uuid='1111',
                     pv_tags="ceph.type=data")
    pvolumes.append(pv)
    monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes)
    assert api.get_pv(pv_uuid='1111') == pv
def test_single_pv_is_matched(self, pvolumes, monkeypatch):
    """A single PV is found by uuid lookup."""
    pv = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", pv_tags={})
    pvolumes.append(pv)
    monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes)
    assert api.get_pv(pv_uuid='0000') == pv
def test_used_by_ceph(self, device_info, monkeypatch, ceph_type):
    """A partition whose LV is tagged for ceph counts as used by ceph."""
    devices = {"/dev/sda": {"foo": "bar"}}
    lsblk = {"TYPE": "part"}
    pvolumes = [
        api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000",
                    pv_tags={}, vg_name="vg"),
    ]
    lv_data = {
        "lv_name": "lv",
        "lv_path": "vg/lv",
        "vg_name": "vg",
        "lv_uuid": "0000",
        "lv_tags": "ceph.osd_id=0,ceph.type=" + ceph_type,
    }
    volumes = [api.Volume(**lv_data)]
    monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes)
    # hand out copies so the device code cannot mutate the originals
    monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
    device_info(devices=devices, lsblk=lsblk, lv=lv_data)
    vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
                         vg_extent_size=1073741824)
    monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
    disk = device.Device("/dev/sda")
    assert disk.used_by_ceph
def test_not_used_by_ceph(self, device_info, pvolumes, pvolumes_empty, monkeypatch):
    """A journal-tagged LV does not mark the device as used by ceph."""
    pvolumes.append(api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
                                lv_uuid="0000", pv_tags={}, vg_name="vg"))
    monkeypatch.setattr(
        api, 'PVolumes',
        lambda populate=True: pvolumes if populate else pvolumes_empty)
    lv_data = {
        "lv_path": "vg/lv",
        "vg_name": "vg",
        "lv_uuid": "0000",
        "tags": {"ceph.osd_id": 0, "ceph.type": "journal"},
    }
    device_info(devices={"/dev/sda": {"foo": "bar"}},
                lsblk={"TYPE": "part"}, lv=lv_data)
    disk = device.Device("/dev/sda")
    assert not disk.used_by_ceph
def test_vgs_is_not_empty(self, device_info, pvolumes, monkeypatch):
    """A disk that is a PV of some VG reports exactly one VG."""
    pv = api.PVolume(vg_name='foo', lv_uuid='111',
                     pv_name='/dev/nvme0n1', pv_uuid="0000", pv_tags={})
    pvolumes.append(pv)
    monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes)
    device_info(lsblk={"TYPE": "disk"})
    disk = device.Device("/dev/nvme0n1")
    assert len(disk.vgs) == 1
def test_vg_name_is_set(self, pvolumes, monkeypatch):
    """get_pv preserves the vg_name of the matched PV."""
    pv = api.PVolume(pv_name='/dev/vg/foo', pv_uuid='1111',
                     pv_tags="ceph.type=data", vg_name="vg")
    pvolumes.append(pv)
    monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes)
    result = api.get_pv(pv_name="/dev/vg/foo")
    assert result.vg_name == "vg"
def test_filter_by_tags_matches(self, pvolumes, monkeypatch):
    """Filtering on all of a PV's tags keeps the PV."""
    tags = "ceph.type=journal,ceph.osd_id=1"
    pv = api.PVolume(pv_name='/dev/vg/foo', pv_uuid='1111', pv_tags=tags)
    pvolumes.append(pv)
    pvolumes.filter(pv_tags={'ceph.type': 'journal', 'ceph.osd_id': '1'})
    assert pvolumes == [pv]
def test_filter_by_tag_does_not_match_one(self, pvolumes, monkeypatch):
    """One mismatched tag value is enough to filter a PV out entirely."""
    tags = "ceph.type=journal,ceph.osd_id=1,ceph.fsid=000-aaa"
    pv = api.PVolume(pv_name='/dev/vg/foo', pv_uuid='1111', pv_tags=tags)
    pvolumes.append(pv)
    # osd_id '2' does not match the PV's osd_id '1'
    pvolumes.filter(pv_tags={'ceph.type': 'journal', 'ceph.osd_id': '2'})
    assert pvolumes == []
def test_get_single_pv_one_match(self, m_get_pvs):
    """With exactly one PV on the host, get_single_pv returns it."""
    m_get_pvs.return_value = [api.PVolume(pv_name='/dev/sda', pv_tags={})]
    pv = api.get_single_pv()
    assert isinstance(pv, api.PVolume)
    assert pv.name == '/dev/sda'
def test_pv_api(self, device_info, pvolumes, monkeypatch):
    """A device backed by a PV exposes a truthy pvs_api listing."""
    pvolumes.append(api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
                                lv_uuid="0000", pv_tags={}, vg_name="vg"))
    monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes)
    device_info(devices={"/dev/sda": {"foo": "bar"}},
                lsblk={"TYPE": "part"})
    disk = device.Device("/dev/sda")
    assert disk.pvs_api
def test_get_pvs_single_pv(self, monkeypatch):
    """A single-line pvs output yields exactly one parsed PVolume."""
    pv = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
                     vg_name='vg1')
    stdout = ['{};;;;;;'.format(pv.pv_name)]
    monkeypatch.setattr(api.process, 'call',
                        lambda x, **kw: (stdout, '', 0))
    parsed = api.get_pvs()
    assert len(parsed) == 1
    assert parsed[0].pv_name == pv.pv_name
def test_vgs_is_empty(self, device_info, pvolumes, pvolumes_empty, monkeypatch):
    """A device with no matching PVs reports an empty vgs list."""
    pvolumes.append(api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
                                pv_tags={}))
    device_info(lsblk={"TYPE": "disk"})
    # no PVs reported at all for this device
    monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: {})
    disk = device.Device("/dev/nvme0n1")
    assert disk.vgs == []
def test_not_used_by_ceph(self, fake_call, device_info, monkeypatch):
    """A journal-tagged LV does not mark the partition as used by ceph."""
    pvolumes = [
        api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000",
                    pv_tags={}, vg_name="vg"),
    ]
    lv_data = {
        "lv_path": "vg/lv",
        "vg_name": "vg",
        "lv_uuid": "0000",
        "tags": {"ceph.osd_id": 0, "ceph.type": "journal"},
    }
    monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes)
    device_info(devices={"/dev/sda": {"foo": "bar"}},
                lsblk={"TYPE": "part", "PKNAME": "sda"}, lv=lv_data)
    disk = device.Device("/dev/sda")
    assert not disk.used_by_ceph
def test_vgs_is_empty(self, device_info, pvolumes, pvolumes_empty, monkeypatch):
    """A disk with no PV of its own has an empty vgs list."""
    pvolumes.append(api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
                                pv_tags={}))
    monkeypatch.setattr(
        api, 'PVolumes',
        lambda populate=True: pvolumes if populate else pvolumes_empty)
    device_info(lsblk={"TYPE": "disk"})
    disk = device.Device("/dev/nvme0n1")
    assert disk.vgs == []
def test_physical_wal_gets_reported(self, pvolumes, volumes, monkeypatch):
    """A WAL on a physical partition shows up in the full report."""
    tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=wal'
    pvolumes.append(api.PVolume(pv_name='/dev/sda1', pv_tags={},
                                pv_uuid="0000", vg_name="VolGroup",
                                lv_uuid="aaaa"))
    volumes.append(api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
                              lv_path='/dev/VolGroup/lv',
                              vg_name="VolGroup"))
    monkeypatch.setattr(lvm.listing.api, 'get_pvs', lambda **kwargs: pvolumes)
    monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: volumes)
    result = lvm.listing.List([]).full_report()
    assert result['0'][1]['path'] == '/dev/sda1'
    assert result['0'][1]['tags'] == {'PARTUUID': 'x'}
    assert result['0'][1]['type'] == 'wal'