def expected_locks(self, subchain):
    """Return the locks expected for an operation on *subchain*:
    shared domain, exclusive image, and the base volume lease."""
    image_namespace = rm.getNamespace(sc.IMAGE_NAMESPACE, subchain.sd_id)
    domain_lock = rm.Lock(sc.STORAGE, subchain.sd_id, rm.SHARED)
    image_lock = rm.Lock(image_namespace, subchain.img_id, rm.EXCLUSIVE)
    base_lease = volume.VolumeLease(
        subchain.host_id, subchain.sd_id, subchain.img_id, subchain.base_id)
    return [domain_lock, image_lock, base_lease]
def locks(self):
    """Return the locks needed by this job: shared domain lock, exclusive
    image lock, and the base volume lease when the domain supports
    volume leases."""
    image_namespace = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
    acquired = [
        rm.Lock(sc.STORAGE, self.sd_id, rm.SHARED),
        rm.Lock(image_namespace, self.img_id, rm.EXCLUSIVE),
    ]
    manifest = sdCache.produce_manifest(self.sd_id)
    if manifest.hasVolumeLeases():
        # We take only the base lease since no other volumes are modified
        acquired.append(volume.VolumeLease(
            self.host_id, self.sd_id, self.img_id, self.base_id))
    return acquired
def locks(self):
    """Return the locks needed by this job.

    The image lock mode follows writability; the volume lease is taken
    only for writable access on domains that support volume leases.
    """
    image_namespace = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
    image_mode = rm.EXCLUSIVE if self._writable else rm.SHARED
    acquired = [
        rm.Lock(sc.STORAGE, self.sd_id, rm.SHARED),
        rm.Lock(image_namespace, self.img_id, image_mode),
    ]
    if self._writable:
        manifest = sdCache.produce_manifest(self.sd_id)
        if manifest.hasVolumeLeases():
            acquired.append(volume.VolumeLease(
                self._host_id, self.sd_id, self.img_id, self.vol_id))
    return acquired
def expected_locks(self, base_vol):
    """Return the locks expected for an operation on *base_vol*."""
    image_namespace = rm.getNamespace(sc.IMAGE_NAMESPACE, base_vol.sdUUID)
    return [
        # Domain lock
        rm.Lock(sc.STORAGE, base_vol.sdUUID, rm.SHARED),
        # Image lock
        rm.Lock(image_namespace, base_vol.imgUUID, rm.EXCLUSIVE),
        # Volume lease
        volume.VolumeLease(
            0, base_vol.sdUUID, base_vol.imgUUID, base_vol.volUUID),
    ]
def expected_locks(src_vol, dst_vol):
    """Return the locks expected when copying *src_vol* to *dst_vol*."""
    src_namespace = rm.getNamespace(sc.IMAGE_NAMESPACE, src_vol.sdUUID)
    dst_namespace = rm.getNamespace(sc.IMAGE_NAMESPACE, dst_vol.sdUUID)
    return [
        # Domain lock for each volume
        rm.Lock(sc.STORAGE, src_vol.sdUUID, rm.SHARED),
        rm.Lock(sc.STORAGE, dst_vol.sdUUID, rm.SHARED),
        # Image lock for each volume, exclusive for the destination
        rm.Lock(src_namespace, src_vol.imgUUID, rm.SHARED),
        rm.Lock(dst_namespace, dst_vol.imgUUID, rm.EXCLUSIVE),
        # Volume lease for the destination volume
        volume.VolumeLease(
            0, dst_vol.sdUUID, dst_vol.imgUUID, dst_vol.volUUID),
    ]
def test_repr(self):
    """str(lock) exposes the class name, namespace, name, mode and id."""
    mode = rm.SHARED
    lock = rm.Lock('ns', 'name', mode)
    text = str(lock)
    for fragment in ("Lock", "ns=ns", "name=name", "mode=" + mode):
        assert fragment in text
    assert "%x" % id(lock) in text
def locks(self):
    """Return the locks needed by this job.

    The image lock is taken only when source and destination images
    differ (locking the same image twice would deadlock); the volume
    lease only for writable access on domains with volume leases.
    """
    # A shared domain lock is always required.
    acquired = [rm.Lock(sc.STORAGE, self.sd_id, rm.SHARED)]
    if self.lock_image:
        image_namespace = rm.getNamespace(sc.IMAGE_NAMESPACE, self.sd_id)
        image_mode = rm.EXCLUSIVE if self._writable else rm.SHARED
        acquired.append(rm.Lock(image_namespace, self.img_id, image_mode))
    if self._writable:
        manifest = sdCache.produce_manifest(self.sd_id)
        if manifest.hasVolumeLeases():
            acquired.append(volume.VolumeLease(
                self._host_id, self.sd_id, self.img_id, self.vol_id))
    return acquired
def test_acquire_release(self, monkeypatch):
    """acquire() and release() forward the right calls to the manager."""
    monkeypatch.setattr(rm, "_manager", FakeResourceManager())
    lock = rm.Lock('ns_A', 'name_A', rm.SHARED)
    calls = []

    lock.acquire()
    calls.append(
        ('acquireResource', (lock.ns, lock.name, lock.mode),
         {"timeout": None}))
    assert rm._manager.__calls__ == calls

    lock.release()
    calls.append(('releaseResource', (lock.ns, lock.name), {}))
    assert rm._manager.__calls__ == calls
def test_with_release_error(self, monkeypatch):
    """An error raised while releasing propagates out of the with block."""
    monkeypatch.setattr(rm, "_manager", FakeResourceManager())
    lock = rm.Lock('ns_A', 'name_A', rm.SHARED)

    class ReleaseError(Exception):
        pass

    def failing_release(ns, name):
        raise ReleaseError

    rm._manager.releaseResource = failing_release

    with pytest.raises(ReleaseError):
        with lock:
            pass
def test_with_user_error(self, monkeypatch):
    """The lock is released even when the with body raises."""
    monkeypatch.setattr(rm, "_manager", FakeResourceManager())
    lock = rm.Lock('ns_A', 'name_A', rm.SHARED)

    class UserError(Exception):
        pass

    with pytest.raises(UserError):
        with lock:
            raise UserError

    expected = [
        ('acquireResource', (lock.ns, lock.name, lock.mode),
         {"timeout": None}),
        ('releaseResource', (lock.ns, lock.name), {}),
    ]
    assert rm._manager.__calls__ == expected
def test_with(self, monkeypatch):
    """Entering the context acquires the resource; leaving releases it."""
    monkeypatch.setattr(rm, "_manager", FakeResourceManager())
    lock = rm.Lock('ns_A', 'name_A', rm.SHARED)

    with lock:
        # Resource should be acquired here.
        expected = [
            ('acquireResource', (lock.ns, lock.name, lock.mode),
             {"timeout": None}),
        ]
        assert rm._manager.__calls__ == expected
        rm._manager.__calls__.clear()

    # Resource should be released here.
    assert rm._manager.__calls__ == [
        ('releaseResource', (lock.ns, lock.name), {})]
def test_mode_ignored_for_sorting(self):
    """Ordering compares only namespace and name, never the mode."""
    first = rm.Lock('nsA', 'nameA', 'modeA')
    second = rm.Lock('nsA', 'nameA', 'modeB')
    assert not first < second
    assert not second < first
def test_mode_used_for_equality(self):
    """Locks that differ only in mode compare unequal."""
    lock_a = rm.Lock('nsA', 'nameA', 'modeA')
    lock_b = rm.Lock('nsA', 'nameA', 'modeB')
    assert lock_a != lock_b
def test_equality(self):
    """Two locks built from identical fields compare equal."""
    lock_one = rm.Lock('ns', 'name', 'mode')
    lock_two = rm.Lock('ns', 'name', 'mode')
    assert lock_one == lock_two
def test_less_than(self, a, b):
    """Parametrized: the lock built from *a* sorts before the one from *b*."""
    greater = rm.Lock(*b)
    lesser = rm.Lock(*a)
    assert lesser < greater
def test_properties(self):
    """Constructor arguments are exposed as the ns, name and mode properties."""
    lock = rm.Lock('ns', 'name', 'mode')
    assert lock.ns == 'ns'
    assert lock.name == 'name'
    assert lock.mode == 'mode'