def testRequestInvalidResource(self, tmp_manager):
    with pytest.raises(se.InvalidResourceName) as e:
        rm.acquireResource("storage", "DOT.DOT", rm.SHARED)
    assert "DOT.DOT" in str(e)

    with pytest.raises(ValueError):
        rm.acquireResource("DOT.DOT", "resource", rm.SHARED)
def testAcquireNonExistingResource(self):
    try:
        rm.acquireResource("null", "resource", rm.EXCLUSIVE)
    except KeyError:
        return

    self.fail("Managed to acquire a non existing resource")
def testResourceStatuses(self, tmp_manager):
    status = rm._getResourceStatus("storage", "resource")
    assert status == rm.STATUS_FREE

    exclusive1 = rm.acquireResource("storage", "resource", rm.EXCLUSIVE)
    status = rm._getResourceStatus("storage", "resource")
    assert status == rm.STATUS_LOCKED
    exclusive1.release()

    shared1 = rm.acquireResource("storage", "resource", rm.SHARED)
    status = rm._getResourceStatus("storage", "resource")
    assert status == rm.STATUS_SHARED
    shared1.release()

    with pytest.raises(KeyError):
        status = rm._getResourceStatus("null", "resource")
def testResourceStatuses(self):
    status = rm._getResourceStatus("storage", "resource")
    assert status == rm.LockState.free

    exclusive1 = rm.acquireResource("storage", "resource", rm.EXCLUSIVE)
    status = rm._getResourceStatus("storage", "resource")
    assert status == rm.LockState.locked
    exclusive1.release()

    shared1 = rm.acquireResource("storage", "resource", rm.SHARED)
    status = rm._getResourceStatus("storage", "resource")
    assert status == rm.LockState.shared
    shared1.release()

    with pytest.raises(
            KeyError,
            message="Managed to get status on a non existing resource"):
        status = rm._getResourceStatus("null", "resource")
def testResourceWrapper(self, tmp_manager):
    s = six.StringIO
    with rm.acquireResource("string", "test", rm.EXCLUSIVE) as resource:
        for attr in dir(s):
            if attr == "close":
                continue
            assert hasattr(resource, attr)
def testResourceWrapper(self):
    s = StringIO
    with rm.acquireResource("string", "test", rm.EXCLUSIVE) as resource:
        for attr in dir(s):
            if attr == "close":
                continue
            self.assertTrue(hasattr(resource, attr))
def testAccessAttributeNotExposedByWrapper(self):
    with rm.acquireResource("string", "test", rm.EXCLUSIVE) as resource:
        with pytest.raises(
                AttributeError,
                message="Managed to access an attribute not exposed by "
                        "wrapper"):
            resource.THERE_IS_NO_WAY_I_EXIST
def testResourceInvalidation(self):
    resource = rm.acquireResource("string", "test", rm.EXCLUSIVE)
    try:
        resource.write("dsada")
    except:
        self.fail()
    resource.release()
    self.assertRaises(Exception, resource.write, "test")
def testResourceStatuses(self):
    self.assertEqual(rm._getResourceStatus("storage", "resource"),
                     rm.LockState.free)
    exclusive1 = rm.acquireResource("storage", "resource", rm.EXCLUSIVE)
    self.assertEqual(rm._getResourceStatus("storage", "resource"),
                     rm.LockState.locked)
    exclusive1.release()
    shared1 = rm.acquireResource("storage", "resource", rm.SHARED)
    self.assertEqual(rm._getResourceStatus("storage", "resource"),
                     rm.LockState.shared)
    shared1.release()
    try:
        self.assertEqual(rm._getResourceStatus("null", "resource"),
                         rm.LockState.free)
    except KeyError:
        return

    self.fail("Managed to get status on a non existing resource")
def testRequestWithBadCallbackOnCancel(self):
    def callback(req, res):
        raise Exception("BUY MILK!")

    blocker = rm.acquireResource("string", "resource", rm.EXCLUSIVE)
    req = rm._registerResource(
        "string", "resource", rm.EXCLUSIVE, callback)
    req.cancel()
    blocker.release()
def testRequestWithBadCallbackOnCancel(self, tmp_manager):
    def callback(req, res):
        raise Exception("BUY MILK!")

    blocker = rm.acquireResource("string", "resource", rm.EXCLUSIVE)
    req = rm._registerResource("string", "resource", rm.EXCLUSIVE, callback)
    req.cancel()
    blocker.release()
def testAccessAttributeNotExposedByWrapper(self):
    with rm.acquireResource("string", "test", rm.EXCLUSIVE) as resource:
        try:
            resource.THERE_IS_NO_WAY_I_EXIST
        except AttributeError:
            return
        except Exception as ex:
            self.fail("Wrong exception was raised. "
                      "Expected AttributeError got %s" %
                      ex.__class__.__name__)

    self.fail("Managed to access an attribute not exposed by wrapper")
def _run(self):
    sd_manifest = sdCache.produce_manifest(self.params.sd_id)
    if not sd_manifest.supports_device_reduce():
        raise se.StorageDomainVersionError(
            "move device not supported for domain version %s"
            % sd_manifest.getVersion())
    # TODO: we assume at this point that the domain isn't active and can't
    # be activated - we need to ensure that.
    with rm.acquireResource(STORAGE, self.params.sd_id, rm.EXCLUSIVE):
        with sd_manifest.domain_id(self.host_id), \
                sd_manifest.domain_lock(self.host_id):
            sd_manifest.movePV(self.params.src_guid, self.params.dst_guids)
def _run(self):
    sd_manifest = sdCache.produce_manifest(self.params.sd_id)
    if not sd_manifest.supports_device_reduce():
        raise se.UnsupportedOperation(
            "Storage domain does not support reduce operation",
            sdUUID=sd_manifest.sdUUID(),
            sdType=sd_manifest.getStorageType())
    # TODO: we assume at this point that the domain isn't active and can't
    # be activated - we need to ensure that.
    with rm.acquireResource(STORAGE, self.params.sd_id, rm.EXCLUSIVE):
        with sd_manifest.domain_id(self.host_id), \
                sd_manifest.domain_lock(self.host_id):
            sd_manifest.reduceVG(self.params.guid)
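# A minimal sketch of the locking pattern shared by the two _run() variants
# above, assuming sd_manifest, host_id and sd_id are supplied by the caller
# and STORAGE is the storage-domain resource namespace used there; the
# function name is illustrative and not part of the source.
def example_exclusive_domain_operation(sd_manifest, host_id, sd_id):
    # Take the storage-domain resource exclusively, then hold the host id
    # and the cluster lock, so no other flow modifies the domain meanwhile.
    with rm.acquireResource(STORAGE, sd_id, rm.EXCLUSIVE):
        with sd_manifest.domain_id(host_id), \
                sd_manifest.domain_lock(host_id):
            pass  # perform the domain-level operation here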
def testFailCreateAfterSwitch(self):
    resources = []

    def callback(req, res):
        resources.append(res)

    exclusive1 = rm.acquireResource(
        "failAfterSwitch", "resource", rm.EXCLUSIVE)
    sharedReq1 = rm._registerResource(
        "failAfterSwitch", "resource", rm.SHARED, callback)
    exclusive1.release()
    self.assertTrue(sharedReq1.canceled())
    self.assertEqual(resources[0], None)
def testFailCreateAfterSwitch(self, tmp_manager):
    resources = []

    def callback(req, res):
        resources.append(res)

    exclusive1 = rm.acquireResource("failAfterSwitch", "resource",
                                    rm.EXCLUSIVE)
    sharedReq1 = rm._registerResource("failAfterSwitch", "resource",
                                      rm.SHARED, callback)
    exclusive1.release()
    assert sharedReq1.canceled()
    assert resources[0] is None
def llPrepare(self, rw=False, setrw=False):
    """
    Perform low level volume use preparation

    For the Block Volumes the actual LV activation is wrapped
    into the lvmActivation resource. It is initialized by the
    storage domain sitting on top of the encapsulating VG.
    We just use it here.
    """
    if setrw:
        self.setrw(rw=rw)
    access = rm.EXCLUSIVE if rw else rm.SHARED
    activation = rm.acquireResource(self.lvmActivationNamespace,
                                    self.volUUID, access)
    activation.autoRelease = False
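# A minimal usage sketch contrasting automatic release with the manual
# pattern llPrepare() uses via autoRelease = False. The "string"/"test"
# namespace and key are illustrative only (borrowed from the tests above),
# not the real lvmActivation namespace.
def example_release_patterns():
    # Automatic release: the resource is freed when the "with" block exits.
    with rm.acquireResource("string", "test", rm.EXCLUSIVE) as res:
        res.write("data")

    # Manual release: disabling autoRelease keeps the resource held past the
    # acquiring code path, as llPrepare() does for LV activation; the owner
    # must release it explicitly (shown here with try/finally for brevity).
    res = rm.acquireResource("string", "test", rm.EXCLUSIVE)
    res.autoRelease = False
    try:
        res.write("data")
    finally:
        res.release()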
def _run(self):
    vol_format = sc.name2type(self.vol_info.vol_format)

    with self.sd_manifest.domain_lock(self.host_id):
        image_res_ns = sd.getNamespace(sc.IMAGE_NAMESPACE,
                                       self.sd_manifest.sdUUID)
        with rm.acquireResource(image_res_ns, self.vol_info.img_id,
                                rm.EXCLUSIVE):
            artifacts = self.sd_manifest.get_volume_artifacts(
                self.vol_info.img_id, self.vol_info.vol_id)
            artifacts.create(
                self.vol_info.virtual_size, vol_format,
                self.vol_info.disk_type, self.vol_info.description,
                self.vol_info.parent, self.vol_info.initial_size)
            artifacts.commit()
def testRequestRecancel(self):
    resources = []

    def callback(req, res):
        resources.insert(0, res)

    blocker = rm.acquireResource("string", "resource", rm.EXCLUSIVE)
    req = rm._registerResource(
        "string", "resource", rm.EXCLUSIVE, callback)
    req.cancel()
    self.assertRaises(rm.RequestAlreadyProcessedError, req.cancel)
    blocker.release()
def _run(self):
    vol_format = sc.name2type(self.vol_info.vol_format)

    with self.sd_manifest.domain_lock(self.host_id):
        image_res_ns = rm.getNamespace(sc.IMAGE_NAMESPACE,
                                       self.sd_manifest.sdUUID)
        with rm.acquireResource(image_res_ns, self.vol_info.img_id,
                                rm.EXCLUSIVE):
            artifacts = self.sd_manifest.get_volume_artifacts(
                self.vol_info.img_id, self.vol_info.vol_id)
            artifacts.create(self.vol_info.virtual_size, vol_format,
                             self.vol_info.disk_type,
                             self.vol_info.description,
                             self.vol_info.parent,
                             self.vol_info.initial_size)
            artifacts.commit()
def testResourceAutorelease(self, tmp_manager):
    log.info("Acquiring resource", extra={'resource': "bob"})
    res = rm.acquireResource("storage", "resource", rm.SHARED)
    resProxy = proxy(res)
    res = None
    # wait for object to die
    log.info("Waiting for request")
    try:
        while True:
            resProxy.granted()
    except:
        pass
    log.info("Waiting for autoclean")
    while True:
        resStatus = rm._getResourceStatus("storage", "resource")
        if resStatus == rm.STATUS_FREE:
            break
        time.sleep(1)
def testResourceAutorelease(self):
    self.log.info("Acquiring resource", extra={'resource': "bob"})
    res = rm.acquireResource("storage", "resource", rm.SHARED)
    resProxy = proxy(res)
    res = None
    # wait for object to die
    self.log.info("Waiting for request")
    try:
        while True:
            resProxy.granted()
    except:
        pass
    self.log.info("Waiting for autoclean")
    while True:
        resStatus = rm._getResourceStatus("storage", "resource")
        if resStatus == rm.LockState.free:
            break
        time.sleep(1)
def testCancelExclusiveBetweenShared(self, tmp_manager):
    resources = []

    def callback(req, res):
        resources.insert(0, res)

    exclusive1 = rm.acquireResource("string", "resource", rm.EXCLUSIVE)
    sharedReq1 = rm._registerResource("string", "resource", rm.SHARED,
                                      callback)
    sharedReq2 = rm._registerResource("string", "resource", rm.SHARED,
                                      callback)
    exclusiveReq1 = rm._registerResource("string", "resource", rm.EXCLUSIVE,
                                         callback)
    sharedReq3 = rm._registerResource("string", "resource", rm.SHARED,
                                      callback)
    sharedReq4 = rm._registerResource("string", "resource", rm.SHARED,
                                      callback)

    assert not sharedReq1.granted()
    assert not sharedReq2.granted()
    assert not exclusiveReq1.granted()
    assert not sharedReq3.granted()
    assert not sharedReq4.granted()

    exclusiveReq1.cancel()
    resources.pop()

    assert not sharedReq1.granted()
    assert not sharedReq2.granted()
    assert not exclusiveReq1.granted()
    assert exclusiveReq1.canceled()
    assert not sharedReq3.granted()
    assert not sharedReq4.granted()

    exclusive1.release()

    assert sharedReq1.granted()
    assert sharedReq2.granted()
    assert sharedReq3.granted()
    assert sharedReq4.granted()

    while len(resources) > 0:
        resources.pop().release()
def testCancelExclusiveBetweenShared(self):
    resources = []

    def callback(req, res):
        resources.insert(0, res)

    exclusive1 = rm.acquireResource("string", "resource", rm.EXCLUSIVE)
    sharedReq1 = rm._registerResource(
        "string", "resource", rm.SHARED, callback)
    sharedReq2 = rm._registerResource(
        "string", "resource", rm.SHARED, callback)
    exclusiveReq1 = rm._registerResource(
        "string", "resource", rm.EXCLUSIVE, callback)
    sharedReq3 = rm._registerResource(
        "string", "resource", rm.SHARED, callback)
    sharedReq4 = rm._registerResource(
        "string", "resource", rm.SHARED, callback)

    self.assertFalse(sharedReq1.granted())
    self.assertFalse(sharedReq2.granted())
    self.assertFalse(exclusiveReq1.granted())
    self.assertFalse(sharedReq3.granted())
    self.assertFalse(sharedReq4.granted())

    exclusiveReq1.cancel()
    resources.pop()

    self.assertFalse(sharedReq1.granted())
    self.assertFalse(sharedReq2.granted())
    self.assertFalse(exclusiveReq1.granted())
    self.assertTrue(exclusiveReq1.canceled())
    self.assertFalse(sharedReq3.granted())
    self.assertFalse(sharedReq4.granted())

    exclusive1.release()

    self.assertTrue(sharedReq1.granted())
    self.assertTrue(sharedReq2.granted())
    self.assertTrue(sharedReq3.granted())
    self.assertTrue(sharedReq4.granted())

    while len(resources) > 0:
        resources.pop().release()
def testResourceLockSwitch(self, namespace="string"): resources = [] def callback(req, res): resources.insert(0, res) exclusive1 = rm.acquireResource(namespace, "resource", rm.EXCLUSIVE) sharedReq1 = rm._registerResource(namespace, "resource", rm.SHARED, callback) sharedReq2 = rm._registerResource(namespace, "resource", rm.SHARED, callback) exclusive2 = rm._registerResource(namespace, "resource", rm.EXCLUSIVE, callback) exclusive3 = rm._registerResource(namespace, "resource", rm.EXCLUSIVE, callback) sharedReq3 = rm._registerResource(namespace, "resource", rm.SHARED, callback) self.assertEqual(exclusive1.read(), "resource:exclusive") exclusive1.release() self.assertEqual(resources[-1].read(), "resource:shared") resources.pop().release() self.assertEqual(resources[-1].read(), "") resources.pop().release() self.assertEqual(resources[-1].read(), "resource:exclusive") resources.pop().release() self.assertEqual(resources[-1].read(), "") resources.pop().release() self.assertEqual(resources[-1].read(), "resource:shared") resources.pop().release() # This part is to stop pyflakes for complaining, the reason I need the # resourcesRefs alive is so that the manage will not autocollect during # the test hash(sharedReq1) hash(sharedReq2) hash(sharedReq3) hash(exclusive2) hash(exclusive3) hash(sharedReq3)
def testResourceLockSwitch(self, namespace="string"): resources = [] def callback(req, res): resources.insert(0, res) exclusive1 = rm.acquireResource(namespace, "resource", rm.EXCLUSIVE) sharedReq1 = rm._registerResource( namespace, "resource", rm.SHARED, callback) sharedReq2 = rm._registerResource( namespace, "resource", rm.SHARED, callback) exclusive2 = rm._registerResource( namespace, "resource", rm.EXCLUSIVE, callback) exclusive3 = rm._registerResource( namespace, "resource", rm.EXCLUSIVE, callback) sharedReq3 = rm._registerResource( namespace, "resource", rm.SHARED, callback) self.assertEqual(exclusive1.read(), "resource:exclusive") exclusive1.release() self.assertEqual(resources[-1].read(), "resource:shared") resources.pop().release() self.assertEqual(resources[-1].read(), "") resources.pop().release() self.assertEqual(resources[-1].read(), "resource:exclusive") resources.pop().release() self.assertEqual(resources[-1].read(), "") resources.pop().release() self.assertEqual(resources[-1].read(), "resource:shared") resources.pop().release() # This part is to stop pyflakes for complaining, the reason I need the # resourcesRefs alive is so that the manage will not autocollect during # the test hash(sharedReq1) hash(sharedReq2) hash(sharedReq3) hash(exclusive2) hash(exclusive3) hash(sharedReq3)
def testAcquireResourceExclusive(self):
    resources = []

    def callback(req, res):
        resources.append(res)

    exclusive1 = rm.acquireResource("storage", "resource", rm.EXCLUSIVE)
    sharedReq1 = rm._registerResource(
        "storage", "resource", rm.SHARED, callback)
    sharedReq2 = rm._registerResource(
        "storage", "resource", rm.SHARED, callback)
    exclusiveReq1 = rm._registerResource(
        "storage", "resource", rm.EXCLUSIVE, callback)
    exclusiveReq2 = rm._registerResource(
        "storage", "resource", rm.EXCLUSIVE, callback)

    self.assertFalse(sharedReq1.granted())
    self.assertFalse(sharedReq2.granted())
    self.assertFalse(exclusiveReq1.granted())
    self.assertFalse(exclusiveReq2.granted())

    exclusive1.release()

    self.assertTrue(sharedReq1.granted())
    self.assertTrue(sharedReq2.granted())
    self.assertFalse(exclusiveReq1.granted())
    self.assertFalse(exclusiveReq2.granted())

    resources.pop().release()  # Shared 1
    self.assertFalse(exclusiveReq1.granted())
    self.assertFalse(exclusiveReq2.granted())

    resources.pop().release()  # Shared 2
    self.assertTrue(exclusiveReq1.granted())
    self.assertFalse(exclusiveReq2.granted())

    resources.pop().release()  # exclusiveReq 1
    self.assertTrue(exclusiveReq2.granted())

    resources.pop().release()  # exclusiveReq 2
def testAcquireResourceExclusive(self, tmp_manager):
    resources = []

    def callback(req, res):
        resources.append(res)

    exclusive1 = rm.acquireResource("storage", "resource", rm.EXCLUSIVE)
    sharedReq1 = rm._registerResource("storage", "resource", rm.SHARED,
                                      callback)
    sharedReq2 = rm._registerResource("storage", "resource", rm.SHARED,
                                      callback)
    exclusiveReq1 = rm._registerResource("storage", "resource",
                                         rm.EXCLUSIVE, callback)
    exclusiveReq2 = rm._registerResource("storage", "resource",
                                         rm.EXCLUSIVE, callback)

    assert not sharedReq1.granted()
    assert not sharedReq2.granted()
    assert not exclusiveReq1.granted()
    assert not exclusiveReq2.granted()

    exclusive1.release()

    assert sharedReq1.granted()
    assert sharedReq2.granted()
    assert not exclusiveReq1.granted()
    assert not exclusiveReq2.granted()

    resources.pop().release()  # Shared 1
    assert not exclusiveReq1.granted()
    assert not exclusiveReq2.granted()

    resources.pop().release()  # Shared 2
    assert exclusiveReq1.granted()
    assert not exclusiveReq2.granted()

    resources.pop().release()  # exclusiveReq 1
    assert exclusiveReq2.granted()

    resources.pop().release()  # exclusiveReq 2
def testResourceLockSwitch(self, namespace="string"): resources = [] def callback(req, res): resources.insert(0, res) exclusive1 = rm.acquireResource(namespace, "resource", rm.EXCLUSIVE) sharedReq1 = rm._registerResource( namespace, "resource", rm.SHARED, callback) sharedReq2 = rm._registerResource( namespace, "resource", rm.SHARED, callback) exclusive2 = rm._registerResource( namespace, "resource", rm.EXCLUSIVE, callback) exclusive3 = rm._registerResource( namespace, "resource", rm.EXCLUSIVE, callback) sharedReq3 = rm._registerResource( namespace, "resource", rm.SHARED, callback) self.assertEqual(exclusive1.read(), "resource:exclusive") exclusive1.release() self.assertEqual(resources[-1].read(), "resource:shared") resources.pop().release() self.assertEqual(resources[-1].read(), "") resources.pop().release() self.assertEqual(resources[-1].read(), "resource:exclusive") resources.pop().release() self.assertEqual(resources[-1].read(), "") resources.pop().release() self.assertEqual(resources[-1].read(), "resource:shared") resources.pop().release() # Silense flake8 unused local variables warnings. sharedReq1 sharedReq2 exclusive2 exclusive3 sharedReq3
def testResourceLockSwitch(self, namespace, tmp_manager):
    resources = []

    def callback(req, res):
        resources.insert(0, res)

    exclusive1 = rm.acquireResource(namespace, "resource", rm.EXCLUSIVE)
    sharedReq1 = rm._registerResource(namespace, "resource", rm.SHARED,
                                      callback)
    sharedReq2 = rm._registerResource(namespace, "resource", rm.SHARED,
                                      callback)
    exclusive2 = rm._registerResource(namespace, "resource", rm.EXCLUSIVE,
                                      callback)
    exclusive3 = rm._registerResource(namespace, "resource", rm.EXCLUSIVE,
                                      callback)
    sharedReq3 = rm._registerResource(namespace, "resource", rm.SHARED,
                                      callback)

    assert exclusive1.read() == "resource:exclusive"
    exclusive1.release()
    assert resources[-1].read() == "resource:shared"
    resources.pop().release()
    assert resources[-1].read() == ""
    resources.pop().release()
    assert resources[-1].read() == "resource:exclusive"
    resources.pop().release()
    assert resources[-1].read() == ""
    resources.pop().release()
    assert resources[-1].read() == "resource:shared"
    resources.pop().release()

    # Silence flake8 unused local variables warnings.
    sharedReq1
    sharedReq2
    exclusive2
    exclusive3
    sharedReq3
def testAcquireResourceShared(self):
    res1 = rm.acquireResource("storage", "resource", rm.SHARED)
    res2 = rm.acquireResource("storage", "resource", rm.SHARED, 10)

    res1.release()
    res2.release()
def testAcquireInvalidLockType(self, tmp_manager):
    with pytest.raises(rm.InvalidLockType) as e:
        rm.acquireResource("storage", "resource", "invalid_locktype")
    assert "invalid_locktype" in str(e)
def testAcquireNonExistingResource(self, tmp_manager):
    with pytest.raises(KeyError):
        rm.acquireResource("null", "resource", rm.EXCLUSIVE)
def testAcquireResourceShared(self, tmp_manager):
    res1 = rm.acquireResource("storage", "resource", rm.SHARED)
    res2 = rm.acquireResource("storage", "resource", rm.SHARED, 10)

    res1.release()
    res2.release()
def testResourceAcquireTimeout(self):
    exclusive1 = rm.acquireResource("string", "resource", rm.EXCLUSIVE)
    self.assertRaises(rm.RequestTimedOutError, rm.acquireResource,
                      "string", "resource", rm.EXCLUSIVE, 1)
    exclusive1.release()
def testResourceInvalidation(self, tmp_manager):
    resource = rm.acquireResource("string", "test", rm.EXCLUSIVE)
    resource.write("dsada")
    resource.release()
    with pytest.raises(Exception):
        resource.write("test")
def testResourceAcquireInvalidTimeout(self, tmp_manager):
    with pytest.raises(TypeError):
        rm.acquireResource("string", "resource", rm.EXCLUSIVE, "A")
def __getResourceCandidatesList(self, resourceName, lockType):
    """
    Return a list of lock candidates (template and volumes)
    """
    # Must be imported here due to import cycles.
    # TODO: Move getChain to another module so we can use a normal import.
    import vdsm.storage.image as image

    volResourcesList = []
    template = None
    dom = sdCache.produce(sdUUID=self.sdUUID)
    # Get the list of the volumes
    repoPath = os.path.join(sc.REPO_DATA_CENTER, dom.getPools()[0])
    try:
        chain = image.Image(repoPath).getChain(sdUUID=self.sdUUID,
                                               imgUUID=resourceName)
    except se.ImageDoesNotExistInSD:
        log.debug("Image %s does not exist in domain %s",
                  resourceName, self.sdUUID)
        return []

    # Check whether the chain is built on top of a template, or is
    # standalone.
    pvol = chain[0].getParentVolume()
    if pvol:
        template = pvol.volUUID
    elif chain[0].isShared():
        # Image of the template itself, with no other volumes in the chain.
        template = chain[0].volUUID
        del chain[:]

    volUUIDChain = [vol.volUUID for vol in chain]
    volUUIDChain.sort()

    # Activate all volumes in the chain at once.
    # We may attempt to activate the same volumes again further down the
    # flow with no consequence, since they are already active.
    # TODO: Fix the resource framework to hold images instead of specific
    # volumes. This assumes that chains cannot spread over more than one SD.
    if dom.__class__.__name__ == "BlockStorageDomain":
        lvm.activateLVs(self.sdUUID, volUUIDChain)

    failed = False
    # Acquire template locks:
    # - 'lockType' for the template's image itself
    # - Always a 'shared' lock for an image based on the template
    try:
        if template:
            if len(volUUIDChain) > 0:
                volRes = rm.acquireResource(
                    self.volumeResourcesNamespace, template, rm.SHARED,
                    timeout=self.resource_default_timeout)
            else:
                volRes = rm.acquireResource(
                    self.volumeResourcesNamespace, template, lockType,
                    timeout=self.resource_default_timeout)
            volResourcesList.append(volRes)

        # Acquire 'lockType' volume locks
        for volUUID in volUUIDChain:
            volRes = rm.acquireResource(
                self.volumeResourcesNamespace, volUUID, lockType,
                timeout=self.resource_default_timeout)
            volResourcesList.append(volRes)
    except (rm.RequestTimedOutError, se.ResourceAcqusitionFailed) as e:
        log.debug("Cannot acquire volume resource (%s)", str(e))
        failed = True
        raise
    except Exception:
        log.debug("Cannot acquire volume resource", exc_info=True)
        failed = True
        raise
    finally:
        if failed:
            # Release the already acquired template/volume locks
            for volRes in volResourcesList:
                volRes.release()

    return volResourcesList
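# A hypothetical caller sketch (not taken from the source): whoever holds the
# list returned by __getResourceCandidatesList() must eventually release
# every acquired volume resource, typically in reverse acquisition order.
def example_release_candidates(volResourcesList):
    for volRes in reversed(volResourcesList):
        volRes.release()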
def testRereleaseResource(self):
    res = rm.acquireResource("string", "resource", rm.EXCLUSIVE)
    res.release()
    res.release()