"(id: %s)", self._sdUUID, hostId) def releaseHostId(self, hostId, async, unused): with self._lock: self.log.info("Releasing host id for domain %s (id: %s)", self._sdUUID, hostId) try: sanlock.rem_lockspace(self._sdUUID, hostId, self._idsPath, async=async, unused=unused) except sanlock.SanlockException as e: if e.errno != errno.ENOENT: raise se.ReleaseHostIdFailure(self._sdUUID, e) self.log.debug( "Host id for domain %s released successfully " "(id: %s)", self._sdUUID, hostId) def hasHostId(self, hostId): with self._lock: try: return sanlock.inq_lockspace(self._sdUUID, hostId, self._idsPath) except sanlock.SanlockException: self.log.debug( "Unable to inquire sanlock lockspace " "status, returning False", exc_info=True)
class LocalLock(object): log = logging.getLogger("storage.LocalLock") LVER = 0 _globalLockMap = {} _globalLockMapSync = threading.Lock() def __init__(self, sdUUID, idsPath, lease, *args): """ Note: args unused, needed only by legacy locks. """ self._sdUUID = sdUUID self._idsPath = idsPath self._lease = lease @property def supports_multiple_leases(self): # Current implemention use single lock using the ids file (see # _getLease). We can support multiple leases, but I'm not sure if there # is any value in local volume leases. return False def initLock(self, lease): if lease != self._lease: raise MultipleLeasesNotSupported("init", lease) # The LocalLock initialization is based on SANLock to maintain on-disk # domain format consistent across all the V3 types. # The advantage is that the domain can be exposed as an NFS/GlusterFS # domain later on without any modification. # XXX: Keep in mind that LocalLock and SANLock cannot detect each other # and therefore concurrently using the same domain as local domain and # NFS domain (or any other shared file-based domain) will certainly # lead to disastrous consequences. 
initSANLock(self._sdUUID, self._idsPath, lease) def setParams(self, *args): pass def getReservedId(self): return MAX_HOST_ID def _getLease(self): return self._globalLockMap.get(self._sdUUID, (None, None)) def acquireHostId(self, hostId, async): with self._globalLockMapSync: currentHostId, lockFile = self._getLease() if currentHostId is not None and currentHostId != hostId: self.log.error("Different host id already acquired (id: %s)", currentHostId) raise se.AcquireHostIdFailure(self._sdUUID) self._globalLockMap[self._sdUUID] = (hostId, lockFile) self.log.debug("Host id for domain %s successfully acquired (id: %s)", self._sdUUID, hostId) def releaseHostId(self, hostId, async, unused): with self._globalLockMapSync: currentHostId, lockFile = self._getLease() if currentHostId is not None and currentHostId != hostId: self.log.error("Different host id acquired (id: %s)", currentHostId) raise se.ReleaseHostIdFailure(self._sdUUID) if lockFile is not None: self.log.error("Cannot release host id when lock is acquired") raise se.ReleaseHostIdFailure(self._sdUUID) del self._globalLockMap[self._sdUUID] self.log.debug("Host id for domain %s released successfully (id: %s)", self._sdUUID, hostId)
class SANLock(object):
    """
    Host-id lock for shared storage domains, backed by the sanlock daemon.
    """

    # Map sanlock host states to this module's HOST_STATUS_* constants.
    STATUS_NAME = {
        sanlock.HOST_UNKNOWN: HOST_STATUS_UNKNOWN,
        sanlock.HOST_FREE: HOST_STATUS_FREE,
        sanlock.HOST_LIVE: HOST_STATUS_LIVE,
        sanlock.HOST_FAIL: HOST_STATUS_FAIL,
        sanlock.HOST_DEAD: HOST_STATUS_DEAD,
    }

    # Acquiring a host id takes about 20-30 seconds when all is good, but it
    # may take 2-3 minutes if a host was not shutdown properly (e.g. sanlock
    # was killed).
    ACQUIRE_HOST_ID_TIMEOUT = 180

    log = logging.getLogger("storage.SANLock")

    # Class-level sanlock daemon connection state, guarded by _sanlock_lock.
    _sanlock_fd = None
    _sanlock_lock = threading.Lock()

    def __init__(self, sdUUID, idsPath, lease, *args):
        """
        Note: lease and args are unused, needed by legacy locks.
        """
        self._lock = threading.Lock()
        self._sdUUID = sdUUID
        self._idsPath = idsPath
        # Signals that the host id is acquired; invalidated while the
        # lockspace is being released so waiters fail fast.
        self._ready = concurrent.ValidatingEvent()

    @property
    def supports_multiple_leases(self):
        return True

    def initLock(self, lease):
        initSANLock(self._sdUUID, self._idsPath, lease)

    def setParams(self, *args):
        # No tunable parameters; kept for interface compatibility.
        pass

    def getReservedId(self):
        return MAX_HOST_ID

    def acquireHostId(self, hostId, async):
        """
        Join this domain's sanlock lockspace with hostId.

        When async is true the request is started and this method returns
        without waiting for the lockspace join to complete.

        Raises se.AcquireHostIdFailure on sanlock errors that cannot be
        reconciled (see the errno handling below).
        """
        self.log.info("Acquiring host id for domain %s (id=%s, async=%s)",
                      self._sdUUID, hostId, async)

        # Ensure that future calls to acquire() will wait until host id is
        # acquired.
        self._ready.valid = True

        with self._lock:
            try:
                with utils.stopwatch("sanlock.add_lockspace"):
                    sanlock.add_lockspace(self._sdUUID, hostId,
                                          self._idsPath, async=async)
            except sanlock.SanlockException as e:
                if e.errno == errno.EINPROGRESS:
                    # if the request is not asynchronous wait for the ongoing
                    # lockspace operation to complete else silently continue,
                    # the host id has been acquired or it's in the process of
                    # being acquired (async).
                    #
                    # NOTE(review): in the async EINPROGRESS path _ready is
                    # not set here - presumably it is set when the async
                    # acquisition is observed complete elsewhere; confirm.
                    if not async:
                        if not sanlock.inq_lockspace(
                                self._sdUUID, hostId, self._idsPath,
                                wait=True):
                            raise se.AcquireHostIdFailure(self._sdUUID, e)
                        self.log.info(
                            "Host id for domain %s successfully "
                            "acquired (id=%s, async=%s)",
                            self._sdUUID, hostId, async)
                        self._ready.set()
                elif e.errno == errno.EEXIST:
                    # Already a member of this lockspace - treated as
                    # success.
                    self.log.info(
                        "Host id for domain %s already acquired "
                        "(id=%s, async=%s)", self._sdUUID, hostId, async)
                    self._ready.set()
                else:
                    raise se.AcquireHostIdFailure(self._sdUUID, e)
            else:
                # add_lockspace succeeded; for a synchronous request the
                # host id is now held. For async, completion is pending.
                if not async:
                    self.log.info(
                        "Host id for domain %s successfully "
                        "acquired (id=%s, async=%s)",
                        self._sdUUID, hostId, async)
                    self._ready.set()

    def releaseHostId(self, hostId, async, unused):
        """
        Leave this domain's sanlock lockspace.

        Raises se.ReleaseHostIdFailure on any sanlock error except ENOENT
        (lockspace already gone), which is treated as success.
        """
        self.log.info("Releasing host id for domain %s (id: %s)",
                      self._sdUUID, hostId)

        # Ensure that future calls to acquire() will fail quickly.
        self._ready.valid = False

        with self._lock:
            try:
                sanlock.rem_lockspace(self._sdUUID, hostId, self._idsPath,
                                      async=async, unused=unused)
            except sanlock.SanlockException as e:
                # ENOENT: the lockspace does not exist - nothing to
                # release, so not an error.
                if e.errno != errno.ENOENT:
                    raise se.ReleaseHostIdFailure(self._sdUUID, e)

            self.log.info(
                "Host id for domain %s released successfully "
                "(id: %s)", self._sdUUID, hostId)