class LockManager(object):
    """
    Implements locking functionality using a custom storage layer.

    Conflict checking (RFC 4918 lock model) happens here; persistence of
    lock records is delegated to the injected storage object.
    """

    LOCK_TIME_OUT_DEFAULT = 604800  # 1 week, in seconds

    def __init__(self, storage):
        """
        storage: LockManagerStorage object (duck-typed; only the presence of
        `get_lock_list` is asserted here).
        """
        assert hasattr(storage, "get_lock_list")
        self._lock = ReadWriteLock()
        self.storage = storage
        self.storage.open()

    def __del__(self):
        # NOTE(review): relying on __del__ for cleanup is fragile (it may run
        # late or not at all) -- confirm that shutdown code also closes the
        # storage explicitly.
        self.storage.close()

    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__, self.storage)

    def _dump(self, msg=""):
        """Log every known lock, grouped by token, URL, principal and owner."""
        urlDict = {}  # { <url>: [<tokenlist>] }
        ownerDict = {}  # { <LOCKOWNER>: [<tokenlist>] }
        userDict = {}  # { <LOCKUSER>: [<tokenlist>] }
        tokenDict = {}  # { <token>: <LOCKURLS> }
        _logger.info("{}: {}".format(self, msg))
        # Walk ALL locks (root '/' plus children) and index them four ways.
        for lock in self.storage.get_lock_list(
                "/", includeRoot=True, includeChildren=True, tokenOnly=False):
            tok = lock["token"]
            tokenDict[tok] = lock_string(lock)
            userDict.setdefault(lock["principal"], []).append(tok)
            ownerDict.setdefault(lock["owner"], []).append(tok)
            urlDict.setdefault(lock["root"], []).append(tok)
        _logger.info("Locks:\n{}".format(
            pformat(tokenDict, indent=0, width=255)))
        if tokenDict:
            _logger.info("Locks by URL:\n{}".format(
                pformat(urlDict, indent=4, width=255)))
            _logger.info("Locks by principal:\n{}".format(
                pformat(userDict, indent=4, width=255)))
            _logger.info("Locks by owner:\n{}".format(
                pformat(ownerDict, indent=4, width=255)))

    def _generate_lock(self, principal, locktype, lockscope, lockdepth,
                       lockowner, path, timeout):
        """Build and return a new lock dictionary.

        principal: Name of the principal.
        locktype: Must be 'write'.
        lockscope: Must be 'shared' or 'exclusive'.
        lockdepth: Must be '0' or 'infinity'.
        lockowner: String identifying the owner.
        path: Resource URL.
        timeout: Seconds to live (None: default; negative: infinite).

        This function does NOT check, if the new lock creates a conflict!
        """
        if timeout is None:
            timeout = LockManager.LOCK_TIME_OUT_DEFAULT
        elif timeout < 0:
            # Any negative value is normalized to -1 ('infinite')
            timeout = -1
        lockDict = {
            "root": path,
            "type": locktype,
            "scope": lockscope,
            "depth": lockdepth,
            "owner": lockowner,
            "timeout": timeout,
            "principal": principal,
        }
        # NOTE(review): the storage call below is commented out, so the new
        # lock is apparently never persisted by this method -- confirm whether
        # the caller stores it, or whether this line should be active.
        # self.storage.create(path, lockDict)
        return lockDict

    def acquire(
        self,
        url,
        locktype,
        lockscope,
        lockdepth,
        lockowner,
        timeout,
        principal,
        tokenList,
    ):
        """Check for permissions and acquire a lock.

        On success return new lock dictionary.
        On error raise a DAVError with an embedded DAVErrorCondition.
        """
        url = normalize_lock_root(url)
        self._lock.acquire_write()
        try:
            # Raises DAVError on conflict:
            self._check_lock_permission(url, locktype, lockscope, lockdepth,
                                        tokenList, principal)
            return self._generate_lock(principal, locktype, lockscope,
                                       lockdepth, lockowner, url, timeout)
        finally:
            self._lock.release()

    def refresh(self, token, timeout=None):
        """Set new timeout for lock, if existing and valid."""
        if timeout is None:
            timeout = LockManager.LOCK_TIME_OUT_DEFAULT
        return self.storage.refresh(token, timeout)

    def get_lock(self, token, key=None):
        """Return lockDict, or None, if not found or invalid.

        Side effect: if lock is expired, it will be purged and None is
        returned.

        key: name of lock attribute that will be returned instead of a
        dictionary.
        """
        assert key in (
            None,
            "type",
            "scope",
            "depth",
            "owner",
            "root",
            "timeout",
            "principal",
            "token",
        )
        lock = self.storage.get(token)
        if key is None or lock is None:
            return lock
        return lock[key]

    def release(self, token):
        """Delete lock."""
        self.storage.delete(token)

    def is_token_locked_by_user(self, token, principal):
        """Return True, if <token> exists, is valid, and bound to <principal>."""
        return self.get_lock(token, "principal") == principal

    def get_url_lock_list(self, url):
        """Return list of lockDict, if <url> is protected by at least one
        direct, valid lock.

        Side effect: expired locks for this url are purged.
        """
        url = normalize_lock_root(url)
        lockList = self.storage.get_lock_list(
            url, includeRoot=True, includeChildren=False, tokenOnly=False)
        return lockList

    def get_indirect_url_lock_list(self, url, principal=None):
        """Return a list of valid lockDicts, that protect <path> directly or
        indirectly.

        If a principal is given, only locks owned by this principal are
        returned.
        Side effect: expired locks for this path and all parents are purged.
        """
        url = normalize_lock_root(url)
        lockList = []
        u = url
        while u:
            ll = self.storage.get_lock_list(
                u, includeRoot=True, includeChildren=False, tokenOnly=False)
            for l in ll:
                if u != url and l["depth"] != "infinity":
                    continue  # We only consider parents with Depth: infinity
                # TODO: handle shared locks in some way?
                # if (l["scope"] == "shared" and lockscope == "shared"
                #         and principal != l["principal"]):
                #     continue  # Only compatible with shared locks by other users
                if principal is None or principal == l["principal"]:
                    lockList.append(l)
            u = util.get_uri_parent(u)
        return lockList

    def is_url_locked(self, url):
        """Return True, if url is directly locked."""
        lockList = self.get_url_lock_list(url)
        return len(lockList) > 0

    def is_url_locked_by_token(self, url, locktoken):
        """Check, if url (or any of it's parents) is locked by locktoken."""
        lockUrl = self.get_lock(locktoken, "root")
        return lockUrl and util.is_equal_or_child_uri(lockUrl, url)

    def remove_all_locks_from_url(self, url):
        """Release every direct lock on <url> (under the write lock)."""
        self._lock.acquire_write()
        try:
            lockList = self.get_url_lock_list(url)
            for lock in lockList:
                self.release(lock["token"])
        finally:
            self._lock.release()

    def _check_lock_permission(self, url, locktype, lockscope, lockdepth,
                               tokenList, principal):
        """Check, if <principal> can lock <url>, otherwise raise an error.

        If locking <url> would create a conflict, DAVError(HTTP_LOCKED) is
        raised. An embedded DAVErrorCondition contains the conflicting
        resource.

        @see http://www.webdav.org/specs/rfc4918.html#lock-model
        - Parent locks WILL NOT be conflicting, if they are depth-0.
        - Exclusive depth-infinity parent locks WILL be conflicting, even if
          they are owned by <principal>.
        - Child locks WILL NOT be conflicting, if we request a depth-0 lock.
        - Exclusive child locks WILL be conflicting, even if they are owned by
          <principal>. (7.7)
        - It is not enough to check whether a lock is owned by <principal>,
          but also the token must be passed with the request. (Because
          <principal> may run two different applications on his client.)
        - <principal> cannot lock-exclusive, if he holds a parent shared-lock.
          (This would only make sense, if he was the only shared-lock holder.)
        - TODO: litmus tries to acquire a shared lock on one resource twice
          (locks: 27 'double_sharedlock') and fails, when we return
          HTTP_LOCKED. So we allow multi shared locks on a resource even for
          the same principal.

        @param url: URL that shall be locked
        @param locktype: "write"
        @param lockscope: "shared"|"exclusive"
        @param lockdepth: "0"|"infinity"
        @param tokenList: list of lock tokens, that the user submitted in
            If: header
        @param principal: name of the principal requesting a lock

        @return: None (or raise)
        """
        assert locktype == "write"
        assert lockscope in ("shared", "exclusive")
        assert lockdepth in ("0", "infinity")
        _logger.debug("checkLockPermission({}, {}, {}, {})".format(
            url, lockscope, lockdepth, principal))
        # Error precondition to collect conflicting URLs
        errcond = DAVErrorCondition(PRECONDITION_CODE_LockConflict)
        self._lock.acquire_read()
        try:
            # Check url and all parents for conflicting locks
            u = url
            while u:
                ll = self.get_url_lock_list(u)
                for l in ll:
                    _logger.debug(" check parent {}, {}".format(
                        u, lock_string(l)))
                    if u != url and l["depth"] != "infinity":
                        # We only consider parents with Depth: infinity
                        continue
                    elif l["scope"] == "shared" and lockscope == "shared":
                        # Only compatible with shared locks (even by same
                        # principal)
                        continue
                    # Lock conflict: any other lock denies, regardless of
                    # ownership (see docstring).
                    _logger.debug(" -> DENIED due to locked parent {}".format(
                        lock_string(l)))
                    errcond.add_href(l["root"])
                u = util.get_uri_parent(u)
            if lockdepth == "infinity":
                # Check child URLs for conflicting locks
                childLocks = self.storage.get_lock_list(
                    url, includeRoot=False, includeChildren=True,
                    tokenOnly=False)
                for l in childLocks:
                    assert util.is_child_uri(url, l["root"])
                    # if util.is_child_uri(url, l["root"]):
                    _logger.debug(" -> DENIED due to locked child {}".format(
                        lock_string(l)))
                    errcond.add_href(l["root"])
        finally:
            self._lock.release()
        # If there were conflicts, raise HTTP_LOCKED for <url>, and pass
        # conflicting resource with 'no-conflicting-lock' precondition
        if len(errcond.hrefs) > 0:
            raise DAVError(HTTP_LOCKED, errcondition=errcond)
        return

    def check_write_permission(self, url, depth, tokenList, principal):
        """Check, if <principal> can modify <url>, otherwise raise HTTP_LOCKED.

        If modifying <url> is prevented by a lock, DAVError(HTTP_LOCKED) is
        raised. An embedded DAVErrorCondition contains the conflicting locks.

        <url> may be modified by <principal>, if it is not currently locked
        directly or indirectly (i.e. by a locked parent).
        For depth-infinity operations, <url> also must not have locked
        children.

        It is not enough to check whether a lock is owned by <principal>, but
        also the token must be passed with the request. Because <principal>
        may run two different applications.

        See http://www.webdav.org/specs/rfc4918.html#lock-model
            http://www.webdav.org/specs/rfc4918.html#rfc.section.7.4

        TODO: verify assumptions:
        - Parent locks WILL NOT be conflicting, if they are depth-0.
        - Exclusive child locks WILL be conflicting, even if they are owned by
          <principal>.

        @param url: URL that shall be modified, created, moved, or deleted
        @param depth: "0"|"infinity"
        @param tokenList: list of lock tokens, that the principal submitted in
            If: header
        @param principal: name of the principal requesting a lock

        @return: None or raise error
        """
        assert compat.is_native(url)
        assert depth in ("0", "infinity")
        _logger.debug("check_write_permission({}, {}, {}, {})".format(
            url, depth, tokenList, principal))
        # Error precondition to collect conflicting URLs
        errcond = DAVErrorCondition(PRECONDITION_CODE_LockConflict)
        self._lock.acquire_read()
        try:
            # Check url and all parents for conflicting locks
            u = url
            while u:
                ll = self.get_url_lock_list(u)
                _logger.debug(" checking {}".format(u))
                for l in ll:
                    _logger.debug(" l={}".format(lock_string(l)))
                    if u != url and l["depth"] != "infinity":
                        # We only consider parents with Depth: infinity
                        continue
                    elif principal == l["principal"] and l[
                            "token"] in tokenList:
                        # User owns this lock
                        continue
                    else:
                        # Conflicting lock: either a foreign principal's, or
                        # our own but the token was not submitted.
                        _logger.debug(
                            " -> DENIED due to locked parent {}".format(
                                lock_string(l)))
                        errcond.add_href(l["root"])
                u = util.get_uri_parent(u)
            if depth == "infinity":
                # Check child URLs for conflicting locks
                childLocks = self.storage.get_lock_list(
                    url, includeRoot=False, includeChildren=True,
                    tokenOnly=False)
                for l in childLocks:
                    assert util.is_child_uri(url, l["root"])
                    # if util.is_child_uri(url, l["root"]):
                    _logger.debug(" -> DENIED due to locked child {}".format(
                        lock_string(l)))
                    errcond.add_href(l["root"])
        finally:
            self._lock.release()
        # If there were conflicts, raise HTTP_LOCKED for <url>, and pass
        # conflicting resource with 'no-conflicting-lock' precondition
        if len(errcond.hrefs) > 0:
            raise DAVError(HTTP_LOCKED, errcondition=errcond)
        return
class PropertyManager(object):
    """
    An in-memory property manager implementation using a dictionary.

    This is obviously not persistent, but should be enough in some cases.
    For a persistent implementation, see
    property_manager.ShelvePropertyManager().

    Layout of self._dict: { <normurl>: { <propname>: <value>, ... }, ... }.
    Mutations re-assign whole sub-dicts and call _sync(), so a Shelve-backed
    subclass (which returns copies) still picks up the changes.
    """

    def __init__(self):
        self._dict = None
        self._loaded = False
        self._lock = ReadWriteLock()
        self._verbose = 2

    def __repr__(self):
        return "PropertyManager"

    def __del__(self):
        if __debug__ and self._verbose >= 2:
            self._check()
        self._close()

    def _lazyOpen(self):
        """Initialize the backing dict on first use (under the write lock)."""
        _logger.debug("_lazyOpen()")
        self._lock.acquireWrite()
        try:
            self._dict = {}
            self._loaded = True
        finally:
            self._lock.release()

    def _sync(self):
        """Overloaded by Shelve implementation; no-op here."""
        pass

    def _close(self):
        _logger.debug("_close()")
        self._lock.acquireWrite()
        try:
            self._dict = None
            self._loaded = False
        finally:
            self._lock.release()

    def _check(self, msg=""):
        """Sanity check: return True, if all entries can be formatted."""
        try:
            if not self._loaded:
                return True
            for k, v in self._dict.items():
                # Formatting is the smoke test; the result is discarded.
                "%s, %s" % (k, v)
            return True
        except Exception:
            _logger.exception("%s _check: ERROR %s"
                              % (self.__class__.__name__, msg))
            return False

    def _dump(self, msg="", out=None):
        """Print all properties to <out> (default: stdout) for debugging."""
        if out is None:
            out = sys.stdout
        print("%s(%s): %s" % (self.__class__.__name__, self.__repr__(), msg),
              file=out)
        if not self._loaded:
            self._lazyOpen()
            if self._verbose >= 2:
                return  # Already dumped in _lazyOpen
        try:
            for k, v in self._dict.items():
                print(" ", k, file=out)
                for k2, v2 in v.items():
                    try:
                        print(" %s: '%s'" % (k2, v2), file=out)
                    except Exception as e:
                        print(" %s: ERROR %s" % (k2, e), file=out)
            out.flush()
        except Exception as e:
            util.warn("PropertyManager._dump() ERROR: %s" % e)

    def getProperties(self, normurl, environ=None):
        """Return the list of property names stored for <normurl>."""
        _logger.debug("getProperties(%s)" % normurl)
        self._lock.acquireRead()
        try:
            if not self._loaded:
                self._lazyOpen()
            returnlist = []
            if normurl in self._dict:
                for propdata in self._dict[normurl].keys():
                    returnlist.append(propdata)
            return returnlist
        finally:
            self._lock.release()

    def getProperty(self, normurl, propname, environ=None):
        """Return the value of <propname> for <normurl>, or None."""
        _logger.debug("getProperty(%s, %s)" % (normurl, propname))
        self._lock.acquireRead()
        try:
            if not self._loaded:
                self._lazyOpen()
            if normurl not in self._dict:
                return None
            # TODO: sometimes we get exceptions here: (catch or otherwise make
            # more robust?)
            try:
                resourceprops = self._dict[normurl]
            except Exception as e:
                _logger.exception("getProperty(%s, %s) failed : %s"
                                  % (normurl, propname, e))
                raise
            return resourceprops.get(propname)
        finally:
            self._lock.release()

    def writeProperty(self, normurl, propname, propertyvalue, dryRun=False,
                      environ=None):
        """Store <propertyvalue> under <normurl>/<propname>.

        dryRun: if True, only validate arguments; nothing is written.
        """
        assert normurl and normurl.startswith("/")
        assert propname  # and propname.startswith("{")
        assert propertyvalue is not None
        _logger.debug("writeProperty(%s, %s, dryRun=%s):\n\t%s"
                      % (normurl, propname, dryRun, propertyvalue))
        if dryRun:
            return  # TODO: can we check anything here?
        self._lock.acquireWrite()
        try:
            if not self._loaded:
                self._lazyOpen()
            if normurl in self._dict:
                locatordict = self._dict[normurl]
            else:
                locatordict = {}  # dict([])
            locatordict[propname] = propertyvalue
            # This re-assignment is important, so Shelve realizes the change:
            self._dict[normurl] = locatordict
            self._sync()
            if __debug__ and self._verbose >= 2:
                self._check()
        finally:
            self._lock.release()

    def removeProperty(self, normurl, propname, dryRun=False, environ=None):
        """
        Specifying the removal of a property that does not exist is NOT an
        error.
        """
        _logger.debug("removeProperty(%s, %s, dryRun=%s)"
                      % (normurl, propname, dryRun))
        if dryRun:
            # TODO: can we check anything here?
            return
        self._lock.acquireWrite()
        try:
            if not self._loaded:
                self._lazyOpen()
            if normurl in self._dict:
                locatordict = self._dict[normurl]
                if propname in locatordict:
                    del locatordict[propname]
                    # This re-assignment is important, so Shelve realizes the
                    # change:
                    self._dict[normurl] = locatordict
                    self._sync()
            if __debug__ and self._verbose >= 2:
                self._check()
        finally:
            self._lock.release()

    def removeProperties(self, normurl, environ=None):
        """Drop all properties stored for <normurl> (no error if absent)."""
        _logger.debug("removeProperties(%s)" % normurl)
        self._lock.acquireWrite()
        try:
            if not self._loaded:
                self._lazyOpen()
            if normurl in self._dict:
                del self._dict[normurl]
                self._sync()
        finally:
            self._lock.release()

    def copyProperties(self, srcurl, desturl, environ=None):
        """Copy the property dict of <srcurl> to <desturl> (shallow copy)."""
        _logger.debug("copyProperties(%s, %s)" % (srcurl, desturl))
        self._lock.acquireWrite()
        try:
            if __debug__ and self._verbose >= 2:
                self._check()
            if not self._loaded:
                self._lazyOpen()
            if srcurl in self._dict:
                self._dict[desturl] = self._dict[srcurl].copy()
                self._sync()
            if __debug__ and self._verbose >= 2:
                self._check("after copy")
        finally:
            self._lock.release()

    def moveProperties(self, srcurl, desturl, withChildren, environ=None):
        """Move properties from <srcurl> to <desturl>.

        withChildren: if True, also move every entry below <srcurl>.
        """
        _logger.debug("moveProperties(%s, %s, %s)"
                      % (srcurl, desturl, withChildren))
        self._lock.acquireWrite()
        try:
            if __debug__ and self._verbose >= 2:
                self._check()
            if not self._loaded:
                self._lazyOpen()
            if withChildren:
                # Move srcurl\*
                # Fix: iterate over a snapshot of the keys -- deleting from
                # the dict while iterating .keys() raises RuntimeError on
                # Python 3.
                for url in list(self._dict.keys()):
                    if util.isEqualOrChildUri(srcurl, url):
                        d = url.replace(srcurl, desturl)
                        self._dict[d] = self._dict[url]
                        del self._dict[url]
            elif srcurl in self._dict:
                # Move srcurl only
                self._dict[desturl] = self._dict[srcurl]
                del self._dict[srcurl]
            self._sync()
            if __debug__ and self._verbose >= 2:
                self._check("after move")
        finally:
            self._lock.release()
class LockStorage(object):
    # NOTE(review): this module defines a second, identical `class LockStorage`
    # further down, which shadows this one at import time -- this copy appears
    # to be dead code; confirm and consider removing it.

    LOCK_TIME_OUT_DEFAULT = 604800  # 1 week, in seconds
    LOCK_TIME_OUT_MAX = 4 * 604800  # 1 month, in seconds

    def __init__(self):
        self._session = None  # todo Session()
        self._lock = ReadWriteLock()

    def __repr__(self):
        return "C'est bien mon verrou..."

    def __del__(self):
        pass

    def get_lock_db_from_token(self, token):
        # Raw DB lookup; performs no expiration check (unlike self.get()).
        return self._session.query(Lock).filter(Lock.token == token).one_or_none()

    def _flush(self):
        """Overloaded by Shelve implementation."""
        pass

    def open(self):
        """Called before first use.

        May be implemented to initialize a storage.
        """
        pass

    def close(self):
        """Called on shutdown."""
        pass

    def cleanup(self):
        """Purge expired locks (optional)."""
        pass

    def clear(self):
        """Delete all entries."""
        # NOTE(review): Query.all() returns a plain list, which has no
        # .delete() method -- this line would raise AttributeError. The bulk
        # delete should be issued on the Query itself
        # (`.query(Lock).delete(...)`). Confirm and fix.
        self._session.query(Lock).all().delete(synchronize_session=False)
        self._session.commit()

    def get(self, token):
        """Return a lock dictionary for a token.

        If the lock does not exist or is expired, None is returned.

        token:
            lock token
        Returns:
            Lock dictionary or <None>

        Side effect: if lock is expired, it will be purged and None is
        returned.
        """
        self._lock.acquireRead()
        try:
            lock_base = self._session.query(Lock).filter(Lock.token == token).one_or_none()
            if lock_base is None:
                # Lock not found: purge dangling URL2TOKEN entries
                _logger.debug("Lock purged dangling: %s" % token)
                self.delete(token)
                return None
            expire = float(lock_base.expire)
            if 0 <= expire < time.time():
                _logger.debug("Lock timed-out(%s): %s" % (expire, lockString(from_base_to_dict(lock_base))))
                self.delete(token)
                return None
            return from_base_to_dict(lock_base)
        finally:
            self._lock.release()

    def create(self, path, lock):
        """Create a direct lock for a resource path.

        path:
            Normalized path (utf8 encoded string, no trailing '/')
        lock:
            lock dictionary, without a token entry
        Returns:
            New unique lock token.: <lock

        **Note:** the lock dictionary may be modified on return:

        - lock['root'] is ignored and set to the normalized <path>
        - lock['timeout'] may be normalized and shorter than requested
        - lock['token'] is added
        """
        self._lock.acquireWrite()
        try:
            # We expect only a lock definition, not an existing lock
            assert lock.get("token") is None
            assert lock.get("expire") is None, "Use timeout instead of expire"
            assert path and "/" in path

            # Normalize root: /foo/bar
            org_path = path
            path = normalizeLockRoot(path)
            lock["root"] = path

            # Normalize timeout from ttl to expire-date
            # NOTE(review): float(None) raises TypeError, so the `is None`
            # branch below is unreachable when 'timeout' is missing -- the
            # None test should happen before the float() conversion.
            timeout = float(lock.get("timeout"))
            if timeout is None:
                timeout = LockStorage.LOCK_TIME_OUT_DEFAULT
            elif timeout < 0 or timeout > LockStorage.LOCK_TIME_OUT_MAX:
                timeout = LockStorage.LOCK_TIME_OUT_MAX
            lock["timeout"] = timeout
            lock["expire"] = time.time() + timeout

            validateLock(lock)
            token = generateLockToken()
            lock["token"] = token

            # Store lock
            lock_db = from_dict_to_base(lock)
            self._session.add(lock_db)
            # Store locked path reference
            url2token = Url2Token(
                path=path,
                token=token
            )
            self._session.add(url2token)
            self._session.commit()
            self._flush()
            _logger.debug("LockStorageDict.set(%r): %s" % (org_path, lockString(lock)))
            return lock
        finally:
            self._lock.release()

    def refresh(self, token, timeout):
        """Modify an existing lock's timeout.

        token:
            Valid lock token.
        timeout:
            Suggested lifetime in seconds (-1 for infinite).
            The real expiration time may be shorter than requested!
        Returns:
            Lock dictionary.
            Raises ValueError, if token is invalid.
        """
        # NOTE(review): the lookup happens before acquireWrite(); a concurrent
        # delete between query and update is possible -- confirm intended.
        lock_db = self._session.query(Lock).filter(Lock.token == token).one_or_none()
        assert lock_db is not None, "Lock must exist"
        assert timeout == -1 or timeout > 0
        if timeout < 0 or timeout > LockStorage.LOCK_TIME_OUT_MAX:
            timeout = LockStorage.LOCK_TIME_OUT_MAX
        self._lock.acquireWrite()
        try:
            # Note: shelve dictionary returns copies, so we must reassign values:
            lock_db.timeout = timeout
            lock_db.expire = time.time() + timeout
            self._session.commit()
            self._flush()
        finally:
            self._lock.release()
        return from_base_to_dict(lock_db)

    def delete(self, token):
        """Delete lock.

        Returns True on success. False, if token does not exist, or is
        expired.
        """
        self._lock.acquireWrite()
        try:
            lock_db = self._session.query(Lock).filter(Lock.token == token).one_or_none()
            # NOTE(review): this formats the lock BEFORE the None check;
            # from_base_to_dict(None) presumably fails for unknown tokens --
            # the log line should move below the check.
            _logger.debug("delete %s" % lockString(from_base_to_dict(lock_db)))
            if lock_db is None:
                return False
            # Remove url to lock mapping
            url2token = self._session.query(Url2Token).filter(
                Url2Token.path == lock_db.root,
                Url2Token.token == token).one_or_none()
            if url2token is not None:
                self._session.delete(url2token)
            # Remove the lock
            self._session.delete(lock_db)
            self._session.commit()
            self._flush()
        finally:
            self._lock.release()
        return True

    def getLockList(self, path, includeRoot, includeChildren, tokenOnly):
        """Return a list of direct locks for <path>.

        Expired locks are *not* returned (but may be purged).

        path:
            Normalized path (utf8 encoded string, no trailing '/')
        includeRoot:
            False: don't add <path> lock (only makes sense, when
            includeChildren is True).
        includeChildren:
            True: Also check all sub-paths for existing locks.
        tokenOnly:
            True: only a list of token is returned. This may be implemented
            more efficiently by some providers.
        Returns:
            List of valid lock dictionaries (may be empty).
        """
        assert path and path.startswith("/")
        assert includeRoot or includeChildren

        def __appendLocks(toklist):
            # NOTE(review): this uses get_lock_db_from_token(), which does NOT
            # check expiration -- so expired locks can be returned here,
            # contradicting the docstring. Also, rows from
            # query(Url2Token.token) are 1-tuples, not raw token strings --
            # confirm the comparison in get_lock_db_from_token still matches.
            for token in toklist:
                lock_db = self.get_lock_db_from_token(token)
                if lock_db:
                    if tokenOnly:
                        lockList.append(lock_db.token)
                    else:
                        lockList.append(from_base_to_dict(lock_db))

        path = normalizeLockRoot(path)
        self._lock.acquireRead()
        try:
            tokList = self._session.query(Url2Token.token).filter(Url2Token.path == path).all()
            lockList = []
            if includeRoot:
                __appendLocks(tokList)
            if includeChildren:
                for url, in self._session.query(Url2Token.path).group_by(Url2Token.path):
                    if util.isChildUri(path, url):
                        __appendLocks(self._session.query(Url2Token.token).filter(Url2Token.path == url))
            return lockList
        finally:
            self._lock.release()
class LockStorage(object):
    """SQLAlchemy-backed lock storage.

    Persists lock records (``Lock`` rows) plus a path-to-token mapping
    (``Url2Token`` rows) through ``self._session``. R/W access is guarded by
    a ReadWriteLock.
    """

    LOCK_TIME_OUT_DEFAULT = 604800  # 1 week, in seconds
    LOCK_TIME_OUT_MAX = 4 * 604800  # 1 month, in seconds

    def __init__(self):
        self._session = None  # todo Session()
        self._lock = ReadWriteLock()

    def __repr__(self):
        return "C'est bien mon verrou..."

    def __del__(self):
        pass

    def get_lock_db_from_token(self, token):
        """Return the Lock DB row for <token>, or None (no expiry check)."""
        return self._session.query(Lock).filter(
            Lock.token == token).one_or_none()

    def _flush(self):
        """Overloaded by Shelve implementation."""
        pass

    def open(self):
        """Called before first use.

        May be implemented to initialize a storage.
        """
        pass

    def close(self):
        """Called on shutdown."""
        pass

    def cleanup(self):
        """Purge expired locks (optional)."""
        pass

    def clear(self):
        """Delete all entries."""
        # Fix: Query.all() returns a plain list, which has no .delete();
        # a bulk delete must be issued on the Query itself.
        self._session.query(Lock).delete(synchronize_session=False)
        # Also drop the path->token mapping so no dangling references remain.
        self._session.query(Url2Token).delete(synchronize_session=False)
        self._session.commit()

    def get(self, token):
        """Return a lock dictionary for a token.

        If the lock does not exist or is expired, None is returned.

        token:
            lock token
        Returns:
            Lock dictionary or <None>

        Side effect: if lock is expired, it will be purged and None is
        returned.
        """
        self._lock.acquireRead()
        try:
            lock_base = self._session.query(Lock).filter(
                Lock.token == token).one_or_none()
            if lock_base is None:
                # Lock not found: purge dangling URL2TOKEN entries
                _logger.debug("Lock purged dangling: %s" % token)
                self.delete(token)
                return None
            expire = float(lock_base.expire)
            # A negative expire value means 'never expires'.
            if 0 <= expire < time.time():
                _logger.debug(
                    "Lock timed-out(%s): %s"
                    % (expire, lockString(from_base_to_dict(lock_base))))
                self.delete(token)
                return None
            return from_base_to_dict(lock_base)
        finally:
            self._lock.release()

    def create(self, path, lock):
        """Create a direct lock for a resource path.

        path:
            Normalized path (utf8 encoded string, no trailing '/')
        lock:
            lock dictionary, without a token entry
        Returns:
            New unique lock token.: <lock

        **Note:** the lock dictionary may be modified on return:

        - lock['root'] is ignored and set to the normalized <path>
        - lock['timeout'] may be normalized and shorter than requested
        - lock['token'] is added
        """
        self._lock.acquireWrite()
        try:
            # We expect only a lock definition, not an existing lock
            assert lock.get("token") is None
            assert lock.get("expire") is None, "Use timeout instead of expire"
            assert path and "/" in path

            # Normalize root: /foo/bar
            org_path = path
            path = normalizeLockRoot(path)
            lock["root"] = path

            # Normalize timeout from ttl to expire-date.
            # Fix: test for a missing value *before* converting -- the old
            # `float(lock.get("timeout"))` raised TypeError for None, which
            # made the default branch unreachable.
            timeout = lock.get("timeout")
            if timeout is None:
                timeout = LockStorage.LOCK_TIME_OUT_DEFAULT
            else:
                timeout = float(timeout)
                if timeout < 0 or timeout > LockStorage.LOCK_TIME_OUT_MAX:
                    timeout = LockStorage.LOCK_TIME_OUT_MAX
            lock["timeout"] = timeout
            lock["expire"] = time.time() + timeout

            validateLock(lock)
            token = generateLockToken()
            lock["token"] = token

            # Store lock
            lock_db = from_dict_to_base(lock)
            self._session.add(lock_db)
            # Store locked path reference
            url2token = Url2Token(path=path, token=token)
            self._session.add(url2token)
            self._session.commit()
            self._flush()
            _logger.debug("LockStorageDict.set(%r): %s"
                          % (org_path, lockString(lock)))
            return lock
        finally:
            self._lock.release()

    def refresh(self, token, timeout):
        """Modify an existing lock's timeout.

        token:
            Valid lock token.
        timeout:
            Suggested lifetime in seconds (-1 for infinite).
            The real expiration time may be shorter than requested!
        Returns:
            Lock dictionary.
            Raises ValueError, if token is invalid.
        """
        assert timeout == -1 or timeout > 0
        if timeout < 0 or timeout > LockStorage.LOCK_TIME_OUT_MAX:
            timeout = LockStorage.LOCK_TIME_OUT_MAX
        self._lock.acquireWrite()
        try:
            # Fix: look up the row *inside* the write lock, so it cannot be
            # deleted between lookup and update.
            lock_db = self._session.query(Lock).filter(
                Lock.token == token).one_or_none()
            assert lock_db is not None, "Lock must exist"
            lock_db.timeout = timeout
            lock_db.expire = time.time() + timeout
            self._session.commit()
            self._flush()
        finally:
            self._lock.release()
        return from_base_to_dict(lock_db)

    def delete(self, token):
        """Delete lock.

        Returns True on success. False, if token does not exist, or is
        expired.
        """
        self._lock.acquireWrite()
        try:
            lock_db = self._session.query(Lock).filter(
                Lock.token == token).one_or_none()
            if lock_db is None:
                return False
            # Fix: format the lock only *after* the None check --
            # from_base_to_dict(None) would fail for unknown tokens.
            _logger.debug("delete %s" % lockString(from_base_to_dict(lock_db)))
            # Remove url to lock mapping
            url2token = self._session.query(Url2Token).filter(
                Url2Token.path == lock_db.root,
                Url2Token.token == token).one_or_none()
            if url2token is not None:
                self._session.delete(url2token)
            # Remove the lock
            self._session.delete(lock_db)
            self._session.commit()
            self._flush()
        finally:
            self._lock.release()
        return True

    def getLockList(self, path, includeRoot, includeChildren, tokenOnly):
        """Return a list of direct locks for <path>.

        Expired locks are *not* returned (they are purged as a side effect).

        path:
            Normalized path (utf8 encoded string, no trailing '/')
        includeRoot:
            False: don't add <path> lock (only makes sense, when
            includeChildren is True).
        includeChildren:
            True: Also check all sub-paths for existing locks.
        tokenOnly:
            True: only a list of token is returned. This may be implemented
            more efficiently by some providers.
        Returns:
            List of valid lock dictionaries (may be empty).
        """
        assert path and path.startswith("/")
        assert includeRoot or includeChildren

        def __appendLocks(toklist):
            # Fix: go through self.get(), so expired locks are detected and
            # purged instead of being returned. Rows from
            # query(Url2Token.token) are 1-tuples, so unpack the column.
            for (token,) in toklist:
                lock = self.get(token)
                if lock:
                    if tokenOnly:
                        lockList.append(lock["token"])
                    else:
                        lockList.append(lock)

        path = normalizeLockRoot(path)
        self._lock.acquireRead()
        try:
            tokList = self._session.query(Url2Token.token).filter(
                Url2Token.path == path).all()
            lockList = []
            if includeRoot:
                __appendLocks(tokList)
            if includeChildren:
                for url, in self._session.query(Url2Token.path).group_by(
                        Url2Token.path):
                    if util.isChildUri(path, url):
                        __appendLocks(
                            self._session.query(Url2Token.token).filter(
                                Url2Token.path == url))
            return lockList
        finally:
            self._lock.release()
class LockStorageDict(object):
    """
    An in-memory lock manager storage implementation using a dictionary.

    R/W access is guarded by a thread.lock object.

    Also, to make it work with a Shelve dictionary, modifying dictionary
    members is done by re-assignment and we call a _flush() method.

    This is obviously not persistent, but should be enough in some cases.
    For a persistent implementation, see lock_manager.LockStorageShelve().

    Notes:
        expire is stored as expiration date in seconds since epoch (not in
        seconds until expiration).

    The dictionary is built like::

        { 'URL2TOKEN:/temp/litmus/lockme': ['opaquelocktoken:0x1d7b86...',
                                            'opaquelocktoken:0xd7d4c0...'],
          'opaquelocktoken:0x1d7b86...': { 'depth': '0',
                                           'owner': "<?xml version=\'1.0\' encoding=\'UTF-8\'?>\\n<owner xmlns="DAV:">litmus test suite</owner>\\n",
                                           'principal': 'tester',
                                           'root': '/temp/litmus/lockme',
                                           'scope': 'shared',
                                           'expire': 1261328382.4530001,
                                           'token': 'opaquelocktoken:0x1d7b86...',
                                           'type': 'write',
                                           },
          'opaquelocktoken:0xd7d4c0...': { 'depth': '0',
                                           'owner': '<?xml version=\'1.0\' encoding=\'UTF-8\'?>\\n<owner xmlns="DAV:">litmus: notowner_sharedlock</owner>\\n',
                                           'principal': 'tester',
                                           'root': '/temp/litmus/lockme',
                                           'scope': 'shared',
                                           'expire': 1261328381.6040001,
                                           'token': 'opaquelocktoken:0xd7d4c0...',
                                           'type': 'write'
                                           },
         }
    """  # noqa

    LOCK_TIME_OUT_DEFAULT = 604800  # 1 week, in seconds
    LOCK_TIME_OUT_MAX = 4 * 604800  # 1 month, in seconds

    def __init__(self):
        self._dict = None
        self._lock = ReadWriteLock()

    def __repr__(self):
        return self.__class__.__name__

    def __del__(self):
        pass

    def _flush(self):
        """Overloaded by Shelve implementation."""
        pass

    def open(self):
        """Called before first use.

        May be implemented to initialize a storage.
        """
        assert self._dict is None
        self._dict = {}

    def close(self):
        """Called on shutdown."""
        self._dict = None

    def cleanup(self):
        """Purge expired locks (optional)."""
        pass

    def clear(self):
        """Delete all entries."""
        if self._dict is not None:
            self._dict.clear()

    def get(self, token):
        """Return a lock dictionary for a token.

        If the lock does not exist or is expired, None is returned.

        token:
            lock token
        Returns:
            Lock dictionary or <None>

        Side effect: if lock is expired, it will be purged and None is
        returned.
        """
        self._lock.acquire_read()
        try:
            lock = self._dict.get(token)
            if lock is None:
                # Lock not found: purge dangling URL2TOKEN entries
                _logger.debug("Lock purged dangling: {}".format(token))
                self.delete(token)
                return None
            expire = float(lock["expire"])
            # A negative expire value means 'never expires'.
            if expire >= 0 and expire < time.time():
                _logger.debug(
                    "Lock timed-out({}): {}".format(expire, lock_string(lock))
                )
                self.delete(token)
                return None
            return lock
        finally:
            self._lock.release()

    def create(self, path, lock):
        """Create a direct lock for a resource path.

        path:
            Normalized path (utf8 encoded string, no trailing '/')
        lock:
            lock dictionary, without a token entry
        Returns:
            New unique lock token.: <lock

        **Note:** the lock dictionary may be modified on return:

        - lock['root'] is ignored and set to the normalized <path>
        - lock['timeout'] may be normalized and shorter than requested
        - lock['token'] is added
        """
        self._lock.acquire_write()
        try:
            # We expect only a lock definition, not an existing lock
            assert lock.get("token") is None
            assert lock.get("expire") is None, "Use timeout instead of expire"
            assert path and "/" in path

            # Normalize root: /foo/bar
            org_path = path
            path = normalize_lock_root(path)
            lock["root"] = path

            # Normalize timeout from ttl to expire-date.
            # Fix: test for a missing value *before* converting -- the old
            # `float(lock.get("timeout"))` raised TypeError for None, which
            # made the default branch unreachable.
            timeout = lock.get("timeout")
            if timeout is None:
                timeout = LockStorageDict.LOCK_TIME_OUT_DEFAULT
            else:
                timeout = float(timeout)
                if timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
                    timeout = LockStorageDict.LOCK_TIME_OUT_MAX
            lock["timeout"] = timeout
            lock["expire"] = time.time() + timeout

            validate_lock(lock)

            token = generate_lock_token()
            lock["token"] = token

            # Store lock
            self._dict[token] = lock

            # Store locked path reference
            key = "URL2TOKEN:{}".format(path)
            if key not in self._dict:
                self._dict[key] = [token]
            else:
                # Note: Shelve dictionary returns copies, so we must reassign
                # values:
                tokList = self._dict[key]
                tokList.append(token)
                self._dict[key] = tokList
            self._flush()
            _logger.debug(
                "LockStorageDict.set({!r}): {}".format(org_path, lock_string(lock))
            )
            return lock
        finally:
            self._lock.release()

    def refresh(self, token, timeout):
        """Modify an existing lock's timeout.

        token:
            Valid lock token.
        timeout:
            Suggested lifetime in seconds (-1 for infinite).
            The real expiration time may be shorter than requested!
        Returns:
            Lock dictionary.
            Raises ValueError, if token is invalid.
        """
        assert token in self._dict, "Lock must exist"
        assert timeout == -1 or timeout > 0
        if timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
            timeout = LockStorageDict.LOCK_TIME_OUT_MAX
        self._lock.acquire_write()
        try:
            # Note: shelve dictionary returns copies, so we must reassign
            # values:
            lock = self._dict[token]
            lock["timeout"] = timeout
            lock["expire"] = time.time() + timeout
            self._dict[token] = lock
            self._flush()
        finally:
            self._lock.release()
        return lock

    def delete(self, token):
        """Delete lock.

        Returns True on success. False, if token does not exist, or is
        expired.
        """
        self._lock.acquire_write()
        try:
            lock = self._dict.get(token)
            _logger.debug("delete {}".format(lock_string(lock)))
            if lock is None:
                return False
            # Remove url to lock mapping
            key = "URL2TOKEN:{}".format(lock.get("root"))
            if key in self._dict:
                tokList = self._dict[key]
                if len(tokList) > 1:
                    # Note: shelve dictionary returns copies, so we must
                    # reassign values:
                    tokList.remove(token)
                    self._dict[key] = tokList
                else:
                    del self._dict[key]
            # Remove the lock
            del self._dict[token]
            self._flush()
        finally:
            self._lock.release()
        return True

    def get_lock_list(self, path, include_root, include_children, token_only):
        """Return a list of direct locks for <path>.

        Expired locks are *not* returned (but may be purged).

        path:
            Normalized path (utf8 encoded string, no trailing '/')
        include_root:
            False: don't add <path> lock (only makes sense, when
            include_children is True).
        include_children:
            True: Also check all sub-paths for existing locks.
        token_only:
            True: only a list of token is returned. This may be implemented
            more efficiently by some providers.
        Returns:
            List of valid lock dictionaries (may be empty).
        """
        assert compat.is_native(path)
        assert path and path.startswith("/")
        assert include_root or include_children

        def __appendLocks(toklist):
            # Since we can do this quickly, we use self.get() even if
            # token_only is set, so expired locks are purged.
            for token in toklist:
                lock = self.get(token)
                if lock:
                    if token_only:
                        lockList.append(lock["token"])
                    else:
                        lockList.append(lock)

        path = normalize_lock_root(path)
        self._lock.acquire_read()
        try:
            key = "URL2TOKEN:{}".format(path)
            tokList = self._dict.get(key, [])
            lockList = []
            if include_root:
                __appendLocks(tokList)
            if include_children:
                for u, ltoks in self._dict.items():
                    # Only URL2TOKEN entries map to token lists; plain token
                    # keys can never be a child of the prefixed key, but be
                    # explicit about it:
                    if u.startswith("URL2TOKEN:") and util.is_child_uri(key, u):
                        __appendLocks(ltoks)
            return lockList
        finally:
            self._lock.release()
class PropertyManager(object):
    """
    An in-memory property manager implementation using a dictionary.

    This is obviously not persistent, but should be enough in some cases.
    For a persistent implementation, see property_manager.ShelvePropertyManager().
    """

    def __init__(self):
        # Mapping { <normurl>: { <propname>: <value> } }, created lazily.
        self._dict = None
        self._loaded = False
        self._lock = ReadWriteLock()
        self._verbose = 3

    def __repr__(self):
        return "PropertyManager"

    def __del__(self):
        if __debug__ and self._verbose >= 4:
            self._check()
        self._close()

    def _lazy_open(self):
        """Create the internal dictionary on first use."""
        _logger.debug("_lazy_open()")
        self._lock.acquire_write()
        try:
            self._dict = {}
            self._loaded = True
        finally:
            self._lock.release()

    def _sync(self):
        # No-op here; persistent subclasses (e.g. Shelve-based) may flush.
        pass

    def _close(self):
        _logger.debug("_close()")
        self._lock.acquire_write()
        try:
            self._dict = None
            self._loaded = False
        finally:
            self._lock.release()

    def _check(self, msg=""):
        """Return True if the internal dictionary appears consistent."""
        try:
            if not self._loaded:
                return True
            for k, v in self._dict.items():
                _dummy = "{}, {}".format(k, v)  # noqa
            # _logger.debug("{} checks ok {}".format(self.__class__.__name__, msg))
            return True
        except Exception:
            _logger.exception("{} _check: ERROR {}".format(
                self.__class__.__name__, msg))
            return False

    def _dump(self, msg=""):
        """Log all stored properties (debugging aid)."""
        _logger.info("{}({}): {}".format(self.__class__.__name__,
                                         self.__repr__(), msg))
        if not self._loaded:
            self._lazy_open()
            if self._verbose >= 4:
                return  # Already dumped in _lazy_open
        try:
            for k, v in self._dict.items():
                _logger.info(" {}".format(k))
                for k2, v2 in v.items():
                    try:
                        _logger.info(" {}: '{}'".format(k2, v2))
                    except Exception as e:
                        _logger.info(" {}: ERROR {}".format(k2, e))
            # _logger.flush()
        except Exception as e:
            _logger.error("PropertyManager._dump() ERROR: {}".format(e))

    def get_properties(self, normurl, environ=None):
        """Return a list of all property names stored for <normurl>."""
        _logger.debug("get_properties({})".format(normurl))
        self._lock.acquire_read()
        try:
            if not self._loaded:
                self._lazy_open()
            returnlist = []
            if normurl in self._dict:
                for propdata in self._dict[normurl].keys():
                    returnlist.append(propdata)
            return returnlist
        finally:
            self._lock.release()

    def get_property(self, normurl, propname, environ=None):
        """Return the value of <propname> for <normurl>, or None if unset."""
        _logger.debug("get_property({}, {})".format(normurl, propname))
        self._lock.acquire_read()
        try:
            if not self._loaded:
                self._lazy_open()
            if normurl not in self._dict:
                return None
            # TODO: sometimes we get exceptions here: (catch or otherwise make
            # more robust?)
            try:
                resourceprops = self._dict[normurl]
            except Exception as e:
                _logger.exception("get_property({}, {}) failed : {}".format(
                    normurl, propname, e))
                raise
            return resourceprops.get(propname)
        finally:
            self._lock.release()

    def write_property(self, normurl, propname, propertyvalue,
                       dryRun=False, environ=None):
        """Store <propertyvalue> as property <propname> of <normurl>.

        dryRun=True only validates the arguments, without modifying state.
        """
        assert normurl and normurl.startswith("/")
        assert propname  # and propname.startswith("{")
        assert propertyvalue is not None
        _logger.debug("write_property({}, {}, dryRun={}):\n\t{}".format(
            normurl, propname, dryRun, propertyvalue))
        if dryRun:
            return  # TODO: can we check anything here?

        self._lock.acquire_write()
        try:
            if not self._loaded:
                self._lazy_open()
            if normurl in self._dict:
                locatordict = self._dict[normurl]
            else:
                locatordict = {}  # dict([])
            locatordict[propname] = propertyvalue
            # This re-assignment is important, so Shelve realizes the change:
            self._dict[normurl] = locatordict
            self._sync()
            if __debug__ and self._verbose >= 4:
                self._check()
        finally:
            self._lock.release()

    def remove_property(self, normurl, propname, dryRun=False, environ=None):
        """
        Specifying the removal of a property that does not exist is NOT an error.
        """
        _logger.debug("remove_property({}, {}, dryRun={})".format(
            normurl, propname, dryRun))
        if dryRun:
            # TODO: can we check anything here?
            return
        self._lock.acquire_write()
        try:
            if not self._loaded:
                self._lazy_open()
            if normurl in self._dict:
                locatordict = self._dict[normurl]
                if propname in locatordict:
                    del locatordict[propname]
                    # This re-assignment is important, so Shelve realizes the
                    # change:
                    self._dict[normurl] = locatordict
                    self._sync()
            if __debug__ and self._verbose >= 4:
                self._check()
        finally:
            self._lock.release()

    def remove_properties(self, normurl, environ=None):
        """Remove all properties stored for <normurl>."""
        _logger.debug("remove_properties({})".format(normurl))
        self._lock.acquire_write()
        try:
            if not self._loaded:
                self._lazy_open()
            if normurl in self._dict:
                del self._dict[normurl]
                self._sync()
        finally:
            self._lock.release()

    def copy_properties(self, srcurl, desturl, environ=None):
        """Copy all properties of <srcurl> to <desturl>."""
        _logger.debug("copy_properties({}, {})".format(srcurl, desturl))
        self._lock.acquire_write()
        try:
            if __debug__ and self._verbose >= 4:
                self._check()
            if not self._loaded:
                self._lazy_open()
            if srcurl in self._dict:
                self._dict[desturl] = self._dict[srcurl].copy()
            self._sync()
            if __debug__ and self._verbose >= 4:
                self._check("after copy")
        finally:
            self._lock.release()

    def move_properties(self, srcurl, desturl, withChildren, environ=None):
        """Move properties of <srcurl> (and optionally its children) to <desturl>."""
        _logger.debug("move_properties({}, {}, {})".format(
            srcurl, desturl, withChildren))
        self._lock.acquire_write()
        try:
            if __debug__ and self._verbose >= 4:
                self._check()
            if not self._loaded:
                self._lazy_open()
            if withChildren:
                # Move srcurl\*
                # FIX: iterate over a snapshot of the keys, because entries are
                # deleted inside the loop. Mutating a dict while iterating its
                # live keys() view raises RuntimeError on Python 3.
                for url in list(self._dict.keys()):
                    if util.is_equal_or_child_uri(srcurl, url):
                        d = url.replace(srcurl, desturl)
                        self._dict[d] = self._dict[url]
                        del self._dict[url]
            elif srcurl in self._dict:
                # Move srcurl only
                self._dict[desturl] = self._dict[srcurl]
                del self._dict[srcurl]
            self._sync()
            if __debug__ and self._verbose >= 4:
                self._check("after move")
        finally:
            self._lock.release()
class LockManager(object):
    """
    Implements locking functionality using a custom storage layer.

    Wraps a storage object (anything exposing the LockManagerStorage
    interface, e.g. get_lock_list/get/create/refresh/delete) and adds the
    RFC 4918 permission checks on top of it.
    """

    #: Default lock lifetime if the client did not request one.
    LOCK_TIME_OUT_DEFAULT = 604800  # 1 week, in seconds

    def __init__(self, storage):
        """
        storage:
            LockManagerStorage object
        """
        assert hasattr(storage, "get_lock_list")
        self._lock = ReadWriteLock()
        self.storage = storage
        self.storage.open()

    def __del__(self):
        self.storage.close()

    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__, self.storage)

    def _dump(self, msg=""):
        """Log all known locks, grouped by token, URL, principal, and owner."""
        urlDict = {}  # { <url>: [<tokenlist>] }
        ownerDict = {}  # { <LOCKOWNER>: [<tokenlist>] }
        userDict = {}  # { <LOCKUSER>: [<tokenlist>] }
        tokenDict = {}  # { <token>: <LOCKURLS> }

        _logger.info("{}: {}".format(self, msg))

        for lock in self.storage.get_lock_list(
            "/", include_root=True, include_children=True, token_only=False
        ):
            tok = lock["token"]
            tokenDict[tok] = lock_string(lock)
            userDict.setdefault(lock["principal"], []).append(tok)
            ownerDict.setdefault(lock["owner"], []).append(tok)
            urlDict.setdefault(lock["root"], []).append(tok)

        _logger.info("Locks:\n{}".format(pformat(tokenDict, indent=0, width=255)))
        if tokenDict:
            _logger.info(
                "Locks by URL:\n{}".format(pformat(urlDict, indent=4, width=255))
            )
            _logger.info(
                "Locks by principal:\n{}".format(pformat(userDict, indent=4, width=255))
            )
            _logger.info(
                "Locks by owner:\n{}".format(pformat(ownerDict, indent=4, width=255))
            )

    def _generate_lock(
        self, principal, lock_type, lock_scope, lock_depth, lock_owner, path, timeout
    ):
        """Acquire lock and return lock_dict.

        principal
            Name of the principal.
        lock_type
            Must be 'write'.
        lock_scope
            Must be 'shared' or 'exclusive'.
        lock_depth
            Must be '0' or 'infinity'.
        lock_owner
            String identifying the owner.
        path
            Resource URL.
        timeout
            Seconds to live

        This function does NOT check, if the new lock creates a conflict!
        """
        # None means "use default"; any negative value means "infinite".
        if timeout is None:
            timeout = LockManager.LOCK_TIME_OUT_DEFAULT
        elif timeout < 0:
            timeout = -1

        lock_dict = {
            "root": path,
            "type": lock_type,
            "scope": lock_scope,
            "depth": lock_depth,
            "owner": lock_owner,
            "timeout": timeout,
            "principal": principal,
        }
        # NOTE(review): assuming storage.create() persists the lock and adds
        # the generated 'token' entry to lock_dict — confirm against the
        # storage implementation.
        self.storage.create(path, lock_dict)
        return lock_dict

    def acquire(
        self,
        url,
        lock_type,
        lock_scope,
        lock_depth,
        lock_owner,
        timeout,
        principal,
        token_list,
    ):
        """Check for permissions and acquire a lock.

        On success return new lock dictionary.
        On error raise a DAVError with an embedded DAVErrorCondition.
        """
        url = normalize_lock_root(url)
        self._lock.acquire_write()
        try:
            # Raises DAVError on conflict:
            self._check_lock_permission(
                url, lock_type, lock_scope, lock_depth, token_list, principal
            )
            return self._generate_lock(
                principal, lock_type, lock_scope, lock_depth, lock_owner, url, timeout
            )
        finally:
            self._lock.release()

    def refresh(self, token, timeout=None):
        """Set new timeout for lock, if existing and valid."""
        if timeout is None:
            timeout = LockManager.LOCK_TIME_OUT_DEFAULT
        return self.storage.refresh(token, timeout)

    def get_lock(self, token, key=None):
        """Return lock_dict, or None, if not found or invalid.

        Side effect: if lock is expired, it will be purged and None is returned.

        key: name of lock attribute that will be returned instead of a dictionary.
        """
        assert key in (
            None,
            "type",
            "scope",
            "depth",
            "owner",
            "root",
            "timeout",
            "principal",
            "token",
        )
        lock = self.storage.get(token)
        if key is None or lock is None:
            return lock
        return lock[key]

    def release(self, token):
        """Delete lock."""
        self.storage.delete(token)

    def is_token_locked_by_user(self, token, principal):
        """Return True, if <token> exists, is valid, and bound to <principal>."""
        return self.get_lock(token, "principal") == principal

    def get_url_lock_list(self, url):
        """Return list of lock_dict, if <url> is protected by at least one
        direct, valid lock.

        Side effect: expired locks for this url are purged.
        """
        url = normalize_lock_root(url)
        lockList = self.storage.get_lock_list(
            url, include_root=True, include_children=False, token_only=False
        )
        return lockList

    def get_indirect_url_lock_list(self, url, principal=None):
        """Return a list of valid lockDicts, that protect <path> directly or
        indirectly.

        If a principal is given, only locks owned by this principal are returned.
        Side effect: expired locks for this path and all parents are purged.
        """
        url = normalize_lock_root(url)
        lockList = []
        u = url
        while u:
            ll = self.storage.get_lock_list(
                u, include_root=True, include_children=False, token_only=False
            )
            for l in ll:
                if u != url and l["depth"] != "infinity":
                    continue  # We only consider parents with Depth: infinity
                # TODO: handle shared locks in some way?
                # if (l["scope"] == "shared" and lock_scope == "shared"
                #     and principal != l["principal"]):
                #     continue  # Only compatible with shared locks by other users
                if principal is None or principal == l["principal"]:
                    lockList.append(l)
            u = util.get_uri_parent(u)
        return lockList

    def is_url_locked(self, url):
        """Return True, if url is directly locked."""
        lockList = self.get_url_lock_list(url)
        return len(lockList) > 0

    def is_url_locked_by_token(self, url, lock_token):
        """Check, if url (or any of it's parents) is locked by lock_token."""
        lockUrl = self.get_lock(lock_token, "root")
        return lockUrl and util.is_equal_or_child_uri(lockUrl, url)

    def remove_all_locks_from_url(self, url):
        """Release every direct lock on <url> (under the write lock)."""
        self._lock.acquire_write()
        try:
            lockList = self.get_url_lock_list(url)
            for lock in lockList:
                self.release(lock["token"])
        finally:
            self._lock.release()

    def _check_lock_permission(
        self, url, lock_type, lock_scope, lock_depth, token_list, principal
    ):
        """Check, if <principal> can lock <url>, otherwise raise an error.

        If locking <url> would create a conflict, DAVError(HTTP_LOCKED) is
        raised. An embedded DAVErrorCondition contains the conflicting resource.

        @see http://www.webdav.org/specs/rfc4918.html#lock-model
            - Parent locks WILL NOT be conflicting, if they are depth-0.
            - Exclusive depth-infinity parent locks WILL be conflicting, even
              if they are owned by <principal>.
            - Child locks WILL NOT be conflicting, if we request a depth-0
              lock.
            - Exclusive child locks WILL be conflicting, even if they are
              owned by <principal>. (7.7)
            - It is not enough to check whether a lock is owned by
              <principal>, but also the token must be passed with the request.
              (Because <principal> may run two different applications on his
              client.)
            - <principal> cannot lock-exclusive, if he holds a parent
              shared-lock. (This would only make sense, if he was the only
              shared-lock holder.)
            - TODO: litmus tries to acquire a shared lock on one resource
              twice (locks: 27 'double_sharedlock') and fails, when we return
              HTTP_LOCKED. So we allow multi shared locks on a resource even
              for the same principal.

        @param url: URL that shall be locked
        @param lock_type: "write"
        @param lock_scope: "shared"|"exclusive"
        @param lock_depth: "0"|"infinity"
        @param token_list: list of lock tokens, that the user submitted in
            If: header
        @param principal: name of the principal requesting a lock

        @return: None (or raise)
        """
        assert lock_type == "write"
        assert lock_scope in ("shared", "exclusive")
        assert lock_depth in ("0", "infinity")

        _logger.debug(
            "checkLockPermission({}, {}, {}, {})".format(
                url, lock_scope, lock_depth, principal
            )
        )

        # Error precondition to collect conflicting URLs
        errcond = DAVErrorCondition(PRECONDITION_CODE_LockConflict)

        self._lock.acquire_read()
        try:
            # Check url and all parents for conflicting locks
            u = url
            while u:
                ll = self.get_url_lock_list(u)
                for l in ll:
                    _logger.debug(" check parent {}, {}".format(u, lock_string(l)))
                    if u != url and l["depth"] != "infinity":
                        # We only consider parents with Depth: infinity
                        continue
                    elif l["scope"] == "shared" and lock_scope == "shared":
                        # Only compatible with shared locks (even by same
                        # principal)
                        continue
                    # Lock conflict
                    _logger.debug(
                        " -> DENIED due to locked parent {}".format(lock_string(l))
                    )
                    errcond.add_href(l["root"])
                u = util.get_uri_parent(u)

            if lock_depth == "infinity":
                # Check child URLs for conflicting locks
                childLocks = self.storage.get_lock_list(
                    url, include_root=False, include_children=True, token_only=False
                )

                for l in childLocks:
                    assert util.is_child_uri(url, l["root"])
                    # if util.is_child_uri(url, l["root"]):
                    _logger.debug(
                        " -> DENIED due to locked child {}".format(lock_string(l))
                    )
                    errcond.add_href(l["root"])
        finally:
            self._lock.release()

        # If there were conflicts, raise HTTP_LOCKED for <url>, and pass
        # conflicting resource with 'no-conflicting-lock' precondition
        if len(errcond.hrefs) > 0:
            raise DAVError(HTTP_LOCKED, err_condition=errcond)
        return

    def check_write_permission(self, url, depth, token_list, principal):
        """Check, if <principal> can modify <url>, otherwise raise HTTP_LOCKED.

        If modifying <url> is prevented by a lock, DAVError(HTTP_LOCKED) is
        raised. An embedded DAVErrorCondition contains the conflicting locks.

        <url> may be modified by <principal>, if it is not currently locked
        directly or indirectly (i.e. by a locked parent).
        For depth-infinity operations, <url> also must not have locked children.

        It is not enough to check whether a lock is owned by <principal>, but
        also the token must be passed with the request. Because <principal> may
        run two different applications.

        See http://www.webdav.org/specs/rfc4918.html#lock-model
            http://www.webdav.org/specs/rfc4918.html#rfc.section.7.4

        TODO: verify assumptions:
        - Parent locks WILL NOT be conflicting, if they are depth-0.
        - Exclusive child locks WILL be conflicting, even if they are owned by
          <principal>.

        @param url: URL that shall be modified, created, moved, or deleted
        @param depth: "0"|"infinity"
        @param token_list: list of lock tokens, that the principal submitted
            in If: header
        @param principal: name of the principal requesting a lock

        @return: None or raise error
        """
        assert compat.is_native(url)
        assert depth in ("0", "infinity")
        _logger.debug(
            "check_write_permission({}, {}, {}, {})".format(
                url, depth, token_list, principal
            )
        )

        # Error precondition to collect conflicting URLs
        errcond = DAVErrorCondition(PRECONDITION_CODE_LockConflict)

        self._lock.acquire_read()
        try:
            # Check url and all parents for conflicting locks
            u = url
            while u:
                ll = self.get_url_lock_list(u)
                _logger.debug(" checking {}".format(u))
                for l in ll:
                    _logger.debug(" l={}".format(lock_string(l)))
                    if u != url and l["depth"] != "infinity":
                        # We only consider parents with Depth: infinity
                        continue
                    elif principal == l["principal"] and l["token"] in token_list:
                        # User owns this lock
                        continue
                    else:
                        # Token is owned by principal, but not passed with lock list
                        _logger.debug(
                            " -> DENIED due to locked parent {}".format(lock_string(l))
                        )
                        errcond.add_href(l["root"])
                u = util.get_uri_parent(u)

            if depth == "infinity":
                # Check child URLs for conflicting locks
                childLocks = self.storage.get_lock_list(
                    url, include_root=False, include_children=True, token_only=False
                )

                for l in childLocks:
                    assert util.is_child_uri(url, l["root"])
                    # if util.is_child_uri(url, l["root"]):
                    _logger.debug(
                        " -> DENIED due to locked child {}".format(lock_string(l))
                    )
                    errcond.add_href(l["root"])
        finally:
            self._lock.release()

        # If there were conflicts, raise HTTP_LOCKED for <url>, and pass
        # conflicting resource with 'no-conflicting-lock' precondition
        if len(errcond.hrefs) > 0:
            raise DAVError(HTTP_LOCKED, err_condition=errcond)
        return
class PropertyManager(object):
    """
    An in-memory property manager implementation using a dictionary.

    This is obviously not persistent, but should be enough in some cases.
    For a persistent implementation, see property_manager.ShelvePropertyManager().
    """

    def __init__(self):
        # Mapping { <norm_url>: { <name>: <value> } }, created lazily.
        self._dict = None
        self._loaded = False
        self._lock = ReadWriteLock()
        self._verbose = 3

    def __repr__(self):
        return "PropertyManager"

    def __del__(self):
        if __debug__ and self._verbose >= 4:
            self._check()
        self._close()

    def _lazy_open(self):
        """Create the internal dictionary on first use."""
        _logger.debug("_lazy_open()")
        self._lock.acquire_write()
        try:
            self._dict = {}
            self._loaded = True
        finally:
            self._lock.release()

    def _sync(self):
        # No-op here; persistent subclasses (e.g. Shelve-based) may flush.
        pass

    def _close(self):
        _logger.debug("_close()")
        self._lock.acquire_write()
        try:
            self._dict = None
            self._loaded = False
        finally:
            self._lock.release()

    def _check(self, msg=""):
        """Return True if the internal dictionary appears consistent."""
        try:
            if not self._loaded:
                return True
            for k, v in self._dict.items():
                _dummy = "{}, {}".format(k, v)  # noqa
            # _logger.debug("{} checks ok {}".format(self.__class__.__name__, msg))
            return True
        except Exception:
            _logger.exception(
                "{} _check: ERROR {}".format(self.__class__.__name__, msg)
            )
            return False

    def _dump(self, msg=""):
        """Log all stored properties (debugging aid)."""
        _logger.info("{}({}): {}".format(self.__class__.__name__, self.__repr__(), msg))
        if not self._loaded:
            self._lazy_open()
            if self._verbose >= 4:
                return  # Already dumped in _lazy_open
        try:
            for k, v in self._dict.items():
                _logger.info(" {}".format(k))
                for k2, v2 in v.items():
                    try:
                        _logger.info(" {}: '{}'".format(k2, v2))
                    except Exception as e:
                        _logger.info(" {}: ERROR {}".format(k2, e))
            # _logger.flush()
        except Exception as e:
            _logger.error("PropertyManager._dump() ERROR: {}".format(e))

    def get_properties(self, norm_url, environ=None):
        """Return a list of all property names stored for <norm_url>."""
        _logger.debug("get_properties({})".format(norm_url))
        self._lock.acquire_read()
        try:
            if not self._loaded:
                self._lazy_open()
            returnlist = []
            if norm_url in self._dict:
                for propdata in self._dict[norm_url].keys():
                    returnlist.append(propdata)
            return returnlist
        finally:
            self._lock.release()

    def get_property(self, norm_url, name, environ=None):
        """Return the value of property <name> for <norm_url>, or None if unset."""
        _logger.debug("get_property({}, {})".format(norm_url, name))
        self._lock.acquire_read()
        try:
            if not self._loaded:
                self._lazy_open()
            if norm_url not in self._dict:
                return None
            # TODO: sometimes we get exceptions here: (catch or otherwise make
            # more robust?)
            try:
                resourceprops = self._dict[norm_url]
            except Exception as e:
                _logger.exception(
                    "get_property({}, {}) failed : {}".format(norm_url, name, e)
                )
                raise
            return resourceprops.get(name)
        finally:
            self._lock.release()

    def write_property(
        self, norm_url, name, property_value, dry_run=False, environ=None
    ):
        """Store <property_value> as property <name> of <norm_url>.

        dry_run=True only validates the arguments, without modifying state.
        """
        assert norm_url and norm_url.startswith("/")
        assert name  # and name.startswith("{")
        assert property_value is not None
        _logger.debug(
            "write_property({}, {}, dry_run={}):\n\t{}".format(
                norm_url, name, dry_run, property_value
            )
        )
        if dry_run:
            return  # TODO: can we check anything here?

        self._lock.acquire_write()
        try:
            if not self._loaded:
                self._lazy_open()
            if norm_url in self._dict:
                locatordict = self._dict[norm_url]
            else:
                locatordict = {}  # dict([])
            locatordict[name] = property_value
            # This re-assignment is important, so Shelve realizes the change:
            self._dict[norm_url] = locatordict
            self._sync()
            if __debug__ and self._verbose >= 4:
                self._check()
        finally:
            self._lock.release()

    def remove_property(self, norm_url, name, dry_run=False, environ=None):
        """
        Specifying the removal of a property that does not exist is NOT an error.
        """
        _logger.debug(
            "remove_property({}, {}, dry_run={})".format(norm_url, name, dry_run)
        )
        if dry_run:
            # TODO: can we check anything here?
            return
        self._lock.acquire_write()
        try:
            if not self._loaded:
                self._lazy_open()
            if norm_url in self._dict:
                locatordict = self._dict[norm_url]
                if name in locatordict:
                    del locatordict[name]
                    # This re-assignment is important, so Shelve realizes the
                    # change:
                    self._dict[norm_url] = locatordict
                    self._sync()
            if __debug__ and self._verbose >= 4:
                self._check()
        finally:
            self._lock.release()

    def remove_properties(self, norm_url, environ=None):
        """Remove all properties stored for <norm_url>."""
        _logger.debug("remove_properties({})".format(norm_url))
        self._lock.acquire_write()
        try:
            if not self._loaded:
                self._lazy_open()
            if norm_url in self._dict:
                del self._dict[norm_url]
                self._sync()
        finally:
            self._lock.release()

    def copy_properties(self, src_url, dest_url, environ=None):
        """Copy all properties of <src_url> to <dest_url>."""
        _logger.debug("copy_properties({}, {})".format(src_url, dest_url))
        self._lock.acquire_write()
        try:
            if __debug__ and self._verbose >= 4:
                self._check()
            if not self._loaded:
                self._lazy_open()
            if src_url in self._dict:
                self._dict[dest_url] = self._dict[src_url].copy()
            self._sync()
            if __debug__ and self._verbose >= 4:
                self._check("after copy")
        finally:
            self._lock.release()

    def move_properties(self, src_url, dest_url, with_children, environ=None):
        """Move properties of <src_url> (and optionally its children) to <dest_url>."""
        _logger.debug(
            "move_properties({}, {}, {})".format(src_url, dest_url, with_children)
        )
        self._lock.acquire_write()
        try:
            if __debug__ and self._verbose >= 4:
                self._check()
            if not self._loaded:
                self._lazy_open()
            if with_children:
                # Move src_url\*
                # FIX: iterate over a snapshot of the keys, because entries are
                # deleted inside the loop. Mutating a dict while iterating its
                # live keys() view raises RuntimeError on Python 3.
                for url in list(self._dict.keys()):
                    if util.is_equal_or_child_uri(src_url, url):
                        d = url.replace(src_url, dest_url)
                        self._dict[d] = self._dict[url]
                        del self._dict[url]
            elif src_url in self._dict:
                # Move src_url only
                self._dict[dest_url] = self._dict[src_url]
                del self._dict[src_url]
            self._sync()
            if __debug__ and self._verbose >= 4:
                self._check("after move")
        finally:
            self._lock.release()