def assignBillSequence(self, orderKey):
	"""
	Assigns a unique bill sequence number to the given order.

	The next free number is allocated from a per-kind sequence entity inside
	a transaction, then stored on the order in a second transaction.

	:param orderKey: Key of the order that should receive a sequence number.
	"""

	def allocateIdxTxn(kindName, orderKey):
		"""Fetches the sequence counter and returns the next free index."""
		sequence = db.GetOrInsert(kindName, "viur_bill_sequences", count=1000)
		nextIdx = sequence["count"]
		sequence["count"] += 1
		db.Put(sequence)
		return str(nextIdx)

	def storeIdxTxn(orderKey, idx):
		"""Writes the allocated index onto the order entity."""
		orderEntity = db.Get(db.Key(orderKey))
		if not orderEntity:
			return
		orderEntity["idx"] = idx
		db.Put(orderEntity)

	# Bail out early if the order vanished, so we don't burn a sequence number
	if not db.Get(db.Key(orderKey)):
		return
	idx = db.RunInTransaction(allocateIdxTxn, self.viewSkel().kindName, orderKey)
	db.RunInTransaction(storeIdxTxn, orderKey, idx)
	self.billSequenceAvailable(orderKey)
def validateSecurityKey(self, key):
	"""
	Checks if key matches the current CSRF-Token of our session.
	On success, a new key is generated.

	:param key: The security key (skey) submitted by the client.
	:returns: True if the key was valid and has been rotated, False otherwise.
	"""
	if not compare_digest(self.securityKey, key):
		return False

	# It looks good so far, check if we can acquire that skey inside a transaction
	def exchangeSecurityKey():
		dbSession = db.Get(db.Key(self.kindName, self.cookieKey))
		if not dbSession:  # Should not happen (except if session.reset has been called in the same request)
			return False
		if dbSession["securityKey"] != key:
			# Race condition: that skey has been used in another instance
			return False
		dbSession["securityKey"] = utils.generateRandomString(13)
		db.Put(dbSession)
		return dbSession["securityKey"]

	try:
		newSkey = db.RunInTransaction(exchangeSecurityKey)
	except Exception:  # FIX: was a bare `except:`; this should be a transaction collision
		return False
	if not newSkey:
		return False
	self.securityKey = newSkey
	self.changed = True
	return True
def sendUserPasswordRecoveryCode(self, userName: str, recoveryKey: str) -> None:
	"""
	Sends the given recovery code to the user given in userName.

	This function runs deferred, so there's no timing side-channel that leaks
	whether this user exists. By default we'll send the code by email (assuming
	we have working email delivery), but this can be overridden to send it by
	SMS or other means. We'll also update the changedate for this user, so no
	more than one code can be sent to any given user in four hours.
	"""

	def markChangedTxn(key):
		# Bump the changedate inside a transaction
		entity = db.Get(key)
		entity["changedate"] = utcNow()
		db.Put(entity)

	user = db.Query("user").filter("name.idx =", userName).getEntry()
	if not user:
		return
	fourHoursAgo = utcNow() - datetime.timedelta(hours=4)
	if user.get("changedate") and user["changedate"] > fourHoursAgo:
		# There is a changedate and the user has been modified in the last 4 hours - abort
		return
	# Update the changedate so no more than one email is sent per 4 hours
	db.RunInTransaction(markChangedTxn, user.key)
	email.sendEMail(tpl=self.passwordRecoveryMail, skel={"recoveryKey": recoveryKey}, dests=[userName])
def doCheckForUnreferencedBlobs(cursor=None):
	"""
	Scans the viur-blob-locks kind for lock objects that still carry old blob
	references, atomically releases those references, and marks every blob that
	is no longer referenced anywhere for deletion.

	:param cursor: Datastore cursor to resume a previous (paginated) scan.
	"""

	def getOldBlobKeysTxn(dbKey):
		# Atomically fetch and clear the old references held by one lock object
		obj = db.Get(dbKey)
		res = obj["old_blob_references"] or []
		if obj["is_stale"]:
			db.Delete(dbKey)
		else:
			obj["has_old_blob_references"] = False
			obj["old_blob_references"] = []
			db.Put(obj)
		return res

	query = db.Query("viur-blob-locks").filter("has_old_blob_references", True).setCursor(cursor)
	for lockObj in query.run(100):
		oldBlobKeys = db.RunInTransaction(getOldBlobKeysTxn, lockObj.key)
		for blobKey in oldBlobKeys:
			if db.Query("viur-blob-locks").filter("active_blob_references =", blobKey).getEntry():
				# This blob is referenced elsewhere
				logging.info("Stale blob is still referenced, %s" % blobKey)
				continue
			# Add a marker and schedule it for deletion
			fileObj = db.Query("viur-deleted-files").filter("dlkey", blobKey).getEntry()
			if fileObj:  # Its already marked
				logging.info("Stale blob already marked for deletion, %s" % blobKey)
				# FIX: was `return`, which aborted the whole scan (and the
				# pagination below) as soon as one already-marked blob was seen
				continue
			fileObj = db.Entity(db.Key("viur-deleted-files"))
			fileObj["itercount"] = 0
			fileObj["dlkey"] = str(blobKey)
			logging.info("Stale blob marked dirty, %s" % blobKey)
			db.Put(fileObj)
	newCursor = query.getCursor()
	if newCursor:
		# Continue with the next page of lock objects
		doCheckForUnreferencedBlobs(newCursor)
def updateParentRepo(self, parentNode: str, newRepoKey: str, depth: int = 0):
	"""
	Recursively fixes the parentrepo key after a move operation.

	Walks all nodes (and, if this tree has leafs, all leafs) below *parentNode*
	and sets their "parentrepo" property to *newRepoKey*.
	(FIX: the previous docstring wrongly claimed this deletes entries and
	documented a non-existent parameter *newNode*.)

	:param parentNode: URL-safe key of the node which children should be fixed.
	:param newRepoKey: URL-safe key of the new repository.
	:param depth: Safety level depth preventing infinitive loops.
	"""
	if depth > 99:
		logging.critical("Maximum recursion depth reached in server.applications.tree/fixParentRepo")
		logging.critical("Your data is corrupt!")
		logging.critical("Params: parentNode: %s, newRepoKey: %s" % (parentNode, newRepoKey))
		return

	def fixTxn(nodeKey, newRepoKey):
		# Rewrite the parentrepo property of a single entity
		node = db.Get(nodeKey)
		node["parentrepo"] = newRepoKey
		db.Put(node)

	# Fix all nodes
	q = db.Query(self.viewSkel("node").kindName).filter("parententry =", parentNode)
	for repo in q.iter():
		self.updateParentRepo(repo.key, newRepoKey, depth=depth + 1)
		db.RunInTransaction(fixTxn, repo.key, newRepoKey)
	# Fix the leafs on this level
	if self.leafSkelCls:
		q = db.Query(self.viewSkel("leaf").kindName).filter("parententry =", parentNode)
		for repo in q.iter():
			db.RunInTransaction(fixTxn, repo.key, newRepoKey)
def startProcessing(self, step, orderID):
	"""
	Starts a PayPal express checkout for the given order and redirects
	the client to the PayPal payment URL.

	:param step: Current step of the order process (unused here).
	:param orderID: Key of the order to process.
	:raises errors.Redirect: Always, on success, to send the user to PayPal.
	"""

	def storeTokenTxn(orderKey, paypalToken):
		# Persist the checkout token on the order entity
		entity = db.Get(orderKey)
		if not entity:
			return
		# NOTE(review): urllib.unquote is Python-2 style; Python 3 uses
		# urllib.parse.unquote - confirm which runtime this targets
		entity["paypal_token"] = urllib.unquote(paypalToken)
		db.Put(entity)

	handler = PayPal.PayPalHandler()
	orderKey = db.Key(orderID)
	orderEntity = db.Get(orderKey)
	if not orderEntity:
		return
	token = handler.SetExpressCheckout("%.2f" % orderEntity["price"])
	db.RunInTransaction(storeTokenTxn, orderKey, token)
	raise errors.Redirect(handler.getPayURL(token))
def updateTimeDrift(self, userKey, idx):
	"""
	Updates the clock-drift value.

	The value is only changed in 1/10 steps, so that a late submit by a user
	doesn't skew it out of bounds. Maximum change per call is 0.3 minutes.

	:param userKey: For which user should the update occour
	:param idx: How many steps before/behind was that token
	:return:
	"""

	def updateTransaction(userKey, idx):
		user = db.Get(userKey)
		# Initialize the drift if it's missing or not a float
		# (FIX: idiom - was `not "otptimedrift" in user`)
		if "otptimedrift" not in user or not isinstance(user["otptimedrift"], float):
			user["otptimedrift"] = 0.0
		# Apply a tenth of idx, clamped to [-0.3, 0.3]
		user["otptimedrift"] += min(max(0.1 * idx, -0.3), 0.3)
		db.Put(user)

	db.RunInTransaction(updateTransaction, userKey, idx)
def decrementQuota(self):
	"""
	Removes one attempt from the pool of available Quota for that user/ip.

	The counter entity is created on first use and carries an expiry of
	twice the configured window.
	"""

	def updateTxn(cacheKey):
		entityKey = db.Key(self.rateLimitKind, cacheKey)
		counter = db.Get(entityKey)
		if counter is None:
			# First attempt inside this window - create the counter
			counter = db.Entity(entityKey)
			counter["value"] = 0
		counter["value"] += 1
		counter["expires"] = utils.utcNow() + timedelta(minutes=2 * self.minutes)
		db.Put(counter)

	lockKey = "%s-%s-%s" % (self.resource, self._getEndpointKey(), self._getCurrentTimeKey())
	db.RunInTransaction(updateTxn, lockKey)
def setState(self, orderKey, state, removeState=False):
	"""
	Set a status on the given order.

	:param orderKey: Key of the order
	:type orderKey: str
	:param state: An state out of self.states
	:type state: str
	:param removeState: Should the state be removed instead of set
	:type removeState: bool
	"""

	def txn(orderKey, state, removeState):
		entity = db.Get(db.Key(orderKey))
		if not entity:
			return
		entity["state_%s" % state] = "0" if removeState else "1"
		# NOTE(review): datetime.now() is naive local time; other code here
		# uses utcNow() - confirm this is intended
		entity["changedate"] = datetime.now()
		db.Put(entity)

	db.RunInTransaction(txn, orderKey, state, removeState)
def ensureDerived(key: db.KeyClass, srcKey, deriveMap: Dict[str, Any]):
	"""
	Ensure that pending thumbnails or other derived Files are built.

	:param key: DB-Key of the file-object on which we should update the derivemap
	:param srcKey: Prefix for a (hopefully) stable key to prevent rebuilding derives over and over again
	:param deriveMap: List of DeriveDicts we should build/update
	"""
	from viur.core.skeleton import skeletonByKind, updateRelations
	deriveFuncMap = conf["viur.file.derivers"]
	skel = skeletonByKind("file")()
	if not skel.fromDB(key):
		logging.info("File-Entry went missing in ensureDerived")
		return
	if not skel["derived"]:
		logging.info("No Derives for this file")
		skel["derived"] = {}
	# Normalize the derived map so both sub-dicts always exist
	skel["derived"]["deriveStatus"] = skel["derived"].get("deriveStatus") or {}
	skel["derived"]["files"] = skel["derived"].get("files") or {}
	resDict = {}  # Will contain new or updated resultDicts that will be merged into our file
	for calleeKey, params in deriveMap.items():
		fullSrcKey = "%s_%s" % (srcKey, calleeKey)
		paramsHash = sha256(str(params).encode("UTF-8")).hexdigest()  # Hash over given params (dict?)
		# Only rebuild when the stored hash differs from the requested params
		if skel["derived"]["deriveStatus"].get(fullSrcKey) != paramsHash:
			if calleeKey not in deriveFuncMap:
				logging.warning("File-Deriver %s not found - skipping!" % calleeKey)
				continue
			callee = deriveFuncMap[calleeKey]
			callRes = callee(skel, skel["derived"]["files"], params)
			if callRes:
				assert isinstance(callRes, list), "Old (non-list) return value from deriveFunc"
				resDict[fullSrcKey] = {"version": paramsHash, "files": {}}
				for fileName, size, mimetype, customData in callRes:
					resDict[fullSrcKey]["files"][fileName] = {
						"size": size,
						"mimetype": mimetype,
						"customData": customData
					}

	def updateTxn(key, resDict):
		# Merge resDict into the stored entity inside a transaction, so we
		# don't clobber results written by concurrent ensureDerived calls
		obj = db.Get(key)
		if not obj:  # File-object got deleted during building of our derives
			return
		obj["derived"] = obj.get("derived") or {}
		obj["derived"]["deriveStatus"] = obj["derived"].get("deriveStatus") or {}
		obj["derived"]["files"] = obj["derived"].get("files") or {}
		for k, v in resDict.items():
			obj["derived"]["deriveStatus"][k] = v["version"]
			for fileName, fileDict in v["files"].items():
				obj["derived"]["files"][fileName] = fileDict
		db.Put(obj)

	if resDict:  # Write updated results back and queue updateRelationsTask
		db.RunInTransaction(updateTxn, key, resDict)
		# Queue that updateRelations call at least 30 seconds into the future, so that other ensureDerived calls from
		# the same fileBone have the chance to finish, otherwise that updateRelations Task will call postSavedHandler
		# on that fileBone again - re-queueing any ensureDerivedCalls that have not finished yet.
		updateRelations(key, time() + 1, "derived", _countdown=30)