def test_auto_prolong_mongo_lock(monkeypatch, lock_collection):
    """Verify that AutoProlongMongoLock keeps extending its expiration.

    The prolong timeout is shrunk to 1 second so the background prolonger
    thread fires within the test's sleeps.  After each sleep the lock
    document in Mongo must show a later ``expired_at`` while still being
    held by the same locker; releasing must clear the locker, zero the
    expiration and stop the prolonger thread.
    """
    # Shrink the timeout on both classes so prolongation happens quickly.
    monkeypatch.setattr(lock.BaseMongoLock, "DEFAULT_PROLONG_TIMEOUT", 1)
    monkeypatch.setattr(lock.AutoProlongMongoLock, "DEFAULT_PROLONG_TIMEOUT", 1)

    lockname = pytest.faux.gen_alphanumeric()
    lock1 = lock.AutoProlongMongoLock(lockname)
    initial_time = timeutils.current_unix_timestamp()

    lock1.acquire()
    # Sleep past one prolong interval so the background thread must have run.
    time.sleep(lock.AutoProlongMongoLock.DEFAULT_PROLONG_TIMEOUT + 1)
    db_model = lock_collection.find_one({"_id": lockname})
    assert db_model
    assert db_model["locker"] == lock1.lock_id
    # Expiration must have moved beyond the first naive deadline.
    assert db_model["expired_at"] > initial_time + \
        lock.AutoProlongMongoLock.DEFAULT_PROLONG_TIMEOUT

    time2 = db_model["expired_at"]
    # A second interval: expiration must advance again (continuous prolong).
    time.sleep(lock.AutoProlongMongoLock.DEFAULT_PROLONG_TIMEOUT + 1)
    db_model = lock_collection.find_one({"_id": lockname})
    assert db_model
    assert db_model["locker"] == lock1.lock_id
    assert db_model["expired_at"] > time2

    lock1.release()
    # Release keeps the document but clears ownership and expiration.
    db_model = lock_collection.find_one({"_id": lockname})
    assert db_model
    assert db_model["locker"] is None
    assert db_model["expired_at"] == 0
    # The background prolonger thread must terminate on release.
    assert not lock1.prolonger_thread.is_alive()
def try_to_acquire(self, force=False):
    """Attempt a single, atomic acquisition of the Mongo-backed lock.

    Uses an upserted find-and-modify: when ``force`` is False the filter
    only matches an unowned, expired lock document, so a concurrent holder
    makes the upsert collide on ``_id`` and PyMongo raises — which is
    translated into :class:`exceptions.MongoLockCannotAcquire`.

    :param force: if True, steal the lock regardless of current owner.
    :raises exceptions.MongoLockCannotAcquire: lock is held by someone else
        or the database operation failed.
    """
    current_time = timeutils.current_unix_timestamp()
    query = {"_id": self.lockname}
    if not force:
        # Only match a free (locker is None) and already expired document;
        # otherwise the upsert triggers a duplicate-key error.
        query["locker"] = None
        query["expired_at"] = {"$lte": current_time}
    try:
        self.find_method(
            query,
            {
                "$set": {
                    "locker": self.lock_id,
                    "expired_at":
                        current_time + self.DEFAULT_PROLONG_TIMEOUT
                }
            },
            upsert=True,
            return_document=pymongo.ReturnDocument.BEFORE
        )
    except pymongo.errors.PyMongoError as exc:
        raise exceptions.MongoLockCannotAcquire() from exc

    # Fixed log grammar: "was acquire by" -> "was acquired by".
    LOG.debug("Lock %s was acquired by locker %s",
              self.lockname, self.lock_id)
    self.acquired = True
def new_time_bounce(self):
    """Return the unix timestamp of the next bounce attempt.

    The delay grows with the number of previous bounces: a triangular
    distribution is sampled between one BOUNCE_TIMEOUT from now and
    ``self.bounced`` additional timeouts further out.
    """
    lower = timeutils.current_unix_timestamp() + BOUNCE_TIMEOUT
    upper = lower + self.bounced * BOUNCE_TIMEOUT
    return int(random.triangular(lower, upper))
def save(self, structure=None):
    """This method dumps model data to the database.

    Important here is that new version will be created on saving. So
    it is OK to update a field and save, new version will be created.

    Since model data is immutable in some sense, this is a reason why
    we do not have `update` method here.
    """
    # Validate model invariants before touching the database.
    self.check_constraints()

    if not structure:
        structure = self.make_db_document_structure()

    # Every save creates a NEW document with a bumped version number.
    structure["version"] = self.version + 1
    if structure["model_id"] is None:
        # First save ever: mint a stable model_id shared by all versions.
        structure["model_id"] = self.model_id or str(uuid.uuid4())
    structure["is_latest"] = True
    structure["time_created"] = timeutils.current_unix_timestamp()

    result = self.insert_document(structure)
    # Sync the in-memory model with what was just persisted.
    self.update_from_db_document(structure)
    # Demote every other version of this model: only the new document
    # keeps is_latest=True.  NOTE(review): insert and demotion are not
    # atomic — briefly two documents may claim is_latest.
    self.collection().update_many(
        {
            "model_id": self.model_id,
            "_id": {"$ne": self._id},
            "is_latest": True
        },
        {"$set": {"is_latest": False}}
    )

    return result
def new_time_bounce(self):
    """Compute when this task should be retried next.

    Samples a triangular distribution whose window starts one
    BOUNCE_TIMEOUT in the future and widens proportionally to how many
    times the task has already bounced.
    """
    now = timeutils.current_unix_timestamp()
    window_start = now + BOUNCE_TIMEOUT
    window_width = self.bounced * BOUNCE_TIMEOUT
    chosen = random.triangular(window_start, window_start + window_width)
    return int(chosen)
def watch(cls, stop_condition=None, exit_on_empty=False):
    """Watch for a new tasks appear in queue.

    It is a generator, which yields tasks in correct order to be
    managed. It looks like an ideal usecase for MongoDB capped
    collections and tailable cursors, but in fact, due to limitations
    (not possible to change size of document -> cannot set error
    message etc) it is a way easier to maintain classic collections.

    :param stop_condition: threading.Event to stop the generator.
    :param exit_on_empty: finish the generator when queue drains.
    :raises exceptions.InternalDBError: on unrecoverable DB failure.
    """
    # A task is watchable when it has not started/finished and its
    # bounce deadline has passed (updated each iteration below).
    query = {
        "time.started": 0,
        "time.completed": 0,
        "time.cancelled": 0,
        "time.failed": 0,
        "time.bounced": {"$lte": 0}
    }
    sortby = [
        ("bounced", generic.SORT_DESC),
        ("time.bounced", generic.SORT_ASC),
        ("time.created", generic.SORT_ASC)
    ]
    collection = cls.collection()
    stop_condition = stop_condition or threading.Event()
    find_method = retryutils.mongo_retry()(collection.find_one)

    try:
        while not stop_condition.is_set():
            fetched_at = timeutils.current_unix_timestamp()
            query["time.bounced"]["$lte"] = fetched_at
            document = find_method(query, sort=sortby)
            if stop_condition.is_set():
                # BUG FIX: raising StopIteration inside a generator is a
                # RuntimeError since PEP 479 (Python 3.7+); a plain
                # return is the correct way to finish the generator.
                return
            if document:
                yield cls.make_task(document)
            elif exit_on_empty:
                return
            watch_again = timeutils.current_unix_timestamp()
            if fetched_at == watch_again:
                # Nothing happened within this second: avoid busy-looping.
                stop_condition.wait(1)
    except pymongo.errors.OperationFailure as exc:
        LOG.exception("Cannot continue to listen to queue: %s", exc)
        raise exceptions.InternalDBError() from exc
def watch(cls, stop_condition=None, exit_on_empty=False):
    """Watch for a new tasks appear in queue.

    It is a generator, which yields tasks in correct order to be
    managed. It looks like an ideal usecase for MongoDB capped
    collections and tailable cursors, but in fact, due to limitations
    (not possible to change size of document -> cannot set error
    message etc) it is a way easier to maintain classic collections.

    :param stop_condition: optional threading.Event that stops watching.
    :param exit_on_empty: stop as soon as no eligible task is found.
    :raises exceptions.InternalDBError: if MongoDB fails permanently.
    """
    # Eligible tasks: never started/completed/cancelled/failed and the
    # bounce deadline (refreshed per loop) has elapsed.
    query = {
        "time.started": 0,
        "time.completed": 0,
        "time.cancelled": 0,
        "time.failed": 0,
        "time.bounced": {"$lte": 0}
    }
    sortby = [
        ("bounced", generic.SORT_DESC),
        ("time.bounced", generic.SORT_ASC),
        ("time.created", generic.SORT_ASC)
    ]
    collection = cls.collection()
    stop_condition = stop_condition or threading.Event()
    find_method = retryutils.mongo_retry()(collection.find_one)

    try:
        while not stop_condition.is_set():
            fetched_at = timeutils.current_unix_timestamp()
            query["time.bounced"]["$lte"] = fetched_at
            document = find_method(query, sort=sortby)
            if stop_condition.is_set():
                # BUG FIX (PEP 479): "raise StopIteration()" inside a
                # generator turns into RuntimeError on Python 3.7+.
                # "return" terminates the generator correctly.
                return
            if document:
                yield cls.make_task(document)
            elif exit_on_empty:
                return
            watch_again = timeutils.current_unix_timestamp()
            if fetched_at == watch_again:
                # Same second as the fetch: sleep briefly to avoid a hot loop.
                stop_condition.wait(1)
    except pymongo.errors.OperationFailure as exc:
        LOG.exception("Cannot continue to listen to queue: %s", exc)
        raise exceptions.InternalDBError() from exc
def get(self):
    """Report server time (local, UTC, unix) and the API package version."""
    local_now = datetime.datetime.now()
    utc_now = datetime.datetime.utcnow()
    payload = {
        "time": {
            "local": local_now.isoformat(),
            "utc": utc_now.isoformat(),
            "unix": timeutils.current_unix_timestamp()
        },
        "version": pkg_resources.get_distribution("decapod_api").version
    }
    return payload
def delete(self):
    """This method marks model as deleted.

    Soft delete: a fresh version of the document is saved with
    ``time_deleted`` set, nothing is physically removed.
    """
    doc = self.make_db_document_structure()
    doc["time_deleted"] = timeutils.current_unix_timestamp()
    return self.save(doc)
def get(self):
    """Return current server time in three representations plus API version."""
    times = {
        "local": datetime.datetime.now().isoformat(),
        "utc": datetime.datetime.utcnow().isoformat(),
        "unix": timeutils.current_unix_timestamp()
    }
    version = pkg_resources.get_distribution("decapod_api").version
    return {"time": times, "version": version}
def cancel(self):
    """Cancels task execution.

    Only a task that is not yet failed, completed or cancelled may be
    cancelled; otherwise CannotCancelTaskError is raised by _update.
    """
    still_active = {
        "time.failed": 0,
        "time.completed": 0,
        "time.cancelled": 0,
    }
    mark_cancelled = {
        "time.cancelled": timeutils.current_unix_timestamp()
    }
    return self._update(still_active, mark_cancelled,
                        exceptions.CannotCancelTaskError)
def make_db_document_specific_fields(self):
    """Build the token-specific portion of the DB document.

    When no explicit expiration is set, one is derived from the default
    TTL relative to the current time.
    """
    expiry = self.expires_at
    if not expiry:
        expiry = timeutils.current_unix_timestamp() + self.default_ttl

    return {
        "user_id": self.user_id,
        "expires_at": expiry,
        "model_id": self.model_id,
        "initiator_id": self.initiator_id
    }
def clean_expired_password_resets():
    """This function swipes expired password reset tokens from DB."""
    now = timeutils.current_unix_timestamp()
    outcome = password_reset.PasswordReset.collection().delete_many(
        {"expires_at": {"$lt": now}})

    LOG.info(
        "Clean expired password reset tokens. Removed all tokens pre %d (%s). "
        "Cleaned %d tokens.",
        now, time.ctime(now), outcome.deleted_count)
def start(self):
    """Starts task execution.

    Succeeds only for a pristine task (no terminal state, not started);
    otherwise _update raises CannotStartTaskError.
    """
    pristine = {
        "time.failed": 0,
        "time.completed": 0,
        "time.cancelled": 0,
        "time.started": 0
    }
    mark_started = {"time.started": timeutils.current_unix_timestamp()}
    return self._update(pristine, mark_started,
                        exceptions.CannotStartTaskError)
def clean_expired_tokens():
    """This function swipe out expired tokens from DB."""
    now = timeutils.current_unix_timestamp()
    outcome = token.TokenModel.collection().delete_many(
        {"expires_at": {"$lt": now}})

    LOG.info(
        "Clean expired tokens. Removed all tokens pre %d (%s). "
        "Cleaned %d tokens.",
        now, time.ctime(now), outcome.deleted_count)
def get(cls, token):
    """Fetch a token by id, honoring expiration.

    Returns None when the token is missing or already expired.
    """
    now = timeutils.current_unix_timestamp()
    document = cls.collection().find_one(
        {"_id": token, "expires_at": {"$gte": now}})
    if not document:
        return None

    instance = cls()
    instance.update(document)
    return instance
def create(cls, user_id, ttl=None):
    """Create and persist a password-reset token for a user.

    TTL defaults to the configured password_reset_ttl_in_seconds; the
    save is retried to ride out transient DB hiccups.
    """
    ttl = ttl or CONF["common"]["password_reset_ttl_in_seconds"]

    reset = cls()
    reset.user_id = user_id
    reset.expires_at = timeutils.current_unix_timestamp() + ttl

    def _persist(model):
        # Regenerate the id on every attempt so a retry after a
        # collision gets a fresh one.
        model._id = model.generate_new_id()
        model.save()
        return model

    return retryutils.simple_retry()(_persist)(reset)
def consume(self, new_password):
    """Consume this password-reset token and set the user's new password.

    The token is deleted first so it is single-use even when a later
    check fails.  On success every auth token of the user is revoked.

    :param new_password: plaintext password to hash and store.
    :raises exceptions.PasswordResetExpiredError: token past its TTL.
    :raises exceptions.PasswordResetUnknownUser: user missing or deleted.
    """
    # Burn the token up-front: it must not be reusable after any outcome.
    self.delete()

    if self.expires_at < timeutils.current_unix_timestamp():
        raise exceptions.PasswordResetExpiredError

    user_model = user.UserModel.find_by_model_id(self.user_id)
    if not user_model or user_model.time_deleted:
        raise exceptions.PasswordResetUnknownUser

    user_model.password_hash = passwords.hash_password(new_password)
    user_model.save()

    # Invalidate all active sessions of the user.  Collection.remove()
    # is deprecated (removed in PyMongo 4); delete_many is the supported
    # equivalent and matches usage elsewhere in this codebase.
    token.TokenModel.collection().delete_many(
        {"user_id": user_model.model_id})
def cancel(self):
    """Cancels task execution.

    Besides stamping the cancellation time, this schedules the document
    for TTL-based expiry using the task's default TTL.
    """
    active_filter = {
        "time.failed": 0,
        "time.completed": 0,
        "time.cancelled": 0,
    }
    updates = {
        "time.cancelled": timeutils.current_unix_timestamp(),
        TTL_FIELDNAME: timeutils.ttl(self.default_ttl)
    }
    return self._update(active_filter, updates,
                        exceptions.CannotCancelTaskError)
def create(cls, name, script_hash, state, stdout, stderr,
           time_executed=None):
    """Create and persist an execution record.

    When no execution time is supplied, the current unix timestamp
    is used.
    """
    instance = cls()
    instance._id = name
    instance.script_hash = script_hash
    instance.state = state
    instance.time_executed = (
        time_executed or timeutils.current_unix_timestamp())
    instance.stdout = stdout
    instance.stderr = stderr
    instance.save()

    return instance
def fail(self, error_message="Internal error"):
    """Fails task execution.

    Allowed only for a task that has started and reached no terminal
    state yet; records the error message and arms the TTL cleanup.
    """
    running_filter = {
        "time.failed": 0,
        "time.completed": 0,
        "time.cancelled": 0,
        "time.started": {"$ne": 0}
    }
    updates = {
        "time.failed": timeutils.current_unix_timestamp(),
        "error": error_message,
        TTL_FIELDNAME: timeutils.ttl(self.default_ttl)
    }
    return self._update(running_filter, updates, exceptions.CannotFailTask)
def create(cls, name, script_hash, state, stdout, stderr,
           time_executed=None):
    """Build, populate and save an execution-result record.

    Falls back to "now" when the caller does not provide a timestamp.
    """
    when = time_executed
    if not when:
        when = timeutils.current_unix_timestamp()

    record = cls()
    record._id = name
    record.script_hash = script_hash
    record.state = state
    record.time_executed = when
    record.stdout = stdout
    record.stderr = stderr
    record.save()

    return record
def fail(self, error_message="Internal error"):
    """Fails task execution.

    Precondition: task was started and is not finished/cancelled/failed.
    Stamps the failure time, stores the message and arms TTL expiry.
    """
    must_be_running = {
        "time.failed": 0,
        "time.completed": 0,
        "time.cancelled": 0,
        "time.started": {"$ne": 0}
    }
    failure_fields = {
        "time.failed": timeutils.current_unix_timestamp(),
        "error": error_message,
        TTL_FIELDNAME: timeutils.ttl(self.default_ttl)
    }
    return self._update(must_be_running, failure_fields,
                        exceptions.CannotFailTask)
def complete(self):
    """Completes task execution.

    Valid only for a started, not-yet-finished task; records completion
    time and schedules the document for TTL-based removal.
    """
    running_filter = {
        "time.failed": 0,
        "time.completed": 0,
        "time.cancelled": 0,
        "time.started": {"$ne": 0}
    }
    completion_fields = {
        "time.completed": timeutils.current_unix_timestamp(),
        TTL_FIELDNAME: timeutils.ttl(self.default_ttl)
    }
    return self._update(running_filter, completion_fields,
                        exceptions.CannotCompleteTaskError)
def _cas_update(self, query, setfields):
    """Does CAS update of the task.

    The caller's filter is augmented with the task identity, the
    "not finished" invariants and the current update marker; the update
    bumps the marker so concurrent writers lose the compare-and-swap.
    """
    cas_filter = copy.deepcopy(query)
    cas_filter.update({
        "_id": self._id,
        "time.completed": 0,
        "time.cancelled": 0,
        "time.failed": 0,
        "update_marker": self.update_marker,
    })

    cas_set = copy.deepcopy(setfields)
    cas_set["update_marker"] = self.new_update_marker()
    cas_set["time.updated"] = timeutils.current_unix_timestamp()

    updater = retryutils.mongo_retry()(
        self.collection().find_one_and_update)
    return updater(cas_filter, {"$set": cas_set},
                   return_document=pymongo.ReturnDocument.AFTER)
def clean_old_tasks():
    """This function removes old finished tasks from database.

    A task is "old" when its terminal timestamp (completed, cancelled
    or failed) is non-zero and older than the configured retention
    window.
    """
    timestamp = timeutils.current_unix_timestamp()
    old_limit = timestamp - CONF["cron"]["clean_finished_tasks_after_seconds"]
    limit_condition = {"$gt": 0, "$lte": old_limit}
    query = {
        "$or": [
            {"time.completed": limit_condition},
            {"time.cancelled": limit_condition},
            # BUG FIX: was "time_failed" (underscore) which matches no
            # documents — task timestamps use the dotted "time." prefix
            # everywhere else (see start/cancel/fail/complete), so failed
            # tasks were never cleaned up.
            {"time.failed": limit_condition}
        ]
    }
    result = task.Task.collection().delete_many(query)

    LOG.info(
        "Clean old tasks. Removed all tasks pre %d (%s). Cleaned %d tasks.",
        old_limit, time.ctime(old_limit), result.deleted_count)
def _cas_update(self, query, setfields):
    """Does CAS update of the task.

    Compare-and-swap: the filter pins this task's id, its non-terminal
    state and the update marker the caller observed; the update rotates
    the marker and refreshes time.updated, so a stale writer gets None.
    """
    selector = copy.deepcopy(query)
    selector["_id"] = self._id
    selector["time.completed"] = 0
    selector["time.cancelled"] = 0
    selector["time.failed"] = 0
    selector["update_marker"] = self.update_marker

    changes = copy.deepcopy(setfields)
    changes["update_marker"] = self.new_update_marker()
    changes["time.updated"] = timeutils.current_unix_timestamp()

    find_and_update = self.collection().find_one_and_update
    find_and_update = retryutils.mongo_retry()(find_and_update)

    return find_and_update(
        selector,
        {"$set": changes},
        return_document=pymongo.ReturnDocument.AFTER
    )
def try_to_acquire(self, force=False):
    """Try to take the lock with one atomic upserted find-and-modify.

    Without ``force`` the filter matches only a free and expired lock
    document; if someone else holds the lock, the upsert collides on
    ``_id`` and the resulting PyMongoError is converted into
    :class:`exceptions.MongoLockCannotAcquire`.

    :param force: acquire even if another locker currently owns it.
    :raises exceptions.MongoLockCannotAcquire: acquisition failed.
    """
    current_time = timeutils.current_unix_timestamp()
    query = {"_id": self.lockname}
    if not force:
        # Restrict to unowned, expired documents so a held lock makes
        # the upsert fail with a duplicate key.
        query["locker"] = None
        query["expired_at"] = {"$lte": current_time}
    try:
        self.find_method(
            query,
            {
                "$set": {
                    "locker": self.lock_id,
                    "expired_at":
                        current_time + self.DEFAULT_PROLONG_TIMEOUT
                }
            },
            upsert=True,
            return_document=pymongo.ReturnDocument.BEFORE
        )
    except pymongo.errors.PyMongoError as exc:
        raise exceptions.MongoLockCannotAcquire() from exc

    # Fixed log grammar: "was acquire by" -> "was acquired by".
    LOG.debug("Lock %s was acquired by locker %s",
              self.lockname, self.lock_id)
    self.acquired = True
def prolong(self, force=False):
    """Push this lock's expiration forward by DEFAULT_PROLONG_TIMEOUT.

    Without ``force`` only a lock we actually hold (acquired flag set
    and document owned by our lock_id) can be prolonged.

    :param force: prolong regardless of local/remote ownership checks.
    :raises exceptions.MongoLockCannotProlong: lock not held or the
        database did not confirm the new expiration.
    """
    if not self.acquired and not force:
        LOG.warning("Cannot prolong mongo lock %s", self.lockname)
        raise exceptions.MongoLockCannotProlong()

    query = {"_id": self.lockname}
    if not force:
        # Make sure we only touch a document we still own.
        query["locker"] = self.lock_id

    time_to_update = timeutils.current_unix_timestamp() + \
        self.DEFAULT_PROLONG_TIMEOUT
    result = self.find_method(
        query,
        {"$set": {"expired_at": time_to_update}},
        return_document=pymongo.ReturnDocument.AFTER
    )

    if not result or result["expired_at"] != time_to_update:
        LOG.warning("Cannot prolong mongo lock %s: %s",
                    self.lockname, result)
        raise exceptions.MongoLockCannotProlong()

    # Fixed log typo: "proloned" -> "prolonged".
    LOG.debug("Lock %s was prolonged by locker %s.",
              self.lockname, self.lock_id)
def create(self):
    """Creates model in database.

    Stamps creation/update times and the initial update marker, inserts
    the document and re-reads it so the in-memory state mirrors what
    MongoDB actually stored.
    """
    state = self.get_state()
    state.pop("_id", None)

    now = timeutils.current_unix_timestamp()
    state["time"]["created"] = now
    state["time"]["updated"] = now
    state["update_marker"] = self.new_update_marker()

    collection = self.collection()
    insert_one = retryutils.mongo_retry()(collection.insert_one)
    find_one = retryutils.mongo_retry()(collection.find_one)

    try:
        insert_result = insert_one(state)
    except pymongo.errors.DuplicateKeyError as exc:
        raise exceptions.UniqueConstraintViolationError from exc

    stored = find_one({"_id": insert_result.inserted_id})
    self.set_state(stored)

    return self
def create(self):
    """Creates model in database.

    Prepares the state document (fresh timestamps and update marker),
    inserts it, then fetches it back by the generated _id so local
    state reflects the persisted document.
    """
    state = self.get_state()
    state.pop("_id", None)
    created_at = timeutils.current_unix_timestamp()
    state["time"]["created"] = created_at
    state["time"]["updated"] = created_at
    state["update_marker"] = self.new_update_marker()

    coll = self.collection()
    retrying_insert = retryutils.mongo_retry()(coll.insert_one)
    retrying_find = retryutils.mongo_retry()(coll.find_one)

    try:
        inserted = retrying_insert(state)
    except pymongo.errors.DuplicateKeyError as exc:
        raise exceptions.UniqueConstraintViolationError from exc

    persisted = retrying_find({"_id": inserted.inserted_id})
    self.set_state(persisted)

    return self
def prolong(self, force=False):
    """Extend the lock's expiration by another DEFAULT_PROLONG_TIMEOUT.

    Ownership is checked both locally (``self.acquired``) and in the
    database filter (``locker == self.lock_id``) unless ``force`` is
    set.  The updated document must echo the new expiration back,
    otherwise the prolongation is treated as failed.

    :param force: skip ownership checks.
    :raises exceptions.MongoLockCannotProlong: prolongation failed.
    """
    if not self.acquired and not force:
        LOG.warning("Cannot prolong mongo lock %s", self.lockname)
        raise exceptions.MongoLockCannotProlong()

    query = {"_id": self.lockname}
    if not force:
        query["locker"] = self.lock_id

    time_to_update = (timeutils.current_unix_timestamp()
                      + self.DEFAULT_PROLONG_TIMEOUT)
    result = self.find_method(
        query,
        {"$set": {"expired_at": time_to_update}},
        return_document=pymongo.ReturnDocument.AFTER
    )
    if not result or result["expired_at"] != time_to_update:
        LOG.warning("Cannot prolong mongo lock %s: %s",
                    self.lockname, result)
        raise exceptions.MongoLockCannotProlong()

    # Fixed log typo: "proloned" -> "prolonged".
    LOG.debug("Lock %s was prolonged by locker %s.",
              self.lockname, self.lock_id)
def find_token(cls, token_id):
    """This method returns token by the given token_id.

    It also respects expiration time. So even if token is exist but
    expired, it won't be found.

    Returns None if nothing is found.
    """
    now = timeutils.current_unix_timestamp()
    document = cls.collection().find_one({
        "model_id": token_id,
        "expires_at": {"$gte": now}
    })
    if not document:
        return None

    model = cls()
    model.update_from_db_document(document)
    return model
def test_check_unix_timestamp_valid(freeze_time):
    """current_unix_timestamp must track the wall clock across time passing."""
    def matches_wallclock():
        return timeutils.current_unix_timestamp() == int(time.time())

    assert matches_wallclock()
    time.sleep(1.2)
    assert matches_wallclock()
def test_check_unix_timestamp_int(timestamp, freeze_time):
    """With a frozen clock, current_unix_timestamp returns the int timestamp."""
    freeze_time.return_value = timestamp
    observed = timeutils.current_unix_timestamp()
    assert observed == 100