def __init__(self, lockname):
    """Initialize the lock in an unacquired state with a unique identity."""
    self.acquired = False
    self.lockname = lockname
    # A fresh UUID distinguishes this holder from any other contender.
    self.lock_id = str(uuid.uuid4())
    # Wrap the CAS primitive once so every acquire/release attempt is
    # retried against transient MongoDB failures.
    cas_method = self.collection().find_one_and_update
    self.find_method = retryutils.mongo_retry()(cas_method)
def unlock_servers(cls, servers):
    """Release the lock on every given server that currently holds one."""
    if not servers:
        return

    ids = [item._id for item in servers]
    retried_update = retryutils.mongo_retry()(cls.collection().update_many)
    # Only touch documents whose lock is actually set.
    retried_update(
        {"_id": {"$in": ids}, "lock": {"$ne": None}},
        {"$set": {"lock": None}},
    )
def create(self): """Creates model in database.""" state = self.get_state() state.pop("_id", None) state["time"]["created"] = timeutils.current_unix_timestamp() state["time"]["updated"] = state["time"]["created"] state["update_marker"] = self.new_update_marker() collection = self.collection() insert_method = retryutils.mongo_retry()(collection.insert_one) find_method = retryutils.mongo_retry()(collection.find_one) try: document = insert_method(state) except pymongo.errors.DuplicateKeyError as exc: raise exceptions.UniqueConstraintViolationError from exc document = find_method({"_id": document.inserted_id}) self.set_state(document) return self
def create(self): """Creates model in database.""" state = self.get_state() state.pop("_id", None) state["time"]["created"] = timeutils.current_unix_timestamp() state["time"]["updated"] = state["time"]["created"] state["update_marker"] = self.new_update_marker() collection = self.collection() insert_method = retryutils.mongo_retry()(collection.insert_one) find_method = retryutils.mongo_retry()(collection.find_one) try: document = insert_method(state) except pymongo.errors.DuplicateKeyError as exc: raise exceptions.UniqueConstraintViolationError from exc document = find_method({"_id": document.inserted_id}) self.set_state(document) return self
def watch(cls, stop_condition=None, exit_on_empty=False):
    """Watch for a new tasks appear in queue.

    It is a generator, which yields tasks in correct order to be managed.

    It looks like an ideal usecase for MongoDB capped collections and
    tailable cursors, but in fact, due to limitations (not possible to
    change size of document -> cannot set error message etc) it is a way
    easier to maintain classic collections.
    """
    # Only tasks that have not started/finished in any way are eligible.
    query = {
        "time.started": 0,
        "time.completed": 0,
        "time.cancelled": 0,
        "time.failed": 0,
        "time.bounced": {"$lte": 0}
    }
    sortby = [
        ("bounced", generic.SORT_DESC),
        ("time.bounced", generic.SORT_ASC),
        ("time.created", generic.SORT_ASC)
    ]
    collection = cls.collection()
    stop_condition = stop_condition or threading.Event()
    find_method = retryutils.mongo_retry()(collection.find_one)

    try:
        while not stop_condition.is_set():
            fetched_at = timeutils.current_unix_timestamp()
            # Bounced tasks become visible once their bounce time passes.
            query["time.bounced"]["$lte"] = fetched_at
            document = find_method(query, sort=sortby)
            # BUGFIX: `raise StopIteration()` inside a generator is
            # converted to RuntimeError by PEP 479 (Python 3.7+); a plain
            # `return` is the correct way to finish a generator.
            if stop_condition.is_set():
                return
            if document:
                yield cls.make_task(document)
            elif exit_on_empty:
                return
            # Avoid busy-polling: if the whole iteration took less than a
            # second, sleep (interruptibly) before the next poll.
            watch_again = timeutils.current_unix_timestamp()
            if fetched_at == watch_again:
                stop_condition.wait(1)
    except pymongo.errors.OperationFailure as exc:
        LOG.exception("Cannot continue to listen to queue: %s", exc)
        raise exceptions.InternalDBError() from exc
def watch(cls, stop_condition=None, exit_on_empty=False):
    """Watch for a new tasks appear in queue.

    It is a generator, which yields tasks in correct order to be managed.

    It looks like an ideal usecase for MongoDB capped collections and
    tailable cursors, but in fact, due to limitations (not possible to
    change size of document -> cannot set error message etc) it is a way
    easier to maintain classic collections.
    """
    # Only tasks that have not started/finished in any way are eligible.
    query = {
        "time.started": 0,
        "time.completed": 0,
        "time.cancelled": 0,
        "time.failed": 0,
        "time.bounced": {"$lte": 0}
    }
    sortby = [
        ("bounced", generic.SORT_DESC),
        ("time.bounced", generic.SORT_ASC),
        ("time.created", generic.SORT_ASC)
    ]
    collection = cls.collection()
    stop_condition = stop_condition or threading.Event()
    find_method = retryutils.mongo_retry()(collection.find_one)

    try:
        while not stop_condition.is_set():
            fetched_at = timeutils.current_unix_timestamp()
            # Bounced tasks become visible once their bounce time passes.
            query["time.bounced"]["$lte"] = fetched_at
            document = find_method(query, sort=sortby)
            # BUGFIX: `raise StopIteration()` inside a generator is
            # converted to RuntimeError by PEP 479 (Python 3.7+); a plain
            # `return` is the correct way to finish a generator.
            if stop_condition.is_set():
                return
            if document:
                yield cls.make_task(document)
            elif exit_on_empty:
                return
            # Avoid busy-polling: if the whole iteration took less than a
            # second, sleep (interruptibly) before the next poll.
            watch_again = timeutils.current_unix_timestamp()
            if fetched_at == watch_again:
                stop_condition.wait(1)
    except pymongo.errors.OperationFailure as exc:
        LOG.exception("Cannot continue to listen to queue: %s", exc)
        raise exceptions.InternalDBError() from exc
def lock_servers(cls, servers):
    """Atomically lock all given servers, rolling back on partial success."""
    if not servers:
        return

    ids = [item._id for item in servers]
    token = str(uuid.uuid4())
    retried_update = retryutils.mongo_retry()(cls.collection().update_many)

    # Try to claim every unlocked server with our unique token.
    outcome = retried_update(
        {"_id": {"$in": ids}, "lock": None},
        {"$set": {"lock": token}},
    )
    if outcome.modified_count == len(ids):
        return

    # Partial success: release only the servers we managed to claim
    # (matched by our token), then report failure.
    if outcome.modified_count:
        retried_update(
            {"_id": {"$in": ids}, "lock": token},
            {"$set": {"lock": None}},
        )

    raise exceptions.CannotLockServers()
def _cas_update(self, query, setfields): """Does CAS update of the task.""" query = copy.deepcopy(query) setfields = copy.deepcopy(setfields) query["_id"] = self._id query["time.completed"] = 0 query["time.cancelled"] = 0 query["time.failed"] = 0 query["update_marker"] = self.update_marker setfields["update_marker"] = self.new_update_marker() setfields["time.updated"] = timeutils.current_unix_timestamp() method = self.collection().find_one_and_update method = retryutils.mongo_retry()(method) return method(query, {"$set": setfields}, return_document=pymongo.ReturnDocument.AFTER)
def create_server(self, task, json_result):
    """Create a server model from the ansible facts gathered by a task."""
    facts = json_result["ansible_facts"]
    ip_addr = self.get_host_ip(task)
    create_method = retryutils.mongo_retry()(server.ServerModel.create)

    try:
        server_model = create_method(
            server_id=task.data["id"],
            name=facts["ansible_nodename"],
            fqdn=facts["ansible_nodename"],
            username=task.data["username"],
            ip=ip_addr,
            facts=facts,
        )
    except Exception as exc:
        # Log with full traceback, then propagate to the caller.
        LOG.exception("Cannot create server for task %s: %s", task._id, exc)
        raise

    LOG.info("Creates server %s for task %s",
             server_model.model_id, task._id)
    return server_model
def _cas_update(self, query, setfields): """Does CAS update of the task.""" query = copy.deepcopy(query) setfields = copy.deepcopy(setfields) query["_id"] = self._id query["time.completed"] = 0 query["time.cancelled"] = 0 query["time.failed"] = 0 query["update_marker"] = self.update_marker setfields["update_marker"] = self.new_update_marker() setfields["time.updated"] = timeutils.current_unix_timestamp() method = self.collection().find_one_and_update method = retryutils.mongo_retry()(method) return method( query, {"$set": setfields}, return_document=pymongo.ReturnDocument.AFTER )
def test_mongo_retry_fail(exc, func_pass_fail, no_sleep):
    """Non-retriable exceptions must propagate out of the retry wrapper."""
    func_pass_fail.side_effect = [exc(""), True]
    wrapped = retryutils.mongo_retry()(func_pass_fail)
    with pytest.raises(exc):
        wrapped()
def test_mongo_retry_ok(exc, func_pass_fail, no_sleep):
    """A single retriable failure must be absorbed by the retry wrapper."""
    func_pass_fail.side_effect = [exc(""), True]
    wrapped = retryutils.mongo_retry()(func_pass_fail)
    wrapped()
def __init__(self, lockname):
    """Set up an unacquired lock identified by a fresh UUID."""
    self.acquired = False
    self.lockname = lockname
    self.lock_id = str(uuid.uuid4())
    # Pre-wrap the CAS primitive so every lock operation retries
    # transient MongoDB errors.
    self.find_method = retryutils.mongo_retry()(
        self.collection().find_one_and_update)
def set_execution_state(self, execution_model, new_state):
    """Assign a new state to the execution model and persist it."""
    execution_model.state = new_state
    # Save with retries against transient MongoDB failures.
    retryutils.mongo_retry()(execution_model.save)()
def set_execution_state(self, execution_model, new_state):
    """Update the execution model's state and write it to the database."""
    execution_model.state = new_state
    persist = retryutils.mongo_retry()(execution_model.save)
    persist()