def delete_entry_queue(object_id, table_name):
    """Delete a document from the outbound queue table.

    Args:
        object_id: str: primary key of the document to delete
        table_name: str: name of the table to delete the document from
    """
    # Context manager guarantees the connection is closed even if run() raises
    # (the original leaked the connection on error). Matches the style used by
    # wait_for_resource_in_db / wait_for_rethink.
    with connect_to_db() as conn:
        result = (
            r.table(table_name)
            .get(object_id)
            .delete(return_changes=True)
            .run(conn)
        )
    LOGGER.debug(result)
def wait_for_resource_in_db(table, index, identifier, max_attempts=15, delay=0.3):
    """Polls rethinkdb for the requested resource until it exists.

    Args:
        table: str: name of a table to query for the resource in
        index: str: name of the index of the identifier to query for
        identifier: str: a match of the resource in the index selected
        max_attempts: int: number of attempts to find resource before
            giving up and returning False
            Default value: 15
        delay: float: number of seconds to wait between query attempts.
            Default value: 0.3
    Returns:
        resource_found: bool: True if the resource is found within the given
            number of attempts, False otherwise
    """
    with connect_to_db() as conn:
        for _ in range(max_attempts):
            resource = (
                r.table(table).filter({index: identifier}).coerce_to("array").run(conn)
            )
            if resource:
                # Return immediately on success; the original slept one extra
                # delay after the resource was already found.
                return True
            time.sleep(delay)
    return False
def wait_for_rethink(max_attempts=200, delay=0.5, expected_table_count=25):
    """Polls rethinkDB until all tables report as "ready".

    This prevents resources from attempting to read/write to rethink
    before it has initialized.

    Args:
        max_attempts: int: The number of attempts before giving up. Waiting
            longer is preferable to giving up, and this only runs during
            init, so we set a large default val.
            default: 200
        delay: float: The number of seconds to wait between attempts.
            default: 0.5
        expected_table_count: int: The number of tables that must report
            ready before the database is considered initialized.
            default: 25 (the schema's current table count)
    Returns:
        bool: True: if all rethink tables are reporting as ready.
        bool: False: if some rethink tables are not ready after reaching
            the max number of attempts.
    """
    with connect_to_db() as conn:
        for _ in range(max_attempts):
            db_status = r.db("rbac").wait().coerce_to("object").run(conn)
            if db_status["ready"] == expected_table_count:
                # Ready — return without the trailing sleep the original did.
                return True
            time.sleep(delay)
    return False
def peek_at_q_unfiltered(table_name):
    """Returns a single entry from table_name with the oldest timestamp.

    Args:
        table_name: str: name of the table to read from
    Returns:
        dict: the document in table_name with the minimum "timestamp" field
    """
    # Context manager closes the connection even if run() raises
    # (the original leaked the connection on error).
    with connect_to_db() as conn:
        return r.table(table_name).min("timestamp").coerce_to("object").run(conn)
def put_entry_changelog(queue_entry, direction):
    """Puts the referenced document in the changelog table.

    Args:
        queue_entry: dict: the queue document to record; mutated in place to
            add "changelog_timestamp" and "direction" fields before insert
        direction: str: direction of the sync this entry records
    """
    queue_entry["changelog_timestamp"] = dt.now().isoformat()
    queue_entry["direction"] = direction
    # Context manager closes the connection even if insert/run raises
    # (the original leaked the connection on error).
    with connect_to_db() as conn:
        result = (
            r.table("changelog")
            .insert(queue_entry, return_changes=True, conflict="error")
            .run(conn)
        )
    LOGGER.debug(result)
def update_outbound_entry_status(entry_id):
    """Change outbound_queue entry's status from UNCONFIRMED to CONFIRMED.

    Args:
        entry_id: (str) Id field of outbound_queue entry
    """
    # Context manager closes the connection even if run() raises
    # (the original leaked the connection on error).
    with connect_to_db() as conn:
        r.table("outbound_queue").get(entry_id).update(
            {"status": "CONFIRMED"}
        ).run(conn)
def save_sync_time(provider_id, sync_source, sync_type, timestamp=None):
    """Saves sync time for the current data type into the RethinkDB table
    'sync_tracker'.

    Args:
        provider_id: str: id of the provider being synced
        sync_source: str: the source of the sync data
        sync_type: str: the kind of sync being recorded
        timestamp: str: optional ISO-format timestamp to record; when absent,
            the current UTC time is used
    """
    if timestamp:
        last_sync_time = timestamp
    else:
        # Bug fix: dt.now().replace(tzinfo=timezone.utc) stamped the *local*
        # wall-clock time as UTC on any non-UTC host. dt.now(timezone.utc)
        # yields the actual current UTC time, timezone-aware.
        last_sync_time = dt.now(timezone.utc).isoformat()
    sync_entry = {
        "provider_id": provider_id,
        "timestamp": last_sync_time,
        "source": sync_source,
        "sync_type": sync_type,
    }
    # Context manager closes the connection even if insert/run raises.
    with connect_to_db() as conn:
        r.table("sync_tracker").insert(sync_entry).run(conn)
def get_user_by_username(username):
    """Get user information from users table by username.

    Args:
        username: str: username of user to retrieve
    Returns:
        list: matching user documents (case-insensitive exact username match)
    """
    # SECURITY NOTE(review): `username` is interpolated into a regex
    # unescaped, so regex metacharacters in the input alter the match
    # (e.g. "." matches any char). Consider escaping before interpolating.
    with connect_to_db() as conn:
        # Context manager closes the connection even if run() raises
        # (the original leaked the connection on error).
        user = (
            r.table("users")
            .filter(lambda doc: (doc["username"].match("(?i)^" + username + "$")))
            .coerce_to("array")
            .run(conn)
        )
    return user
def get_role_members(role_id):
    """Get the role members of a role.

    Args:
        role_id: str: id of role to see members
    Returns:
        list: role_members documents whose "role_id" matches
    """
    # Context manager closes the connection even if run() raises
    # (the original leaked the connection on error).
    with connect_to_db() as conn:
        members = (
            r.table("role_members")
            .filter({"role_id": role_id})
            .coerce_to("array")
            .run(conn)
        )
    return members
def get_role_by_name(name):
    """Get role information from roles table by role name.

    Args:
        name: str: name of role to retrieve
    Returns:
        list: matching role documents (case-insensitive exact name match)
    """
    # SECURITY NOTE(review): `name` is interpolated into a regex unescaped,
    # so regex metacharacters in the input alter the match. Consider
    # escaping before interpolating.
    with connect_to_db() as conn:
        # Context manager closes the connection even if run() raises
        # (the original leaked the connection on error).
        roles = (
            r.table("roles")
            .filter(lambda doc: (doc["name"].match("(?i)^" + name + "$")))
            .coerce_to("array")
            .run(conn)
        )
    return roles
def get_user_by_id(next_id):
    """Get user information from users table by user id.

    Args:
        next_id: str: id of user to retrieve
    Returns:
        list: matching user documents
    """
    # Bug fix: the original queried the "roles" table, though this function,
    # its docstring, and its sibling get_user_by_username all concern the
    # "users" table — an apparent copy-paste from get_role_by_id.
    # Context manager closes the connection even if run() raises.
    with connect_to_db() as conn:
        user = (
            r.db("rbac")
            .table("users")
            .filter({"next_id": next_id})
            .coerce_to("array")
            .run(conn)
        )
    return user
def get_role_by_id(role_id):
    """Get role information from roles table by role id.

    Args:
        role_id: str: id of role to retrieve
    Returns:
        list: matching role documents
    """
    # Context manager closes the connection even if run() raises
    # (the original leaked the connection on error). Also fixes the
    # stray fourth quote that preceded the original docstring text.
    with connect_to_db() as conn:
        role = (
            r.db("rbac")
            .table("roles")
            .filter({"next_id": role_id})
            .coerce_to("array")
            .run(conn)
        )
    return role
def peek_at_queue(table_name, provider_id=None):
    """Returns a single entry from table_name with the oldest timestamp and
    matching provider_id.

    Args:
        table_name: str: name of the queue table to read
        provider_id: str: optional provider id; when given, only UNCONFIRMED
            entries for that provider are considered
    Returns:
        dict: the oldest matching entry, or None when the table/selection is
            empty or the query fails
    """
    try:
        # Context manager closes the connection on every path, including the
        # caught-exception paths where the original leaked it.
        with connect_to_db() as conn:
            if provider_id:
                return (
                    r.table(table_name)
                    .filter({"provider_id": provider_id, "status": "UNCONFIRMED"})
                    .min("timestamp")
                    .coerce_to("object")
                    .run(conn)
                )
            return r.table(table_name).min("timestamp").coerce_to("object").run(conn)
    except (r.ReqlNonExistenceError, r.ReqlOpFailedError, r.ReqlDriverError):
        # Empty table / transient failure is expected here; caller treats
        # None as "nothing queued".
        return None
def get_last_sync(source, sync_type):
    """Search and get last sync entry from the specified source.

    Args:
        source: str: source of the sync entries to search
        sync_type: str: kind of sync to search for
    Returns:
        dict: the sync_tracker entry with the maximum timestamp, or None when
            the table is empty
    Raises:
        ExpectedError: if sync_tracker table has not been initialized.
    """
    try:
        # Context manager closes the connection on every path, including the
        # exception paths where the original leaked it.
        with connect_to_db() as conn:
            return (
                r.table("sync_tracker")
                .filter({"source": source, "sync_type": sync_type})
                .max("timestamp")
                .coerce_to("object")
                .run(conn)
            )
    except (r.ReqlOpFailedError, r.ReqlDriverError) as err:
        raise ExpectedError(err)
    except r.ReqlNonExistenceError:
        # Empty table is a normal condition; fall through and return None.
        LOGGER.debug("The sync_tracker table is empty.")
    except Exception as err:
        LOGGER.warning(type(err).__name__)
        raise err