async def main():
    """CLI entry point for the deletion jobs.

    With ``--idle_users`` only the idle-user deletion runs (honoring
    ``--dry_run``); otherwise old raw data files are removed and the
    full database deletion is performed.
    """
    define(
        name="idle_users",
        type=bool,
        default=False,
        help="locate and identify idle users who should be deleted",
    )
    define(
        name="dry_run",
        type=bool,
        default=False,
        help="dry run (don't actually perform deletions)",
    )
    parse_command_line()
    if options.idle_users:
        # `' (dry run)' * bool` appends the suffix only when dry_run is set
        with timer(f"Deleted idle users{' (dry run)' * options.dry_run}"):
            await delete_idle_users(dry_run=options.dry_run)
        return
    with timer("Deleted old raw data files"):
        await delete_raw_data()
    deleter = get_deleter()
    with timer("Database deletion"):
        await delete_everything(concurrency=CONCURRENCY, max_failures=MAX_FAILURES)
    # wait for batched deletions to complete before exiting
    # (fix: block=False returned immediately, contradicting the intent above
    # and allowing the process to exit with deletions still pending)
    await deleter.stop(block=True)
def in_thread():
    """Run ``f(db, *args, **kwargs)`` in a worker thread with a DB connection.

    When ``persistent`` is set, a thread-local connection is reused after a
    lightweight liveness query; otherwise a fresh connection is opened and
    closed around the call.  Returns whatever ``f`` returns.
    """
    db = None
    if persistent:
        try:
            db = async_with_db.local.db
        except AttributeError:
            # no cached connection for this thread yet
            pass
        else:
            # verify the cached connection is still usable before reuse
            try:
                db.execute(_check_connection_query).fetchall()
            except Exception as e:
                app_log.error(f"Not reusing closed connection: {e}")
                # fix: close the dead connection before discarding it so the
                # underlying handle is released (best-effort; it may already
                # be closed, hence the broad swallow)
                try:
                    db.close()
                except Exception:
                    pass
                del async_with_db.local.db
                db = None
    if db is None:
        with utils.timer("db connect"):
            # Check the object counts before pyodbc connection.
            if DEBUG_OBJGRAPH:
                app_log.info(
                    f"Checking the object counts before pyodbc connection: {objgraph.growth()}"
                )
            db = asyncio.run(connect_to_database(**params))
            # Check the object counts after pyodbc connection.
            # There should be a pyodbc object in memory
            if DEBUG_OBJGRAPH:
                app_log.info(
                    f"Checking the object counts after pyodbc connection: {objgraph.growth()}"
                )
        if persistent:
            # cache for reuse by later calls on this thread
            async_with_db.local.db = db
    with utils.timer(f"db query {f.__name__}"):
        try:
            return f(db, *args, **kwargs)
        finally:
            if not persistent:
                # Check the object counts before pyodbc connection is closed.
                # Check if the pyodbc object still exists in memory
                if DEBUG_OBJGRAPH:
                    app_log.info(
                        f"Checking the object counts before pyodbc connection closes: {objgraph.growth()}"
                    )
                db.close()
                # Check the object counts after pyodbc connection is closed.
                # Check if the pyodbc object is removed from memory
                if DEBUG_OBJGRAPH:
                    app_log.info(
                        f"Checking the object counts after pyodbc connection closes: {objgraph.growth()}"
                    )
def consume(self): """Consume the deletion queue in the background""" self.batch = batch = [] finished = False finish_future = None while not finished: should_delete = False try: device_id, future = self.queue.get(timeout=self.batch_seconds) except Empty: # idle, submit deletion if there's anything to delete should_delete = bool(batch) else: if device_id is self.Halt: # received halt message, delete anything pending and exit app_log.info( f"Halt of deletion requested, {len(batch)} items to delete" ) finished = True finish_future = future should_delete = bool(batch) else: # deletion requested, add to batch and delete if batch is full app_log.debug(f"Device {device_id} added to deletion batch") batch.append((device_id, future)) should_delete = len(batch) >= self.batch_size if not should_delete: continue # consume the batch app_log.info(f"Submitting {len(batch)} devices for deletion") device_ids = [] futures = [] for device_id, future in batch: device_ids.append(device_id) futures.append(future) batch[:] = [] with timer(f"Deleted {len(device_ids)} devices from the db"): try: deleted_somethings = asyncio.run(delete_sql_data(*device_ids)) except Exception as e: app_log.error(f"Error processing deletion: {e}") # propagate errors to awaited Futures for future in futures: future.set_exception(e) else: # signal deletions as completed for deleted_something, future in zip(deleted_somethings, futures): future.set_result(deleted_something) app_log.info("Exiting deletion queue") if finish_future: finish_future.set_result(None)
def test_timer():
    """Smoke test: utils.timer works as a context manager with an empty body."""
    with utils.timer("testmessage"):
        pass