def loop(self):
    """Main work loop: repeatedly call execute() until shutdown is requested.

    Thread-local cached database connections are enabled for the lifetime
    of the loop and released when it exits.
    """
    enable_cached_db_connections()
    shutting_down = False
    while not shutting_down:
        try:
            result = self.execute()
            if result == WORK_SUBMITTED:
                # we did something, so immediately look for more work
                # unless we're shutting down
                shutting_down = self.shutdown_event.is_set()
            elif result == NO_NODES_AVAILABLE:
                # no nodes available: back off for half the node status
                # update period before checking again
                shutting_down = self.shutdown_event.wait(
                    self.node_status_update_frequency / 2)
            elif result in (NO_WORK_AVAILABLE, NO_WORK_SUBMITTED):
                # nothing to do right now -- check again in a second
                shutting_down = self.shutdown_event.wait(1)
        except Exception as e:
            logging.error(
                "unexpected exception thrown in loop for {}: {}".format(
                    self, e))
            report_exception()
            shutting_down = self.shutdown_event.wait(1)

    disable_cached_db_connections()
def test_database_006_caching_threaded(self):
    """Cached database connections for threads.

    Each thread gets its own cached connection; repeated requests from
    the same thread return the same connection object.
    """
    enable_cached_db_connections()
    e = threading.Event()
    with get_db_connection() as conn_1:
        self.assertEqual(len(saq.database._global_db_cache), 1)
        conn_1_id = id(conn_1)

    def f():
        enable_cached_db_connections()
        # this connection should be different than conn_1
        with get_db_connection() as conn_2:
            self.assertEqual(len(saq.database._global_db_cache), 2)
            self.assertNotEqual(conn_1, conn_2)
            conn_2_id = id(conn_2)
            # but asked a second time this should be the same as before
            with get_db_connection() as conn_3:
                self.assertEqual(len(saq.database._global_db_cache), 2)
                self.assertEqual(conn_2_id, id(conn_3))
        e.set()
        disable_cached_db_connections()
        self.assertEqual(len(saq.database._global_db_cache), 1)

    t = threading.Thread(target=f)
    t.start()
    e.wait()
    # join the thread before checking the cache size: the event is set
    # *before* the thread calls disable_cached_db_connections(), so
    # without the join the length check below races with the thread
    t.join()

    with get_db_connection() as conn_4:
        self.assertEqual(len(saq.database._global_db_cache), 1)
        self.assertEqual(conn_1_id, id(conn_4))

    disable_cached_db_connections()
    self.assertEqual(len(saq.database._global_db_cache), 0)
def f():
    # child-side checks are shipped back to the parent as boolean
    # test messages rather than asserted locally
    enable_cached_db_connections()
    # this connection should be different than conn_1
    with get_db_connection() as first:
        send_test_message(len(saq.database._global_db_cache) == 2)
        send_test_message(conn_1 != first)
        first_id = id(first)
        # a repeat request must hand back the same cached connection
        with get_db_connection() as second:
            send_test_message(len(saq.database._global_db_cache) == 2)
            send_test_message(first_id == id(second))
    disable_cached_db_connections()
    send_test_message(len(saq.database._global_db_cache) == 1)
def debug(self):
    """Run a single collection pass for debugging.

    Loads node groups if they have not been loaded yet, then runs one
    round of (extended) collection and workload cleanup with cached
    database connections enabled.
    """
    # if we're starting and we haven't loaded any groups yet then go ahead and load them here
    if not self.remote_node_groups:
        self.load_groups()

    enable_cached_db_connections()
    try:
        try:
            self.debug_extended_collection()
        except NotImplementedError:
            # extended collection is optional for subclasses
            pass

        self.execute()
        self.execute_workload_cleanup()
    finally:
        # release the cached connections even if execute() or cleanup raises
        disable_cached_db_connections()
def f():
    # runs in a separate thread: it must get its own cached connection
    enable_cached_db_connections()
    # this connection should be different than conn_1
    with get_db_connection() as conn_2:
        # assertEquals/assertNotEquals are deprecated aliases
        # (removed in Python 3.12) -- use assertEqual/assertNotEqual
        self.assertEqual(len(saq.database._global_db_cache), 2)
        self.assertNotEqual(conn_1, conn_2)
        conn_2_id = id(conn_2)
        # but asked a second time this should be the same as before
        with get_db_connection() as conn_3:
            self.assertEqual(len(saq.database._global_db_cache), 2)
            self.assertEqual(conn_2_id, id(conn_3))
    e.set()
    disable_cached_db_connections()
    self.assertEqual(len(saq.database._global_db_cache), 1)
def loop(self):
    """Service loop: call execute() about once a second until shutdown.

    Cached database connections are enabled for the lifetime of the loop;
    the try/finally guarantees they are released on any exit path
    (consistent with extended_collection_wrapper).
    """
    enable_cached_db_connections()
    try:
        while True:
            try:
                self.execute()
            except Exception as e:
                logging.error(
                    "unexpected exception thrown during loop for {}: {}".
                    format(self, e))
                report_exception()

            # wait doubles as the shutdown check and the 1-second pacing
            if self.service_shutdown_event.wait(1):
                break

            if self.is_service_shutdown:
                break
    finally:
        disable_cached_db_connections()
def cleanup_loop(self):
    """Repeatedly run workload cleanup until the service shuts down.

    When a pass cleans up at least one item the next pass starts
    immediately; otherwise the loop idles for one second between passes.
    """
    logging.debug("starting cleanup loop")
    enable_cached_db_connections()

    finished = False
    while not finished:
        delay = 1
        try:
            if self.execute_workload_cleanup() > 0:
                # more work may be pending -- go again right away
                delay = 0
        except Exception as e:
            logging.exception(f"unable to execute workload cleanup: {e}")

        finished = self.service_shutdown_event.wait(delay)

    disable_cached_db_connections()
    logging.debug("exited cleanup loop")
def test_database_005_caching(self):
    """Enabling/disabling the cache populates and empties the global pool."""
    from saq.database import _cached_db_connections_enabled
    self.assertFalse(_cached_db_connections_enabled())
    enable_cached_db_connections()
    self.assertTrue(_cached_db_connections_enabled())
    with get_db_connection() as db:
        pass

    # we should have one database connection ready
    # (assertEquals is a deprecated alias removed in Python 3.12)
    self.assertEqual(len(saq.database._global_db_cache), 1)

    disable_cached_db_connections()
    self.assertFalse(_cached_db_connections_enabled())

    # we should have zero database connection ready
    self.assertEqual(len(saq.database._global_db_cache), 0)
    self.assertEqual(len(saq.database._use_cache_flags), 0)
def disposition_watch_loop(self, alert_id):
    """Poll the disposition of the given alert every 5 seconds.

    Runs until shutdown, analysis cancellation, or analysis completion.
    Fix: the original returned from the except handler without calling
    disable_cached_db_connections(), leaking this thread's cached
    connection; the try/finally guarantees it is always released.
    """
    enable_cached_db_connections()
    try:
        while not self.shutdown and not self.cancel_analysis_flag and not self.analysis_ended_flag:
            try:
                self.disposition_watch_execute(alert_id)
                time.sleep(5)
            except Exception as e:
                logging.error("unable to check disposition of {}: {}".format(
                    alert_id, e))
                report_exception()
                return
    finally:
        disable_cached_db_connections()

    logging.debug("exiting disposition watch")
def test_database_007_caching_processes(self):
    """Cached database connections for processes.

    A child process must get its own cached connection; the parent's
    cached connection is unaffected. Child-side checks are sent back
    as boolean test messages.
    """
    enable_cached_db_connections()
    with get_db_connection() as conn_1:
        self.assertEqual(len(saq.database._global_db_cache), 1)
        conn_1_id = id(conn_1)

    def f():
        enable_cached_db_connections()
        # this connection should be different than conn_1
        with get_db_connection() as conn_2:
            send_test_message(len(saq.database._global_db_cache) == 2)
            send_test_message(conn_1 != conn_2)
            conn_2_id = id(conn_2)
            # but asked a second time this should be the same as before
            with get_db_connection() as conn_3:
                send_test_message(len(saq.database._global_db_cache) == 2)
                send_test_message(conn_2_id == id(conn_3))
        disable_cached_db_connections()
        send_test_message(len(saq.database._global_db_cache) == 1)

    p = multiprocessing.Process(target=f)
    p.start()
    self.assertTrue(
        recv_test_message())  # len(saq.database._global_db_cache) == 2
    self.assertTrue(recv_test_message())  # conn_1 != conn_2
    self.assertTrue(
        recv_test_message())  # len(saq.database._global_db_cache) == 2
    self.assertTrue(recv_test_message())  # conn_2_id == id(conn_3)
    self.assertTrue(
        recv_test_message())  # len(saq.database._global_db_cache) == 1
    p.join()

    with get_db_connection() as conn_4:
        self.assertEqual(len(saq.database._global_db_cache), 1)
        self.assertEqual(conn_1_id, id(conn_4))

    disable_cached_db_connections()
    self.assertEqual(len(saq.database._global_db_cache), 0)
def test_execute_with_retry_commit(self):
    """execute_with_retry honors the commit flag.

    Fix: caching is disabled at entry and only re-enabled at the very
    end; if any assertion failed, connection caching stayed disabled for
    every subsequent test. The try/finally restores it on all paths.
    """
    _uuid = str(uuid.uuid4())
    _lock_uuid = str(uuid.uuid4())
    # use plain (uncached) connections so the two lookups really are
    # separate database sessions
    disable_cached_db_connections()
    try:
        # simple insert statement with commit option
        with get_db_connection() as db:
            c = db.cursor()
            execute_with_retry(
                db, c,
                'INSERT INTO locks ( uuid, lock_time ) VALUES ( %s, NOW() )',
                (_uuid, ),
                commit=True)

        # check it on another connection
        with get_db_connection() as db:
            c = db.cursor()
            c.execute("SELECT uuid FROM locks WHERE uuid = %s", (_uuid, ))
            self.assertIsNotNone(c.fetchone())

        _uuid = str(uuid.uuid4())
        _lock_uuid = str(uuid.uuid4())

        # and then this one should fail since we did not commit it
        with get_db_connection() as db:
            c = db.cursor()
            execute_with_retry(
                db, c,
                'INSERT INTO locks ( uuid, lock_time ) VALUES ( %s, NOW() )',
                (_uuid, ),
                commit=False)

        with get_db_connection() as db:
            c = db.cursor()
            c.execute("SELECT uuid FROM locks WHERE uuid = %s", (_uuid, ))
            self.assertIsNone(c.fetchone())
    finally:
        # always restore connection caching, even if an assertion failed
        enable_cached_db_connections()
def extended_collection_wrapper(self):
    # Run extended collection with cached database connections enabled;
    # the finally clause guarantees the thread's cached connections are
    # torn down even if extended_collection() raises.
    enable_cached_db_connections()
    try:
        self.extended_collection()
    finally:
        disable_cached_db_connections()