def logger(self):
    """Lazily construct and return the auto-committing Logger.

    On first access this opens a dedicated database connection with
    autocommit enabled, builds and prepares a Logger on it, and
    registers a cleanup closure that closes that connection when this
    object is exited. Later accesses return the cached instance.
    """
    if self._logger is None:
        conn = itgs.database()
        conn.autocommit = True
        self._logger = Logger(
            os.environ.get('APPNAME', 'lbshared'),
            self.logger_iden,
            conn
        )
        self._logger.prepare()

        def _close_logger_conn(*args):
            self._logger.connection.close()

        self.closures.append(_close_logger_conn)
    return self._logger
def main():
    """Connect to Postgres and AMQP with retries, install clean-shutdown
    handlers, then hand control to ``handlers.manager.register_listeners``.

    Turns out initializing 2 connections and trying to report errors on those
    connections is painful. Also to make it not highly annoying to use this
    program, we will attempt connections a few times before giving up.

    The assumption is this process is being managed by something which will
    restart it when it exits, so we don't want to just constantly stop and
    restart if e.g. the Postgres goes down temporarily. Instead we want to
    intelligently retry with exponential backoff (4**attempt seconds).
    """
    conn = None
    for attempt in range(5):
        if attempt > 0:
            sleep_time = 4**attempt
            print(f'Sleeping for {sleep_time} seconds..')
            time.sleep(sleep_time)
        print(f'Connecting to Postgres.. (attempt {attempt + 1}/5)')
        try:
            conn = psycopg2.connect('')
            break
        except psycopg2.OperationalError:
            traceback.print_exc()
    if conn is None:
        # Exhausted all attempts; let the supervisor restart us.
        _exit(1)

    print('Success! Initializing logger')
    logger = Logger(os.environ['APPNAME'], 'main.py', conn)
    logger.prepare()
    logger.print(Level.INFO, 'Starting up')
    conn.commit()
    print('Logger successfully initialized')
    print('Initializing AMQP connection')

    # Until we setup the signal handlers we need to try/except to ensure
    # we report if we went down
    amqp = None
    try:
        parameters = pika.ConnectionParameters(
            os.environ['AMQP_HOST'],
            int(os.environ['AMQP_PORT']),
            os.environ['AMQP_VHOST'],
            pika.PlainCredentials(
                os.environ['AMQP_USERNAME'], os.environ['AMQP_PASSWORD']))
        for attempt in range(5):
            if attempt > 0:
                sleep_time = 4**attempt
                print(f'Sleeping for {sleep_time} seconds..')
                logger.print(
                    Level.WARN,
                    'Failed to connect to the AMQP server; ' +
                    'will retry in {} seconds', sleep_time)
                logger.connection.commit()
                time.sleep(sleep_time)
            print(f'Connecting to the AMQP server.. (attempt {attempt + 1}/5)')
            try:
                amqp = pika.BlockingConnection(parameters)
                break
            except pika.exceptions.AMQPConnectionError:
                traceback.print_exc()
                logger.exception(Level.WARN)
                logger.connection.commit()
        if amqp is None:
            logger.print(
                Level.ERROR,
                'Failed to connect to the AMQP server ' +
                '(exhausted all attempts): shutting down')
            conn.commit()
            logger.close()
            conn.close()
            _exit(1)

        logger.print(Level.INFO, 'Successfully connected to the AMQP server!')
        print('Setting up signal handlers...')

        def receive_and_clean_shutdown(sig_num=None, frame=None):
            # Invoked on SIGINT/SIGTERM (sig_num set) or via atexit after a
            # crash (sig_num None). Guarded by shutdown_started so the signal
            # path and the atexit path never both run the teardown.
            global shutdown_started
            global shutdown_listeners
            nonlocal logger
            nonlocal amqp
            if sig_num is not None:
                print(f'Received signal {sig_num}!')
                if shutdown_started:
                    print('Ignoring; already shutting down')
                    return
            else:
                if shutdown_started:
                    return
                print(
                    'A crash has been detected and we are attempting to shutdown cleanly'
                )
            shutdown_started = True

            # Best-effort reporting: once the logger fails we stop using it
            # but keep shutting everything else down.
            reporting_errors = True
            try:
                if sig_num is not None:
                    logger.print(Level.INFO,
                                 'Received signal {}, clean shutdown started',
                                 sig_num)
                else:
                    logger.print(
                        Level.ERROR,
                        'A crash has been detected and a clean shutdown has been initiated'
                    )
                logger.connection.commit()
            except:  # noqa: E722
                print(
                    'Failed to report the signal to the logger, continuing shutdown anyway'
                )
                traceback.print_exc()
                reporting_errors = False

            print('Running through shutdown listeners...')
            for lst in shutdown_listeners:
                try:
                    lst()
                except:  # noqa: E722
                    print(
                        'Shutdown listener failed, continuing shutdown anyway')
                    traceback.print_exc()
                    if reporting_errors:
                        try:
                            logger.exception(Level.ERROR)
                            logger.connection.commit()
                        except:  # noqa: E722
                            print(
                                'Failed to report exception to the logger, ' +
                                'disabling logger and continuing shutdown')
                            traceback.print_exc()
                            reporting_errors = False
            try:
                logger.close()
                logger.connection.close()
                amqp.close()
                print(
                    'Cleaning up resources finished normally, exiting status 0'
                )
                _exit(0)
            except:  # noqa: E722
                print(
                    'Failed to successfully cleanup resources, exiting status 1'
                )
                traceback.print_exc()
                _exit(1)

        signal.signal(signal.SIGINT, receive_and_clean_shutdown)
        signal.signal(signal.SIGTERM, receive_and_clean_shutdown)
        atexit.register(receive_and_clean_shutdown)
    except:  # noqa: E722
        print(
            'Failed to connect to AMQP and register signals, exiting status 1')
        traceback.print_exc()
        try:
            logger.exception(
                Level.ERROR,
                'Failed to connect to AMQP and register signal handlers')
            conn.commit()
        except:  # noqa: E722
            print('Failed to report the error, continuing shutdown anyway')
            traceback.print_exc()
        logger.close()
        conn.close()
        _exit(1)

    logger.print(Level.INFO, 'Initialization completed normally')
    conn.commit()
    print('Initialization completed normally. Either SIGINT or SIGTERM should '
          'be used to initiate a clean shutdown.')
    print('Logs will not be sent to STDOUT until shutdown. Monitor the '
          'postgres log table to follow progress.')
    try:
        handlers.manager.register_listeners(logger, amqp)
    except:  # noqa: E722
        print('register_listeners error')
        traceback.print_exc()
        try:
            logger.exception(Level.ERROR)
            # BUGFIX: was `logger.commit()`; Logger exposes no commit() —
            # every other call site commits via the underlying connection.
            logger.connection.commit()
        except:  # noqa: E722
            print('Error while reporting error for register_listeners')
            traceback.print_exc()
        raise
def test_reuses_iden_id(self):
    """A second Logger with the same app name and identifier resolves
    to the same iden_id row after prepare()."""
    self.logger.prepare()
    duplicate = Logger(
        self.logger.appname, self.logger.identifier, self.connection)
    duplicate.prepare()
    self.assertEqual(self.logger.iden_id, duplicate.iden_id)
def test_reuses_app_id(self):
    """Two Loggers under the same app name (different identifiers)
    share a single app_id row after prepare()."""
    self.logger.prepare()
    sibling = Logger(self.logger.appname, 'iden2', self.connection)
    sibling.prepare()
    self.assertEqual(self.logger.app_id, sibling.app_id)
def setUp(self):
    """Open a cursor on the shared class connection and build a fresh
    (unprepared) Logger for each test."""
    self.cursor = self.connection.cursor()
    self.logger = Logger('lblogging', 'logger_tests.py', self.connection)
class LoggerTests(unittest.TestCase):
    """Integration tests for Logger against a live Postgres database.

    Requires the log_applications / log_identifiers / log_events tables
    to exist; each test starts from empty tables (see tearDown).
    """

    @classmethod
    def setUpClass(cls):
        # Empty DSN: psycopg2 reads connection info from the environment.
        cls.connection = psycopg2.connect('')
        # pypika Table handles used to build SELECTs in the tests below.
        cls.log_events = Table('log_events')
        cls.log_apps = Table('log_applications')
        cls.log_idens = Table('log_identifiers')

    @classmethod
    def tearDownClass(cls):
        cls.connection.close()
        cls.connection = None

    def setUp(self):
        # Fresh, unprepared Logger per test; shares the class connection.
        self.logger = Logger('lblogging', 'logger_tests.py', self.connection)
        self.cursor = self.connection.cursor()

    def tearDown(self):
        self.logger.close()
        self.logger = None
        # Roll back anything a failed test left open, then wipe all three
        # tables so each test sees empty state.
        self.connection.rollback()
        self.cursor.execute('TRUNCATE log_applications CASCADE')
        self.cursor.execute('TRUNCATE log_identifiers CASCADE')
        self.cursor.execute('TRUNCATE log_events CASCADE')
        self.connection.commit()
        self.cursor.close()
        self.cursor = None

    def test_print_fails_without_prepare(self):
        # print() before prepare() should blow up (ids not yet resolved).
        with self.assertRaises(AttributeError):
            self.logger.print(Level.TRACE, 'some message')

    def test_creates_app_id(self):
        # Table starts empty...
        self.cursor.execute(
            Query.from_(self.log_apps).select(analytics.Count(
                Star())).get_sql())
        cnt = self.cursor.fetchone()[0]
        self.connection.commit()
        self.assertEqual(cnt, 0)
        # ...prepare() inserts exactly one application row...
        self.logger.prepare()
        self.cursor.execute(
            Query.from_(self.log_apps).select(analytics.Count(
                Star())).get_sql())
        cnt = self.cursor.fetchone()[0]
        self.connection.commit()
        self.assertEqual(cnt, 1)
        # ...whose id/name match what the Logger cached.
        self.cursor.execute(
            Query.from_(self.log_apps).select(self.log_apps.id,
                                              self.log_apps.name).get_sql())
        id_, nm = self.cursor.fetchone()
        self.connection.commit()
        self.assertEqual(nm, self.logger.appname)
        self.assertEqual(id_, self.logger.app_id)

    def test_creates_iden_id(self):
        # Same shape as test_creates_app_id, for the identifiers table.
        self.cursor.execute(
            Query.from_(self.log_idens).select(analytics.Count(
                Star())).get_sql())
        cnt = self.cursor.fetchone()[0]
        self.connection.commit()
        self.assertEqual(cnt, 0)
        self.logger.prepare()
        self.cursor.execute(
            Query.from_(self.log_idens).select(analytics.Count(
                Star())).get_sql())
        cnt = self.cursor.fetchone()[0]
        self.connection.commit()
        self.assertEqual(cnt, 1)
        self.cursor.execute(
            Query.from_(self.log_idens).select(
                self.log_idens.id, self.log_idens.identifier).get_sql())
        id_, iden = self.cursor.fetchone()
        self.connection.commit()
        self.assertEqual(iden, self.logger.identifier)
        self.assertEqual(id_, self.logger.iden_id)

    def test_reuses_app_id(self):
        # A second Logger under the same app name resolves the same app_id.
        self.logger.prepare()
        new_lgr = Logger(self.logger.appname, 'iden2', self.connection)
        new_lgr.prepare()
        self.assertEqual(self.logger.app_id, new_lgr.app_id)

    def test_reuses_iden_id(self):
        # Same app + same identifier resolves to the same iden_id row.
        self.logger.prepare()
        new_lgr = Logger(self.logger.appname, self.logger.identifier,
                         self.connection)
        new_lgr.prepare()
        self.assertEqual(self.logger.iden_id, new_lgr.iden_id)

    def test_print_created_event(self):
        self.logger.prepare()
        self.cursor.execute(
            Query.from_(self.log_events).select(analytics.Count(
                Star())).get_sql())
        cnt = self.cursor.fetchone()[0]
        self.connection.commit()
        self.assertEqual(cnt, 0)
        # print() inserts one event row; commit to make it visible here.
        self.logger.print(Level.TRACE, 'msg')
        self.connection.commit()
        self.cursor.execute(
            Query.from_(self.log_events).select(analytics.Count(
                Star())).get_sql())
        cnt = self.cursor.fetchone()[0]
        self.connection.commit()
        self.assertEqual(cnt, 1)
        # The row carries the level, both foreign keys, and the message.
        self.cursor.execute(
            Query.from_(self.log_events).select('level', 'application_id',
                                                'identifier_id',
                                                'message').get_sql())
        lvl, appid, idenid, msg = self.cursor.fetchone()
        self.connection.commit()
        self.assertEqual(lvl, int(Level.TRACE))
        self.assertEqual(appid, self.logger.app_id)
        self.assertEqual(idenid, self.logger.iden_id)
        self.assertEqual(msg, 'msg')

    def test_print_formats_messages(self):
        # Extra print() args are applied via str.format-style substitution.
        self.logger.prepare()
        self.logger.print(Level.TRACE, 'my {} message', 'formatted')
        self.cursor.execute(
            Query.from_(self.log_events).select('message').limit(1).get_sql())
        msg = self.cursor.fetchone()[0]
        self.connection.commit()
        self.assertEqual(msg, 'my formatted message')

    def test_exception_event(self):
        # exception() logs the active traceback at the given level; the
        # stored message should name the exception type.
        self.logger.prepare()
        try:
            1 / 0
        except ZeroDivisionError:
            self.logger.exception(Level.WARN)
        self.cursor.execute(
            Query.from_(self.log_events).select('level',
                                                'message').limit(1).get_sql())
        lvl, msg = self.cursor.fetchone()
        self.connection.commit()
        self.assertEqual(lvl, int(Level.WARN))
        self.assertIn('ZeroDivisionError', msg)

    def test_with_iden(self):
        # with_iden() clones the logger under a new identifier string.
        # NOTE(review): it asserts iden_id is UNCHANGED even though the
        # identifier differs — presumably the clone resolves its own iden_id
        # lazily on next prepare/print; confirm against Logger.with_iden.
        self.logger.prepare()
        lgr = self.logger.with_iden('iden2')
        self.assertEqual(lgr.app_id, self.logger.app_id)
        self.assertEqual(lgr.iden_id, self.logger.iden_id)
        self.assertEqual(lgr.identifier, 'iden2')
        self.assertEqual(lgr.appname, self.logger.appname)
class LazyIntegrations:
    """Contains lazily-loaded connection properties, which are cleaned up
    when this object is exited. This object is a context manager.

    This is intended to reduce boilerplate around integrations as much as
    possible while being extensible in the future. Hence the interface
    provides the logger as its own integration (so that the connection can
    autocommit and logging can be split into its own database in the future),
    and is prepared for having follower-databases which are read-only.

    The read-connection should be assumed to be up to a few minutes behind
    the write-connection. Hence when using both of them there may not be
    consistency. To alleviate this, set "no_read_only" to True and the read
    connection will be the same as the write connection.

    Note that callees should not rely on having separate transactions between
    the read and write cursors. In most scenarios either only the read cursor
    should be used or no_read_only should be True.

    @param [bool] no_read_only If True, this MAY NOT initiate a separate
        read-able connection for the database. Otherwise, this MAY initiate
        a separate connection.
    @param [str] logger_iden The identifier to initialize the logger with;
        typically the file and function initializing the lazy integrations.
    """

    def __init__(self,
                 no_read_only=False,
                 logger_iden='lazy_integrations.py#logger'):
        # Cleanup callbacks, run in registration order by __exit__; each is
        # called with (exc_type, exc_value, traceback).
        self.closures = []
        self.no_read_only = no_read_only
        self.logger_iden = logger_iden
        # Lazily-initialized integration handles; None until first access.
        self._logger = None
        self._conn = None
        self._cursor = None
        self._amqp = None
        self._channel = None
        self._cache = None
        self._arango_conn = None
        self._arango_db = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Run every registered cleanup closure, then re-raise any failures.

        Exceptions are never propagated (returns False); closure failures
        are collected so every closure still gets a chance to run.
        """
        errors = []
        for closure in self.closures:
            try:
                closure(exc_type, exc_value, traceback)
            except Exception as e:  # noqa
                # we need to delay these to give other closures
                # an opportunity
                errors.append(e)
        if len(errors) == 1:
            raise errors[0]
        elif errors:
            raise Exception(
                f'Many errors while shutting down integrations: {errors}')
        return False

    @property
    def logger(self):
        """Fetch the logger instance, which will auto-commit.

        Opens a dedicated autocommit connection on first access and
        registers a closure to close it on exit."""
        if self._logger is not None:
            return self._logger
        logger_conn = itgs.database()
        logger_conn.autocommit = True
        self._logger = Logger(os.environ.get('APPNAME', 'lbshared'),
                              self.logger_iden, logger_conn)
        self._logger.prepare()

        def closure(*args):
            self._logger.connection.close()

        self.closures.append(closure)
        return self._logger

    @property
    def read_cursor(self):
        """Fetches a database cursor that is only promised to support
        reads. This may be the same connection as write_conn."""
        return self.write_cursor

    @property
    def read_conn(self):
        """Fetches the database connection that is only promised to support
        reads. This may be the same connection as write_conn. This is useful
        if you performed a long read connection and you want to close the
        transaction.
        """
        # Currently always the write connection; kept as a separate property
        # so a follower database can be introduced later without changing
        # callers.
        return self.write_conn

    @property
    def write_conn(self):
        """Fetch the writable database connection"""
        return self.write_conn_and_cursor[0]

    @property
    def write_cursor(self):
        """Fetch the writable database cursor"""
        return self.write_conn_and_cursor[1]

    @property
    def write_conn_and_cursor(self):
        """Returns the writable database connection alongside the cursor.
        The connection can be used to commit."""
        if self._conn is not None:
            return (self._conn, self._cursor)
        self._conn = itgs.database()
        self._cursor = self._conn.cursor()

        def closure(*args):
            # Cursor must be closed before its connection.
            self._cursor.close()
            self._conn.close()

        self.closures.append(closure)
        return (self._conn, self._cursor)

    @property
    def amqp(self):
        """Get the advanced message queue pika instance, which is really
        only necessary if you need to declare custom channels"""
        return self.amqp_and_channel[0]

    @property
    def channel(self):
        """The AMQP channel to use."""
        return self.amqp_and_channel[1]

    @property
    def amqp_and_channel(self):
        """Get both the AMQP pika instance and the channel we are using"""
        if self._amqp is not None:
            return (self._amqp, self._channel)
        self._amqp = itgs.amqp()
        self._channel = self._amqp.channel()
        # Publisher confirms: publishes block until the broker acks.
        self._channel.confirm_delivery()

        def closure(*args):
            # Channel must be closed before its connection.
            self._channel.close()
            self._amqp.close()

        self.closures.append(closure)
        return (self._amqp, self._channel)

    @property
    def cache(self):
        """Get the memcached client"""
        if self._cache is not None:
            return self._cache
        self._cache = itgs.cache()

        def closure(*args):
            self._cache.close()

        self.closures.append(closure)
        return self._cache

    @property
    def kvs_conn(self) -> Config:
        """Get the connection for ArangoDB"""
        # NOTE(review): no cleanup closure is registered here, unlike the
        # other integrations — presumably the Arango client needs no
        # explicit close; confirm against itgs.kvstore.
        if self._arango_conn is not None:
            return self._arango_conn
        self._arango_conn = itgs.kvstore()
        return self._arango_conn

    @property
    def kvs_db(self) -> Database:
        """Get the Arango DB which all collections are in"""
        if self._arango_db is not None:
            return self._arango_db
        self._arango_db = self.kvs_conn.database(os.environ['ARANGO_DB'])
        return self._arango_db