def test_patroni_logger(self):
    """PatroniLogger must pick up rotation settings from the Patroni config
    and survive a reload after the log ``dir`` entry disappears."""
    log_section = {
        'dir': 'foo',
        'file_size': 4096,
        'file_num': 5,
        'loggers': {'foo.bar': 'INFO'},
    }
    full_config = {
        'log': log_section,
        'restapi': {},
        'postgresql': {'data_dir': 'foo'},
    }
    # Feed the configuration in through the environment, exactly as Patroni
    # would receive it when started with PATRONI_CONFIGURATION set.
    sys.argv = ['patroni.py']
    os.environ[Config.PATRONI_CONFIG_VARIABLE] = yaml.dump(full_config, default_flow_style=False)

    logger = PatroniLogger()
    logger.reload_config(Config()['log'])

    # The rotating file handler must reflect the configured size/count.
    self.assertEqual(logger.handler.maxBytes, log_section['file_size'])
    self.assertEqual(logger.handler.backupCount, log_section['file_num'])

    # Reloading without a log directory must not blow up.
    log_section.pop('dir')
    logger.reload_config(log_section)
class AbstractPatroniDaemon(object):
    """Base class for Patroni daemon processes.

    Wires up the asynchronous logger, SIGHUP/SIGTERM handling and the main
    run loop; subclasses implement :meth:`_run_cycle` and :meth:`_shutdown`.
    """

    def __init__(self, config):
        from patroni.log import PatroniLogger

        # Install signal handlers before anything else so a very early
        # SIGTERM is not lost.
        self.setup_signal_handlers()

        self.logger = PatroniLogger()
        self.config = config
        # Call the base implementation explicitly: subclass overrides of
        # reload_config must not run before the subclass __init__ finishes.
        AbstractPatroniDaemon.reload_config(self, local=True)

    def sighup_handler(self, *args):
        # Only set a flag here; the actual reload happens in run(), outside
        # signal-handler context.
        self._received_sighup = True

    def sigterm_handler(self, *args):
        with self._sigterm_lock:
            if not self._received_sigterm:
                self._received_sigterm = True
                sys.exit()

    def setup_signal_handlers(self):
        """Initialize signal state and register handlers (POSIX only)."""
        self._received_sighup = False
        self._sigterm_lock = Lock()
        self._received_sigterm = False
        # Windows has no SIGHUP; skip handler registration there.
        if os.name != 'nt':
            signal.signal(signal.SIGHUP, self.sighup_handler)
            signal.signal(signal.SIGTERM, self.sigterm_handler)

    @property
    def received_sigterm(self):
        # Read under the lock to stay consistent with sigterm_handler().
        with self._sigterm_lock:
            return self._received_sigterm

    def reload_config(self, sighup=False, local=False):
        """Re-apply configuration; only reconfigures logging when the local
        configuration actually changed (``local=True``)."""
        if local:
            self.logger.reload_config(self.config.get('log', {}))

    @abc.abstractmethod
    def _run_cycle(self):
        """_run_cycle"""

    def run(self):
        """Main loop: start the logger thread and repeat _run_cycle() until
        SIGTERM, re-reading the local configuration on SIGHUP."""
        self.logger.start()
        while not self.received_sigterm:
            if self._received_sighup:
                self._received_sighup = False
                self.reload_config(True, self.config.reload_local_configuration())
            self._run_cycle()

    @abc.abstractmethod
    def _shutdown(self):
        """_shutdown"""

    def shutdown(self):
        """Mark the daemon as terminating, run subclass shutdown, then flush
        and stop the logger last so shutdown messages are not lost."""
        with self._sigterm_lock:
            self._received_sigterm = True
        self._shutdown()
        self.logger.shutdown()
def test_interceptor(self):
    """Records flowing through the log interceptor must not be dropped."""
    patroni_logger = PatroniLogger()
    patroni_logger.reload_config({'level': 'INFO'})
    patroni_logger.start()
    for message in ('Lock owner: ', 'blabla'):
        _LOG.info(message)
    patroni_logger.shutdown()
    self.assertEqual(patroni_logger.records_lost, 0)
def test_patroni_logger(self):
    """Exercise PatroniLogger end to end: config-driven file handler setup,
    handler format/close failures, makeRecord failures and a full queue on
    shutdown — in all cases no records may be lost."""
    config = {
        'log': {
            'traceback_level': 'DEBUG',
            'max_queue_size': 5,
            'dir': 'foo',
            'file_size': 4096,
            'file_num': 5,
            'loggers': {'foo.bar': 'INFO'}
        },
        'restapi': {},
        'postgresql': {'data_dir': 'foo'}
    }
    # Feed the configuration through the environment, as Patroni would
    # receive it when started with PATRONI_CONFIGURATION set.
    sys.argv = ['patroni.py']
    os.environ[Config.PATRONI_CONFIG_VARIABLE] = yaml.dump(
        config, default_flow_style=False)
    logger = PatroniLogger()
    patroni_config = Config(None)
    logger.reload_config(patroni_config['log'])
    # Logged before start(): the record must be queued, not dropped.
    _LOG.exception('test')
    logger.start()
    # A handler that fails to format a record must not crash the logger
    # thread (pytest's capture handler is muted so it doesn't interfere).
    with patch.object(logging.Handler, 'format', Mock(side_effect=Exception)),\
            patch('_pytest.logging.LogCaptureHandler.emit', Mock()):
        logging.error('test')
    # Rotation settings from the config must reach the file handler.
    self.assertEqual(logger.log_handler.maxBytes, config['log']['file_size'])
    self.assertEqual(logger.log_handler.backupCount, config['log']['file_num'])
    config['log']['level'] = 'DEBUG'
    config['log'].pop('dir')
    # Failing to close the previous handler during reload must be tolerated.
    with patch('logging.Handler.close', Mock(side_effect=Exception)):
        logger.reload_config(config['log'])
    # makeRecord yields one good record then raises: the failure must not
    # take the logging subsystem down.
    with patch.object(
            logging.Logger, 'makeRecord',
            Mock(side_effect=[
                logging.LogRecord('', logging.INFO, '', 0, '', (), None),
                Exception
            ])):
        logging.exception('test')
        logging.error('test')
    # A full queue during shutdown surfaces as SystemExit on the first try.
    with patch.object(Queue, 'put_nowait', Mock(side_effect=Full)):
        self.assertRaises(SystemExit, logger.shutdown)
    self.assertRaises(Exception, logger.shutdown)
    self.assertLessEqual(
        logger.queue_size, 2
    )  # "Failed to close the old log handler" could be still in the queue
    self.assertEqual(logger.records_lost, 0)
class Patroni(object):
    """Main Patroni process.

    Ties together the DCS, PostgreSQL, the REST API, the watchdog and the HA
    state machine, and drives the periodic HA loop in :meth:`run`.
    """

    def __init__(self, conf):
        from patroni.api import RestApiServer
        from patroni.dcs import get_dcs
        from patroni.ha import Ha
        from patroni.log import PatroniLogger
        from patroni.postgresql import Postgresql
        from patroni.request import PatroniRequest
        from patroni.watchdog import Watchdog

        # Install signal handlers first so an early SIGTERM is not lost.
        self.setup_signal_handlers()

        self.version = __version__
        self.logger = PatroniLogger()
        self.config = conf
        self.logger.reload_config(self.config.get('log', {}))
        self.dcs = get_dcs(self.config)
        self.watchdog = Watchdog(self.config)
        # Blocks (with retries) until dynamic configuration is available.
        self.load_dynamic_configuration()

        self.postgresql = Postgresql(self.config['postgresql'])
        self.api = RestApiServer(self, self.config['restapi'])
        self.request = PatroniRequest(self.config, True)
        self.ha = Ha(self)

        self.tags = self.get_tags()
        self.next_run = time.time()
        self.scheduled_restart = {}

    def load_dynamic_configuration(self):
        """Fetch dynamic configuration from the DCS, retrying every 5s on
        DCS errors; falls back to the bootstrap section when the DCS holds
        no configuration yet."""
        from patroni.exceptions import DCSError
        while True:
            try:
                cluster = self.dcs.get_cluster()
                if cluster and cluster.config and cluster.config.data:
                    if self.config.set_dynamic_configuration(cluster.config):
                        self.dcs.reload_config(self.config)
                        self.watchdog.reload_config(self.config)
                elif not self.config.dynamic_configuration and 'bootstrap' in self.config:
                    if self.config.set_dynamic_configuration(self.config['bootstrap']['dcs']):
                        self.dcs.reload_config(self.config)
                break
            except DCSError:
                logger.warning('Can not get cluster from dcs')
                # Back off before retrying the DCS.
                time.sleep(5)

    def get_tags(self):
        """Return configured tags, dropping boolean tags that are falsy."""
        return {
            tag: value
            for tag, value in self.config.get('tags', {}).items()
            if tag not in ('clonefrom', 'nofailover', 'noloadbalance', 'nosync') or value
        }

    @property
    def nofailover(self):
        return bool(self.tags.get('nofailover', False))

    @property
    def nosync(self):
        return bool(self.tags.get('nosync', False))

    def reload_config(self, sighup=False):
        """Propagate (re)loaded configuration to all subsystems.

        ``sighup=True`` means the local configuration file changed, which
        additionally reconfigures the internal HTTP client."""
        try:
            self.tags = self.get_tags()
            self.logger.reload_config(self.config.get('log', {}))
            self.watchdog.reload_config(self.config)
            if sighup:
                self.request.reload_config(self.config)
            self.api.reload_config(self.config['restapi'])
            self.postgresql.reload_config(self.config['postgresql'], sighup)
            self.dcs.reload_config(self.config)
        except Exception:
            # A bad config must not kill the daemon; log and keep running
            # with the previous working configuration.
            logger.exception('Failed to reload config_file=%s', self.config.config_file)

    @property
    def replicatefrom(self):
        return self.tags.get('replicatefrom')

    def sighup_handler(self, *args):
        # Only set a flag; the reload is performed in run(), outside
        # signal-handler context.
        self._received_sighup = True

    def sigterm_handler(self, *args):
        with self._sigterm_lock:
            if not self._received_sigterm:
                self._received_sigterm = True
                sys.exit()

    @property
    def noloadbalance(self):
        return bool(self.tags.get('noloadbalance', False))

    def schedule_next_run(self):
        """Sleep (via HA watch) until the next HA cycle is due; reschedule
        immediately if the previous cycle overran loop_wait."""
        self.next_run += self.dcs.loop_wait
        current_time = time.time()
        nap_time = self.next_run - current_time
        if nap_time <= 0:
            self.next_run = current_time
            # Release the GIL so we don't starve anyone waiting on async_executor lock
            time.sleep(0.001)
            # Warn user that Patroni is not keeping up
            logger.warning("Loop time exceeded, rescheduling immediately.")
        elif self.ha.watch(nap_time):
            self.next_run = time.time()

    @property
    def received_sigterm(self):
        # Read under the lock to stay consistent with sigterm_handler().
        with self._sigterm_lock:
            return self._received_sigterm

    def run(self):
        """Main loop: run one HA cycle per iteration until SIGTERM,
        re-applying local (SIGHUP) and dynamic (DCS) configuration changes."""
        self.api.start()
        self.logger.start()
        self.next_run = time.time()

        while not self.received_sigterm:
            if self._received_sighup:
                self._received_sighup = False
                if self.config.reload_local_configuration():
                    self.reload_config(True)
                else:
                    # Local config unchanged: still let PostgreSQL reread
                    # its own configuration files.
                    self.postgresql.config.reload_config(self.config['postgresql'], True)

            logger.info(self.ha.run_cycle())

            # Pick up dynamic configuration changes published in the DCS.
            if self.dcs.cluster and self.dcs.cluster.config and self.dcs.cluster.config.data \
                    and self.config.set_dynamic_configuration(self.dcs.cluster.config):
                self.reload_config()

            if self.postgresql.role != 'uninitialized':
                self.config.save_cache()

            self.schedule_next_run()

    def setup_signal_handlers(self):
        """Initialize signal state and register handlers (POSIX only)."""
        from threading import Lock
        self._received_sighup = False
        self._sigterm_lock = Lock()
        self._received_sigterm = False
        # Windows has no SIGHUP; skip handler registration there.
        if os.name != 'nt':
            signal.signal(signal.SIGHUP, self.sighup_handler)
            signal.signal(signal.SIGTERM, self.sigterm_handler)

    def shutdown(self):
        """Best-effort teardown: stop the REST API and the HA loop (logging
        any failure) and shut the logger down last so final messages flush."""
        with self._sigterm_lock:
            self._received_sigterm = True
        try:
            self.api.shutdown()
        except Exception:
            logger.exception('Exception during RestApi.shutdown')
        try:
            self.ha.shutdown()
        except Exception:
            logger.exception('Exception during Ha.shutdown')
        self.logger.shutdown()