def transaction_raw(reuse=True):
    """
    A single transaction. It is automatically committed on success and
    rolled back on exception. Use as follows:

    with database.transaction() as transaction:
        transaction.execute(...)
        transaction.execute(...)

    If reuse is true, the cursor inside may have been used before and may
    be used again later.
    """
    global __cache
    if 'connection' not in __cache.__dict__:
        logger.debug("Initializing connection to DB")
        retry = True
        while retry:
            try:
                __cache.connection = psycopg2.connect(database=get('db'),
                                                      user=get('dbuser'),
                                                      password=get('dbpasswd'),
                                                      host=get('dbhost'))
                retry = False
            except Exception as e:
                logger.error("Failed to create DB connection (blocking until it works): %s", e)
                time.sleep(1)
    if reuse:
        if 'context' not in __cache.__dict__:
            __cache.context = __CursorContext(__cache.connection)
            logger.debug("Initializing cursor")
        else:
            __cache.context.reuse()
        return __cache.context
    else:
        return __CursorContext(__cache.connection)
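# A minimal sketch of the __CursorContext helper returned above, assuming
# the commit/rollback semantics described in the docstring. The class name
# comes from the code; this body is an illustration, not the project's
# actual implementation.
class __CursorContext:
    def __init__(self, connection):
        self.__connection = connection

    def reuse(self):
        # Hook invoked when the cached context is handed out again; the
        # real class may track nesting here, a no-op in this sketch.
        pass

    def __enter__(self):
        self.__cursor = self.__connection.cursor()
        return self.__cursor

    def __exit__(self, exc_type, exc_value, traceback):
        # Commit on a clean exit, roll back on exception, as the docstring
        # of transaction_raw() promises.
        if exc_type is None:
            self.__connection.commit()
        else:
            self.__connection.rollback()
        self.__cursor.close()
        return False  # propagate any exception to the caller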
def submit(self, cback, cid, challenge, response):
    global connecting
    global receiver
    # Build the query line for the authenticator.
    line = 'HALF ' + cid + ' ' + challenge + ' ' + response
    if receiver:
        # The connection is up, hand the line over directly.
        receiver.submit(cback, line)
    else:
        # Not connected yet. Start connecting (only once) and queue the
        # line until the connection is established.
        if not connecting:
            connecting = True
            reactor.connectTCP(master_config.get('authenticator_host'),
                               master_config.getint('authenticator_port'),
                               self)
        self.__queue.append((line, cback))
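# A hypothetical sketch of the counterpart to submit() above: once the TCP
# connection to the authenticator is up, the factory records the protocol
# as the global receiver and drains whatever was queued while disconnected.
# The method name and wiring are assumptions; only submit() appears in the
# code above.
def connected(self, protocol):
    global connecting
    global receiver
    connecting = False
    receiver = protocol
    # Flush the lines queued while the connection was being set up.
    queued, self.__queue = self.__queue, []
    for (line, cback) in queued:
        receiver.submit(cback, line)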
from twisted.internet import reactor
from subprocess import Popen
import log_extra
import logging
import logging.handlers
from client import ClientFactory
from plugin import Plugins, pool
import master_config
import activity
import importlib
import os

# If we have too many background threads, the GIL slows down the
# main thread and clients start dropping because we are not able
# to keep up with pings.
reactor.suggestThreadPoolSize(5)
severity = master_config.get('log_severity')
if severity == 'TRACE':
    severity = log_extra.TRACE_LEVEL
else:
    severity = getattr(logging, severity)
log_file = master_config.get('log_file')
logging.basicConfig(level=severity, format=master_config.get('log_format'))
if log_file != '-':
    handler = logging.handlers.RotatingFileHandler(
        log_file,
        maxBytes=int(master_config.get('log_file_size')),
        backupCount=int(master_config.get('log_file_count')))
    handler.setFormatter(
        logging.Formatter(fmt=master_config.get('log_format')))
    logging.getLogger().addHandler(handler)
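# A minimal sketch of what the log_extra module used above might contain,
# assuming it registers a custom TRACE severity below DEBUG. The name
# TRACE_LEVEL comes from the code; the value and the helper are assumptions.
import logging

TRACE_LEVEL = 5  # one notch below logging.DEBUG (10)
logging.addLevelName(TRACE_LEVEL, 'TRACE')

def trace(logger, message, *args, **kwargs):
    # Convenience wrapper so callers can emit TRACE records.
    if logger.isEnabledFor(TRACE_LEVEL):
        logger.log(TRACE_LEVEL, message, *args, **kwargs)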
from twisted.internet import reactor, protocol
from twisted.internet.endpoints import UNIXServerEndpoint
from twisted.internet.error import ReactorNotRunning
from subprocess import Popen
import log_extra
import logging
import logging.handlers
from client import ClientFactory
from plugin import Plugins, pool
import master_config
import activity
import importlib
import os

# Too many threads seem to have trouble with locking :-(
reactor.suggestThreadPoolSize(4)
severity = master_config.get('log_severity')
if severity == 'TRACE':
    severity = log_extra.TRACE_LEVEL
else:
    severity = getattr(logging, severity)
log_file = master_config.get('log_file')
logging.basicConfig(level=severity, format=master_config.get('log_format'))
if log_file != '-':
    handler = logging.handlers.RotatingFileHandler(
        log_file,
        maxBytes=int(master_config.get('log_file_size')),
        backupCount=int(master_config.get('log_file_count')))
    handler.setFormatter(
        logging.Formatter(fmt=master_config.get('log_format')))
    logging.getLogger().addHandler(handler)

loaded_plugins = {}
plugins = Plugins()
for (plugin, config) in master_config.plugins().items():
    (modulename, classname) = plugin.rsplit('.', 1)
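    # The snippet ends mid-loop; the lines below are a hedged sketch of how
    # plugin loading typically continues, assuming each plugin class takes
    # the Plugins collection and its config section. importlib.import_module()
    # and getattr() are standard calls; the constructor signature is an
    # assumption.
    module = importlib.import_module(modulename)
    cls = getattr(module, classname)
    loaded_plugins[plugin] = cls(plugins, config)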