class RedisConnectionPool(object):
    """A fixed-size pool of Redis connections.

    Backed by a gevent LifoQueue so that callers block when every
    connection is currently checked out.
    """

    def __init__(self, size, host, port, db=0, passwd=None, socket_timeout=None):
        """Create the pool and eagerly open all of its connections.

        Args:
          size - Number of connections to maintain in the pool.
          host - The hostname to use for making connections.
          port - The port to use for making connections.
          db - The database number to connect to.
          passwd - The password to use for accessing the database.
          socket_timeout - The socket timeout value for connections.
        """
        self.size = size
        self.all = set()
        self.pool = LifoQueue(maxsize=self.size)

        # Open every connection up-front; `all` tracks ownership so release()
        # can reject foreign connections, `pool` tracks availability.
        for _ in xrange(self.size):
            conn = redis.Connection(
                host, port, db, passwd, socket_timeout,
                encoding='utf-8', encoding_errors='strict',
                parser_class=DefaultParser)
            self.all.add(conn)
            self.pool.put(conn)

    def get_connection(self, command_name, *keys, **options):
        """Check a connection out of the pool.

        Blocks (up to 60s) when none is free; the arguments are ignored and
        exist only to match the redis-py ConnectionPool API.
        """
        return self.pool.get(timeout=60)

    def release(self, connection):
        """Check a connection back into the pool.

        Raises ValueError for connections this pool does not own.
        """
        if connection not in self.all:
            raise ValueError()
        self.pool.put(connection)

    def disconnect(self):
        """Close every connection managed by this pool."""
        for conn in self.all:
            conn.disconnect()
class RedisConnectionPool(object):
    """Pool of Redis Connections that uses a gevent LifoQueue to block when a
    resource is not available.

    NOTE(review): this file defines RedisConnectionPool twice; this later
    definition shadows the earlier one at import time — confirm which is
    intended.
    """

    def __init__(self, size, host, port, db=0, passwd=None, socket_timeout=None):
        """
        Args:
          size - Number of connections to maintain in the pool.
          host - The hostname to use for making connections.
          port - The port to use for making connections.
          db - The database number to connect to.
          passwd - The password to use for accessing the database.
          socket_timeout - The socket timeout value for connections.
        """
        self.size = size
        self.all = set()
        self.pool = LifoQueue(maxsize=self.size)

        # Eagerly create every connection; `all` records ownership for
        # release() validation, `pool` holds the currently-free ones.
        for _ in xrange(self.size):
            connection = redis.Connection(
                host, port, db, passwd, socket_timeout,
                encoding='utf-8', encoding_errors='strict',
                parser_class=DefaultParser)
            self.all.add(connection)
            self.pool.put(connection)

    def get_connection(self, command_name, *keys, **options):
        """Get a connection from the pool. If no connection is available,
        this call will block (raising Empty after 60s).

        Args: Ignored. Included to match the redis-py ConnectionPool API.
        """
        return self.pool.get(timeout=60)

    def release(self, connection):
        """Return a connection to the pool.

        Raises:
          ValueError - if the connection was not created by this pool.
        """
        if connection not in self.all:
            # Fix: the original raised a bare ValueError() with no message,
            # which made the failure undiagnosable in logs.
            raise ValueError('Connection does not belong to this pool.')
        self.pool.put(connection)

    def disconnect(self):
        """Close all the connections managed by this pool."""
        for connection in self.all:
            connection.disconnect()
class ClientPool(object):
    """Generic pool of lazily-created clients backed by a LIFO queue.

    The queue is pre-filled with empty ClientHolder objects; a real client is
    only created the first time a holder is checked out. A background GC task
    periodically closes clients that have been idle too long.
    """

    # Seconds an idle client may live before the GC task closes it.
    DEFAULT_CLIENT_EXPIRE_TIME = 300
    # Interval, in seconds, between runs of the expired-client GC task.
    DEFAULT_CLOSE_EXPIRE_CLIENT_INTERVAL = 60

    def __init__(self, pool_name, pool_size, client_class,
                 close_client_handler, *client_args, **client_kwargs):
        """
        Args:
          pool_name - Name used for logging and the GC task.
          pool_size - Number of holder slots in the pool (> 0).
          client_class - Callable that creates a new client.
          close_client_handler - Optional callable invoked to close a client.
          client_args/client_kwargs - Passed through to client_class.
        """
        assert pool_size > 0
        assert client_class is not None and hasattr(client_class, '__call__')
        assert close_client_handler is None or hasattr(close_client_handler, '__call__')
        self._pool_name = pool_name
        self._pool_size = pool_size
        self._client_class = client_class
        self._close_client_handler = close_client_handler
        self._client_args = client_args
        self._client_kwargs = client_kwargs
        self._queue = LifoQueue(maxsize=pool_size)
        # Pre-fill with empty holders; clients are created lazily on checkout.
        for _ in range(pool_size):
            self._queue.put(ClientHolder())
        self._client_expire_time = self.DEFAULT_CLIENT_EXPIRE_TIME
        self._gc_task = ScheduleTask(
            name='ClientPool-GC-%s' % pool_name,
            start_after_seconds=0,
            interval_seconds=self.DEFAULT_CLOSE_EXPIRE_CLIENT_INTERVAL,
            handler=self._close_expire_client)
        self._gc_task.run()

    def __del__(self):
        self._gc_task.stop()

    @contextmanager
    def get_client(self, block=True, pool_acquire_client_timeout=1, req_timeout=5):
        """Context manager yielding a pooled client.

        The body is guarded by a gevent Timeout of req_timeout seconds; on any
        exception the client is closed (its holder is still returned to the
        pool, empty, so a fresh client is created on next checkout).
        """
        client_holder = self._get_client(block, pool_acquire_client_timeout)
        tm = None
        try:
            tm = gevent.Timeout.start_new(req_timeout)
            yield client_holder.get_client()
        except BaseException as e:
            logger.error(
                'Client is out pool for too long %s seconds, raise exception: %s',
                req_timeout, e)
            self._close_client(client_holder)
            raise
        finally:
            if tm:
                tm.cancel()
            # Always return the holder, even when its client was closed above.
            self.push(client_holder)

    def _get_client(self, block=True, timeout=1):
        """Take a holder off the queue, lazily creating its client."""
        if self.is_empty():
            logger.info('ClientPool: %s is empty.', self._pool_name)
        client_holder = self._queue.get(block=block, timeout=timeout)
        if client_holder.get_client() is None:
            tm = None
            try:
                tm = gevent.Timeout.start_new(timeout)
                client_holder.set_client(self._create_client())
            except BaseException:
                # Creation failed or timed out: return the empty holder so
                # the slot is not leaked, then propagate.
                client_holder.set_client(None)
                self.push(client_holder)
                raise
            finally:
                if tm:
                    tm.cancel()
        client_holder.set_access_time(time.time())
        return client_holder

    def push(self, client_holder):
        """Return a holder to the queue (dropped if the queue is full)."""
        if not self.is_full():
            self._queue.put_nowait(client_holder)

    def is_full(self):
        return self._queue.qsize() >= self._pool_size

    def is_empty(self):
        return self._queue.qsize() <= 0

    def _create_client(self):
        """Instantiate a new client with the configured args."""
        return self._client_class(*self._client_args, **self._client_kwargs)

    def _close_client(self, client_holder):
        """Close the holder's client (via the handler, if any) and clear it."""
        if self._close_client_handler and client_holder.get_client():
            try:
                self._close_client_handler(client_holder.get_client())
            except Exception as e:
                logger.error('Close client raise exception: %s', e)
        client_holder.set_client(None)

    def _close_expire_client(self):
        """GC task body: close clients idle longer than the expire time.

        Bug fix: the original appended the *bound method* client_holder.get_client
        (missing the call parentheses) and then invoked self._close_client_handler
        directly — which crashes when the handler is None (allowed by __init__)
        and never cleared the holder. Expired holders are now routed through
        _close_client, which guards both.
        """
        cur_time = time.time()
        expired_holders = [
            holder for holder in self._queue.queue
            if holder.get_client() and
            cur_time - holder.get_access_time() > self._client_expire_time]
        for holder in expired_holders:
            self._close_client(holder)
class BlockingSentinelMasterGeventConnectionPool(object):
    """Blocking, Sentinel enabled Redis connection pool.

    We use this instead of the built-in connection pool in redis-py, because
    the built-in one for sentinel does not support a blocking implementation,
    which Taba relies on. (There is a blocking connection pool in redis-py,
    but it is incompatible with sentinel connections).
    """

    # Timeout, in seconds, when trying to retrieve a connection from the
    # redis connection pool. This is set to infinite (i.e. a worker will wait
    # indefinitely for a connection to become available). Any actual remote
    # failure should be caught and surfaced by the socket timeout.
    GET_CONNECTION_TIMEOUT = None

    def __init__(self, service_name, sentinel_manager, pool_size=8,
                 tab_prefix='redis_bsmg_pool',
                 connection_class=SentinelManagedConnection,
                 connection_kwargs=None,
                 sentinel_check_connections=False,
                 is_master=True):
        """
        Args:
          service_name - Name of the Sentinel service name to connect to.
          sentinel_manager - Sentinel manager object.
          pool_size - Number of connections to maintain in the pool.
          tab_prefix - Tab name prefix for Tabs recorded by this class.
          connection_class - Class to use for creating connections. Must be a
              sub-class of (or be API compatible with)
              SentinelManagedConnection.
          connection_kwargs - Keyword arguments to pass through to the
              connection constructor. None (the default) means no extra
              arguments; fixes the shared mutable `{}` default.
          sentinel_check_connections - Whether to enable Sentinel connection
              checking on establishing each connection.
          is_master - Always True. Included to match SentinelConnectionPool
              API.
        """
        self.service_name = service_name
        self.sentinel_manager = sentinel_manager
        self.pool_size = pool_size
        self.tab_prefix = tab_prefix
        self.conn_class = connection_class
        # Fix: a `{}` default argument is shared across all instances.
        self.conn_kwargs = connection_kwargs if connection_kwargs is not None else {}

        # Sentinel connection pool API member variables.
        self.is_master = is_master
        self.check_connection = sentinel_check_connections
        self.master_address = None

        # Actual pool containers.
        self.closed = False
        self.all = set()
        self.pool = LifoQueue(maxsize=self.pool_size)

        # Initialize the pool. A weakref proxy avoids a reference cycle
        # between the pool and its connections.
        for _ in xrange(self.pool_size):
            conn = self.conn_class(
                connection_pool=weakref.proxy(self), **self.conn_kwargs)
            self.all.add(conn)
            self.pool.put(conn)

    def __repr__(self):
        # Fix: the original referenced self.connection_class and
        # self.connection_kwargs, attributes that are never set (they are
        # stored as conn_class / conn_kwargs), so repr() raised
        # AttributeError.
        return "%s<%s|%s>" % (
            type(self).__name__, self.conn_class.__name__, self.conn_kwargs)

    #########################################################
    # Connection Pool API Methods
    #########################################################

    def get_connection(self, command_name, *keys, **options):
        """Get a connection from the pool.

        Args: Ignored. Included to match ConnectionPool API.

        Raises:
          Empty - if the pool is closed, or no connection became available
              within GET_CONNECTION_TIMEOUT.
        """
        if self.closed:
            raise Empty()

        try:
            return self.pool.get(timeout=self.GET_CONNECTION_TIMEOUT)
        except Empty as e:
            client.Counter(self.tab_prefix + '_redis_conn_pool_get_conn_timeout')
            # Fix: the original formatted self.host / self.port, which are
            # never set on this class; the resulting AttributeError masked the
            # real Empty error. Log the sentinel service name instead.
            LOG.error('Cannot get connection for service %s' % self.service_name)
            raise e

    def release(self, connection):
        """Releases the connection back to the pool

        Args:
          connection - Connection to put back in the pool. Must have been
              initially taken from this pool.
        """
        if connection not in self.all:
            raise ValueError()
        self.pool.put(connection)

    def disconnect(self):
        """Disconnects all connections in the pool."""
        for conn in self.all:
            conn.disconnect()

    def shutdown(self):
        """Close the pool and disconnect all connections."""
        self.closed = True
        try:
            # Wait (up to 30s) for all the connections to finish and get
            # returned to the pool.
            def _wait_ready():
                while not self.pool.full():
                    time.sleep(0.5)
            thread_util.PerformOperationWithTimeout(30, _wait_ready)
        except Exception as e:
            LOG.error(e)
        finally:
            # Disconnect anyway.
            self.disconnect()

    #########################################################
    # Sentinel Pool API Methods
    #########################################################

    def get_master_address(self):
        """SentinelConnectionPool API compatibility.

        Get the connection information to the service master.

        Returns:
          Tuple of (Master Hostname, Master Port)
        """
        master_address = self.sentinel_manager.discover_master(self.service_name)

        if self.master_address is None:
            self.master_address = master_address
        elif master_address != self.master_address:
            # Master address changed. Reset all connections.
            self.disconnect()

        return master_address

    def rotate_slaves(self):
        """SentinelConnectionPool API compatibility. Not implemented."""
        pass
class AbstractDatabaseConnectionPool(object):
    """Abstract gevent-friendly pool of DB-API connections.

    Subclasses must implement create_connection(). Free connections sit in a
    LIFO queue so the most recently used one is handed out first; idle or
    too-old connections are closed lazily during put()/cleanup().
    """

    def __init__(self, maxsize=100, maxwait=1.0, expires=None, cleanup=None):
        """
        The pool manages opened connections to the database. The main strategy
        is to keep the smallest number of alive connections which are required
        for best web service performance.

        In most cases connections are taken from the pool. In case of view
        peaks, the pool creates some extra resources to prevent the service
        from becoming unavailable. In times of low traffic (night),
        unnecessary connections are released.

        Parameters
        ----------
        maxsize : int
            Soft limit of the number of created connections. After reaching
            this limit, taking the next connection first waits `maxwait` time
            for any returned slot.
        maxwait : float
            The time in seconds to wait before creating a new connection after
            the pool gets empty. It may be 0; then immediate connections are
            created until the limit is reached.
        expires : float
            The time in seconds indicating how long a connection should stay
            alive. It is also used to close unneeded slots.
        cleanup : float
            Idle time in seconds after which an unused pooled connection is
            closed during queue cleanup.
        """
        if not isinstance(maxsize, integer_types):
            raise TypeError('Expected integer, got %r' % (maxsize, ))
        self._maxsize = maxsize
        self._maxwait = maxwait
        self._expires = expires
        self._cleanup = cleanup
        # Bookkeeping keyed by id(connection): creation time and last-use time.
        self._created_at = {}
        self._latest_use = {}
        self._pool = LifoQueue()
        self._size = 0
        # When neither expiry policy is configured, park the next-cleanup
        # timestamp far in the future so _cleanup_queue() is a no-op.
        self._latest_cleanup = 0 if self._expires or self._cleanup else 0xffffffffffffffff
        # Run cleanup at the shorter of the two configured intervals (either
        # may be None, hence the `or` fallbacks).
        self._interval_cleanup = min(
            self._expires or self._cleanup,
            self._cleanup or self._expires) if self._expires or self._cleanup else 0
        self._cleanup_lock = Semaphore(value=1)

    def create_connection(self):
        """Create a new raw connection; must be provided by subclasses."""
        raise NotImplementedError()

    def close_connection(self, item):
        """Close a connection and drop its bookkeeping, best-effort."""
        try:
            self._size -= 1
            self._created_at.pop(id(item), None)
            self._latest_use.pop(id(item), None)
            item.close()
        except Exception:
            pass

    def cleanup(self):
        """Public hook: close idle/expired connections sitting in the pool."""
        self._cleanup_queue(time.time())

    def _cleanup_queue(self, now):
        """Drain the queue, close idle/expired connections, keep the rest."""
        # Cheap unlocked check: _latest_cleanup acts as a rate limiter.
        if self._latest_cleanup > now:
            return

        with self._cleanup_lock:
            # Re-check under the lock; another greenlet may have cleaned up.
            if self._latest_cleanup > now:
                return
            self._latest_cleanup = now + self._interval_cleanup

            # Deadlines: anything last used before `cleanup`, or created
            # before `expires`, gets closed.
            cleanup = now - self._cleanup if self._cleanup else None
            expires = now - self._expires if self._expires else None

            # Instead of creating a new LIFO for self._pool, the old one is
            # reused, because other greenlets might be waiting for a
            # connection on it.
            fresh_slots = []
            try:
                # Try to refill self._pool ASAP, preventing creation of new
                # connections — but note that after this loop the LIFO would
                # be in reversed order.
                while not self._pool.empty():
                    item = self._pool.get_nowait()
                    if cleanup and self._latest_use.get(id(item), 0) < cleanup:
                        self.close_connection(item)
                    elif expires and self._created_at.get(id(item), 0) < expires:
                        self.close_connection(item)
                    else:
                        fresh_slots.append(item)
            except Empty:
                pass

            # Reverse the order back (freshest connections should be at the
            # beginning).
            for conn in reversed(fresh_slots):
                self._pool.put_nowait(conn)

    def get(self):
        """Take a connection from the pool, create one, or raise.

        Order: (1) a free pooled connection; (2) if at the size limit, wait
        up to `maxwait` for a returned one; (3) create a new connection if
        still under the limit, otherwise raise OperationalError.
        """
        try:
            return self._pool.get_nowait()
        except Empty:
            pass

        if self._size >= self._maxsize:
            try:
                return self._pool.get(timeout=self._maxwait)
            except Empty:
                pass

        # It is possible that after waiting self._maxwait time, no connection
        # has been returned because old ones were cleaned up on put(), so
        # there is no free connection even though the LIFO is not full. In
        # that case a new connection should be created; otherwise an
        # exception is raised.
        if self._size >= self._maxsize:
            raise OperationalError(
                "Too many connections created: {} (maxsize is {})".format(
                    self._size, self._maxsize))

        try:
            # Count the slot before connecting so concurrent greenlets cannot
            # overshoot the limit; roll the counter back on failure.
            self._size += 1
            conn = self.create_connection()
        except:
            self._size -= 1
            raise

        now = time.time()
        self._created_at[id(conn)] = now
        self._latest_use[id(conn)] = now
        return conn

    def put(self, conn):
        """Return a connection to the pool and trigger rate-limited cleanup."""
        now = time.time()
        self._pool.put(conn)
        self._latest_use[id(conn)] = now
        self._cleanup_queue(now)

    def closeall(self):
        """Close every pooled connection (ignoring errors); reset the size."""
        while not self._pool.empty():
            conn = self._pool.get_nowait()
            try:
                conn.close()
            except Exception:
                pass
        self._size = 0

    @contextlib.contextmanager
    def connection(self, isolation_level=None):
        """Context manager yielding a pooled connection.

        Commits on success, rolls back on error, then returns the connection
        to the pool when it is still usable.
        """
        conn = self.get()
        try:
            if isolation_level is not None:
                if conn.isolation_level == isolation_level:
                    # Already at the requested level; nothing to re-apply later.
                    isolation_level = None
                else:
                    conn.set_isolation_level(isolation_level)
            yield conn
        except:
            if conn.closed:
                # The connection died; drop the whole pool (siblings likely
                # share the same fate) and let get() rebuild on demand.
                conn = None
                self.closeall()
            else:
                # _rollback returns None when the rollback itself failed, so
                # a broken connection is not returned to the pool.
                conn = self._rollback(conn)
            raise
        else:
            if conn.closed:
                raise OperationalError(
                    "Cannot commit because connection was closed: %r" % (conn, ))
            conn.commit()
        finally:
            if conn is not None and not conn.closed:
                if isolation_level is not None:
                    # NOTE(review): this re-applies the *requested* isolation
                    # level rather than restoring a default before pooling —
                    # confirm that is the intended behavior.
                    conn.set_isolation_level(isolation_level)
                self.put(conn)

    @contextlib.contextmanager
    def cursor(self, *args, **kwargs):
        """Context manager yielding a cursor from a pooled connection.

        An `isolation_level` keyword is forwarded to connection(); remaining
        arguments go to conn.cursor().
        """
        isolation_level = kwargs.pop('isolation_level', None)
        with self.connection(isolation_level) as conn:
            yield conn.cursor(*args, **kwargs)

    def _rollback(self, conn):
        """Roll back `conn`; return it, or None if the rollback failed."""
        try:
            conn.rollback()
        except:
            # Report through the gevent hub instead of crashing the caller.
            gevent.get_hub().handle_error(conn, *sys.exc_info())
            return
        return conn

    def execute(self, *args, **kwargs):
        """Execute a statement and return the affected row count."""
        with self.cursor(**kwargs) as cursor:
            cursor.execute(*args)
            return cursor.rowcount

    def fetchone(self, *args, **kwargs):
        """Execute a query and return a single row."""
        with self.cursor(**kwargs) as cursor:
            cursor.execute(*args)
            return cursor.fetchone()

    def fetchall(self, *args, **kwargs):
        """Execute a query and return all rows."""
        with self.cursor(**kwargs) as cursor:
            cursor.execute(*args)
            return cursor.fetchall()

    def fetchiter(self, *args, **kwargs):
        """Execute a query and lazily yield rows in fetchmany() batches."""
        with self.cursor(**kwargs) as cursor:
            cursor.execute(*args)
            while True:
                items = cursor.fetchmany()
                if not items:
                    break
                for item in items:
                    yield item