import logging
import time
from contextlib import contextmanager

import gevent
from gevent.queue import LifoQueue

# ClientHolder and ScheduleTask are assumed to be defined elsewhere in this package.
logger = logging.getLogger(__name__)


class ClientPool(object):
    """A fixed-size LIFO pool of lazily created clients. A background GC task
    periodically closes clients that have been idle longer than the expire time."""

    DEFAULT_CLIENT_EXPIRE_TIME = 300
    DEFAULT_CLOSE_EXPIRE_CLIENT_INTERVAL = 60

    def __init__(self, pool_name, pool_size, client_class,
                 close_client_handler, *client_args, **client_kwargs):
        assert pool_size > 0
        assert client_class is not None and hasattr(client_class, '__call__')
        assert close_client_handler is None or hasattr(close_client_handler, '__call__')
        self._pool_name = pool_name
        self._pool_size = pool_size
        self._client_class = client_class
        self._close_client_handler = close_client_handler
        self._client_args = client_args
        self._client_kwargs = client_kwargs
        # Pre-fill the queue with empty holders; clients are created on demand.
        self._queue = LifoQueue(maxsize=pool_size)
        for _ in range(pool_size):
            self._queue.put(ClientHolder())
        self._client_expire_time = self.DEFAULT_CLIENT_EXPIRE_TIME
        self._gc_task = ScheduleTask(
            name='ClientPool-GC-%s' % pool_name,
            start_after_seconds=0,
            interval_seconds=self.DEFAULT_CLOSE_EXPIRE_CLIENT_INTERVAL,
            handler=self._close_expire_client)
        self._gc_task.run()

    def __del__(self):
        self._gc_task.stop()

    @contextmanager
    def get_client(self, block=True, pool_acquire_client_timeout=1, req_timeout=5):
        client_holder = self._get_client(block, pool_acquire_client_timeout)
        tm = None
        try:
            tm = gevent.Timeout.start_new(req_timeout)
            yield client_holder.get_client()
        except BaseException as e:
            logger.error(
                'Client has been out of the pool for more than %s seconds, '
                'or the request raised an exception: %s', req_timeout, e)
            self._close_client(client_holder)
            raise
        finally:
            if tm:
                tm.cancel()
            # The holder always goes back to the pool; on failure its client
            # has been cleared and will be recreated on the next checkout.
            self.push(client_holder)

    def _get_client(self, block=True, timeout=1):
        if self.is_empty():
            logger.info('ClientPool: %s is empty.', self._pool_name)
        client_holder = self._queue.get(block=block, timeout=timeout)
        if client_holder.get_client() is None:
            tm = None
            try:
                tm = gevent.Timeout.start_new(timeout)
                client_holder.set_client(self._create_client())
            except BaseException:
                client_holder.set_client(None)
                self.push(client_holder)
                raise
            finally:
                if tm:
                    tm.cancel()
        client_holder.set_access_time(time.time())
        return client_holder

    def push(self, client_holder):
        if not self.is_full():
            self._queue.put_nowait(client_holder)

    def is_full(self):
        return self._queue.qsize() >= self._pool_size

    def is_empty(self):
        return self._queue.qsize() <= 0

    def _create_client(self):
        return self._client_class(*self._client_args, **self._client_kwargs)

    def _close_client(self, client_holder):
        if self._close_client_handler and client_holder.get_client():
            try:
                self._close_client_handler(client_holder.get_client())
            except Exception as e:
                logger.error('Close client raised exception: %s', e)
        client_holder.set_client(None)

    def _close_expire_client(self):
        cur_time = time.time()
        # Collect the holders (not the clients) so _close_client() can clear
        # each holder's client reference after closing it.
        need_closed_holders = []
        for client_holder in self._queue.queue:
            if (client_holder.get_client()
                    and cur_time - client_holder.get_access_time() > self._client_expire_time):
                need_closed_holders.append(client_holder)
        for client_holder in need_closed_holders:
            self._close_client(client_holder)
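
# --- Usage sketch (illustrative only). `EchoClient` and `close_echo_client`
# --- are hypothetical names invented for this example, not part of the module;
# --- a real client would wrap a socket or RPC transport. Running this also
# --- assumes the ClientHolder/ScheduleTask helpers and gevent are available.

class EchoClient(object):
    def __init__(self, host, port):
        self.host = host  # hypothetical connection parameters
        self.port = port

    def echo(self, msg):
        return msg


def close_echo_client(client):
    # A real handler would close the client's socket/transport here.
    pass


if __name__ == '__main__':
    # Positional args after close_echo_client and all kwargs are forwarded
    # to EchoClient(...) whenever the pool lazily creates a client.
    pool = ClientPool('echo', 4, EchoClient, close_echo_client,
                      host='127.0.0.1', port=9090)
    # get_client() is a context manager: the slot is returned to the pool on
    # exit, and the client is closed if the block raises or exceeds req_timeout.
    with pool.get_client(req_timeout=5) as client:
        print(client.echo('hello'))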
import contextlib
import sys
import time

import gevent
from gevent.lock import Semaphore
from gevent.queue import LifoQueue, Empty

# Assumed imports: `integer_types` comes from six in the original (on Python 3
# alone, integer_types = (int,) would suffice); OperationalError is assumed to
# come from the DB driver, e.g. psycopg2.
from six import integer_types
from psycopg2 import OperationalError


class AbstractDatabaseConnectionPool(object):
    def __init__(self, maxsize=100, maxwait=1.0, expires=None, cleanup=None):
        """
        The pool manages opened connections to the database. The main strategy
        is to keep the smallest number of alive connections which are required
        for best web service performance. In most cases connections are taken
        from the pool. In case of traffic peaks, the pool creates some extra
        connections to prevent the service from becoming unavailable. In times
        of low traffic (e.g. at night), unnecessary connections are released.

        Parameters
        ----------
        maxsize : int
            Soft limit of the number of created connections. After reaching
            this limit, taking the next connection first waits `maxwait`
            seconds for any returned slot.
        maxwait : float
            The time in seconds to wait for a returned connection once the
            pool is empty and `maxsize` has been reached. It may be 0; then a
            new connection is created immediately if no slot is available.
        expires : float
            The time in seconds a connection may stay alive. It is also used
            to close unneeded slots.
        cleanup : float
            The time in seconds after which an idle (unused) connection is
            closed.
        """
        if not isinstance(maxsize, integer_types):
            raise TypeError('Expected integer, got %r' % (maxsize, ))
        self._maxsize = maxsize
        self._maxwait = maxwait
        self._expires = expires
        self._cleanup = cleanup
        self._created_at = {}
        self._latest_use = {}
        self._pool = LifoQueue()
        self._size = 0
        # If neither expires nor cleanup is set, push the next cleanup far
        # enough into the future that _cleanup_queue() never runs.
        self._latest_cleanup = 0 if self._expires or self._cleanup else 0xffffffffffffffff
        self._interval_cleanup = (
            min(self._expires or self._cleanup, self._cleanup or self._expires)
            if self._expires or self._cleanup else 0)
        self._cleanup_lock = Semaphore(value=1)

    def create_connection(self):
        raise NotImplementedError()

    def close_connection(self, item):
        try:
            self._size -= 1
            self._created_at.pop(id(item), None)
            self._latest_use.pop(id(item), None)
            item.close()
        except Exception:
            pass

    def cleanup(self):
        self._cleanup_queue(time.time())

    def _cleanup_queue(self, now):
        if self._latest_cleanup > now:
            return
        with self._cleanup_lock:
            if self._latest_cleanup > now:
                return
            self._latest_cleanup = now + self._interval_cleanup
            cleanup = now - self._cleanup if self._cleanup else None
            expires = now - self._expires if self._expires else None
            # Instead of creating a new LIFO for self._pool, the old one is
            # reused, because some other greenlet might be waiting for a
            # connection on it.
            fresh_slots = []
            try:
                # Try to refill self._pool as soon as possible, preventing
                # creation of new connections, because after this loop the
                # LIFO will be in reversed order.
                while not self._pool.empty():
                    item = self._pool.get_nowait()
                    if cleanup and self._latest_use.get(id(item), 0) < cleanup:
                        self.close_connection(item)
                    elif expires and self._created_at.get(id(item), 0) < expires:
                        self.close_connection(item)
                    else:
                        fresh_slots.append(item)
            except Empty:
                pass
            # Reverse the order back (the freshest connections should be at
            # the beginning).
            for conn in reversed(fresh_slots):
                self._pool.put_nowait(conn)

    def get(self):
        try:
            return self._pool.get_nowait()
        except Empty:
            pass
        if self._size >= self._maxsize:
            try:
                return self._pool.get(timeout=self._maxwait)
            except Empty:
                pass
        # It is possible that after waiting self._maxwait seconds no connection
        # has been returned, because old ones were closed during put(); in that
        # case there is no connection but the LIFO is also not full. A new
        # connection should then be created; otherwise an exception is raised.
        if self._size >= self._maxsize:
            raise OperationalError(
                "Too many connections created: {} (maxsize is {})".format(
                    self._size, self._maxsize))
        try:
            self._size += 1
            conn = self.create_connection()
        except:
            self._size -= 1
            raise
        now = time.time()
        self._created_at[id(conn)] = now
        self._latest_use[id(conn)] = now
        return conn

    def put(self, conn):
        now = time.time()
        self._pool.put(conn)
        self._latest_use[id(conn)] = now
        self._cleanup_queue(now)

    def closeall(self):
        while not self._pool.empty():
            conn = self._pool.get_nowait()
            try:
                conn.close()
            except Exception:
                pass
        self._size = 0

    @contextlib.contextmanager
    def connection(self, isolation_level=None):
        conn = self.get()
        try:
            if isolation_level is not None:
                if conn.isolation_level == isolation_level:
                    isolation_level = None
                else:
                    conn.set_isolation_level(isolation_level)
            yield conn
        except:
            if conn.closed:
                conn = None
                self.closeall()
            else:
                conn = self._rollback(conn)
            raise
        else:
            if conn.closed:
                raise OperationalError(
                    "Cannot commit because connection was closed: %r" % (conn, ))
            conn.commit()
        finally:
            if conn is not None and not conn.closed:
                if isolation_level is not None:
                    conn.set_isolation_level(isolation_level)
                self.put(conn)

    @contextlib.contextmanager
    def cursor(self, *args, **kwargs):
        isolation_level = kwargs.pop('isolation_level', None)
        with self.connection(isolation_level) as conn:
            yield conn.cursor(*args, **kwargs)

    def _rollback(self, conn):
        try:
            conn.rollback()
        except:
            gevent.get_hub().handle_error(conn, *sys.exc_info())
            return
        return conn

    def execute(self, *args, **kwargs):
        with self.cursor(**kwargs) as cursor:
            cursor.execute(*args)
            return cursor.rowcount

    def fetchone(self, *args, **kwargs):
        with self.cursor(**kwargs) as cursor:
            cursor.execute(*args)
            return cursor.fetchone()

    def fetchall(self, *args, **kwargs):
        with self.cursor(**kwargs) as cursor:
            cursor.execute(*args)
            return cursor.fetchall()

    def fetchiter(self, *args, **kwargs):
        with self.cursor(**kwargs) as cursor:
            cursor.execute(*args)
            while True:
                items = cursor.fetchmany()
                if not items:
                    break
                for item in items:
                    yield item
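
# --- A minimal concrete subclass sketch. It assumes psycopg2 is the driver
# --- (the OperationalError import above suggests it, but any DB-API module
# --- with gevent-friendly sockets would work); `PostgresConnectionPool` is
# --- a hypothetical name for illustration, not part of the abstract class.

import psycopg2


class PostgresConnectionPool(AbstractDatabaseConnectionPool):
    def __init__(self, *args, **kwargs):
        # Everything except the pool's own keyword arguments is forwarded
        # verbatim to psycopg2.connect() on each create_connection() call.
        self._connect_args = args
        self._connect_kwargs = kwargs
        maxsize = self._connect_kwargs.pop('maxsize', 100)
        AbstractDatabaseConnectionPool.__init__(self, maxsize=maxsize)

    def create_connection(self):
        return psycopg2.connect(*self._connect_args, **self._connect_kwargs)


# Usage sketch (assumes a reachable database named "test"):
#
#   pool = PostgresConnectionPool('dbname=test', maxsize=8)
#   print(pool.fetchone('SELECT 1'))
#   with pool.cursor() as cur:
#       cur.execute('SELECT now()')
#       print(cur.fetchone())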