def test_notify_waiters(self):
    dbapi = MockDBAPI()
    canary = []

    def creator1():
        canary.append(1)
        return dbapi.connect()

    def creator2():
        canary.append(2)
        return dbapi.connect()

    p1 = pool.QueuePool(creator=creator1, pool_size=1, timeout=None,
                        max_overflow=0)
    p2 = pool.QueuePool(creator=creator2, pool_size=1, timeout=None,
                        max_overflow=-1)

    def waiter(p):
        conn = p.connect()
        time.sleep(.5)
        conn.close()

    c1 = p1.connect()

    for i in range(5):
        t = threading.Thread(target=waiter, args=(p1, ))
        t.setDaemon(True)
        t.start()
    time.sleep(.5)

    eq_(canary, [1])
    p1._pool.abort(p2)
    time.sleep(1)
    eq_(canary, [1, 2, 2, 2, 2, 2])

def test_waiters_handled(self):
    """test that threads waiting for connections are handled
    when the pool is replaced.

    """
    dbapi = MockDBAPI()

    def creator():
        return dbapi.connect()

    success = []
    for timeout in (None, 30):
        for max_overflow in (0, -1, 3):
            p = pool.QueuePool(creator=creator, pool_size=2,
                               timeout=timeout,
                               max_overflow=max_overflow)

            def waiter(p):
                conn = p.connect()
                time.sleep(.5)
                success.append(True)
                conn.close()

            time.sleep(.2)
            c1 = p.connect()
            c2 = p.connect()

            for i in range(2):
                t = threading.Thread(target=waiter, args=(p, ))
                t.setDaemon(True)  # so the tests don't hang if this fails
                t.start()

            c1.invalidate()
            c2.invalidate()
            p2 = p._replace()

    time.sleep(2)
    eq_(len(success), 12)

def test_timeout_race(self):
    # test a race condition where the initial connecting threads all race
    # to queue.Empty, then block on the mutex.  each thread consumes a
    # connection as they go in.  when the limit is reached, the remaining
    # threads go in, and get TimeoutError; even though they never got to
    # wait for the timeout on queue.get().  the fix involves checking the
    # timeout again within the mutex, and if so, unlocking and throwing
    # them back to the start of do_get()
    dbapi = MockDBAPI()
    p = pool.QueuePool(creator=lambda: dbapi.connect(delay=.05),
                       pool_size=2, max_overflow=1,
                       use_threadlocal=False, timeout=3)
    timeouts = []

    def checkout():
        for x in range(1):
            now = time.time()
            try:
                c1 = p.connect()
            except tsa.exc.TimeoutError as e:
                timeouts.append(time.time() - now)
                continue
            time.sleep(4)
            c1.close()

def test_threadfairy(self):
    p = pool.QueuePool(creator=mock_dbapi.connect, pool_size=3,
                       max_overflow=-1, use_threadlocal=True)
    c1 = p.connect()
    c1.close()
    c2 = p.connect()
    assert c2.connection is not None

def __init__(self, cfg, app=None):
    self.cfg = cfg
    self._db_hosts = self.cfg['db']['hosts']
    self._db_port = self.cfg['db']['port']
    self._db_database = self.cfg['db']['database']
    self._db_user = self.cfg['db']['username']
    self._db_pass = self.cfg['db']['password']

    # Normalize the host setting to a list: a comma-separated string becomes
    # several hosts, a plain string becomes a one-element list.
    if not isinstance(self._db_hosts, list):
        if ',' in self._db_hosts:
            self._db_hosts = self._db_hosts.split(',')
        else:
            self._db_hosts = [self._db_hosts]

    self.pool = pool.QueuePool(self._getconn, max_overflow=1, pool_size=2,
                               echo=self.cfg['general']['debug'])
    self.engine = create_engine('postgresql+psycopg2://', pool=self.pool,
                                echo=self.cfg['general']['debug'])
    self.plugin = sqlalchemy.Plugin(self.engine, Base.metadata, keyword='db',
                                    create=True, commit=False,
                                    use_kwargs=False)
    if app:
        Base.metadata.create_all(self.engine)
        app.install(self.plugin)

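# Hedged sketch (not in the original source) of the self._getconn creator that
# the QueuePool above wraps; the 'postgresql+psycopg2://' engine URL suggests
# psycopg2, but the exact connect() arguments are assumptions for illustration.
def _getconn(self):
    import psycopg2
    last_error = None
    # Try each configured host in turn until one accepts the connection.
    for host in self._db_hosts:
        try:
            return psycopg2.connect(host=host, port=self._db_port,
                                    dbname=self._db_database,
                                    user=self._db_user,
                                    password=self._db_pass)
        except psycopg2.OperationalError as exc:
            last_error = exc
    raise last_error
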
def get_postgres_pool(conn, conf):
    rePool = pool.QueuePool(conn, max_overflow=conf.max_overflow,
                            pool_size=conf.pool_size)
    if rePool:
        print("Connection pool created successfully!!!")
    return rePool

def __init__(self, host: str = "localhost", port: Union[int, str] = 5432,
             db_name: str = "postgres", user: str = "postgres",
             password: str = ""):
    """
    Database connection object.

    The connection object is to be used as a context manager:

    Examples
    --------
    >>> con = Connection()
    >>> with con as c:

    :param host: Hostname
    :param port: Port
    :param db_name: Database name
    :param user: Username
    :param password: Password
    """
    self._password = password
    self._host = host
    self._port = port
    self._user = user
    self._db_name = db_name
    self.__connection_pool = pool.QueuePool(self._get_conn, pool_size=5)

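# Hedged sketch (not part of the original snippet) of the context-manager
# protocol the docstring above implies, plus the _get_conn creator handed to
# QueuePool.  These methods would live on the same class as the __init__ above;
# the use of psycopg2 and its arguments are assumptions for illustration.
def _get_conn(self):
    import psycopg2
    return psycopg2.connect(host=self._host, port=self._port,
                            dbname=self._db_name, user=self._user,
                            password=self._password)

def __enter__(self):
    # Check a proxied connection out of the QueuePool for the `with` block.
    self._checked_out = self.__connection_pool.connect()
    return self._checked_out

def __exit__(self, exc_type, exc_val, exc_tb):
    # close() on the pool proxy returns the connection to the pool rather than
    # closing the underlying DBAPI connection.
    self._checked_out.close()
    return False
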
def test_detach(self):
    dbapi = MockDBAPI()
    p = pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
                       pool_size=1, max_overflow=0, use_threadlocal=False)

    c1 = p.connect()
    c1.detach()
    c_id = c1.connection.id

    c2 = p.connect()
    assert c2.connection.id != c1.connection.id
    dbapi.raise_error = True

    c2.invalidate()
    c2 = None

    c2 = p.connect()
    assert c2.connection.id != c1.connection.id

    con = c1.connection

    assert not con.closed
    c1.close()
    assert con.closed

def test_properties(self):
    dbapi = MockDBAPI()
    p = pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
                       pool_size=1, max_overflow=0, use_threadlocal=False)

    c = p.connect()
    self.assert_(not c.info)
    self.assert_(c.info is c._connection_record.info)

    c.info['foo'] = 'bar'
    c.close()
    del c

    c = p.connect()
    self.assert_('foo' in c.info)

    c.invalidate()
    c = p.connect()
    self.assert_('foo' not in c.info)

    c.info['foo2'] = 'bar2'
    c.detach()
    self.assert_('foo2' in c.info)

    c2 = p.connect()
    self.assert_(c.connection is not c2.connection)
    self.assert_(not c2.info)
    self.assert_('foo2' in c.info)

def _test_overflow(self, thread_count, max_overflow):
    def creator():
        time.sleep(.05)
        return mock_dbapi.connect()

    p = pool.QueuePool(creator=creator, pool_size=3, timeout=2,
                       max_overflow=max_overflow)
    peaks = []

    def whammy():
        for i in range(10):
            try:
                con = p.connect()
                time.sleep(.005)
                peaks.append(p.overflow())
                con.close()
                del con
            except tsa.exc.TimeoutError:
                pass

    threads = []
    for i in range(thread_count):
        th = threading.Thread(target=whammy)
        th.start()
        threads.append(th)
    for th in threads:
        th.join()

    self.assert_(max(peaks) <= max_overflow)

def test_recreate(self):
    dbapi = MockDBAPI()
    p = pool.QueuePool(creator=lambda: dbapi.connect('foo.db'), pool_size=1,
                       max_overflow=0, use_threadlocal=False)
    p2 = p.recreate()
    assert p2.size() == 1
    assert p2._use_threadlocal is False
    assert p2._max_overflow == 0

def test_dispose_closes_pooled(self):
    dbapi = MockDBAPI()

    p = pool.QueuePool(creator=dbapi.connect, pool_size=2, timeout=None,
                       max_overflow=0)
    c1 = p.connect()
    c2 = p.connect()
    c1_con = c1.connection
    c2_con = c2.connection

    c1.close()

    eq_(c1_con.close.call_count, 0)
    eq_(c2_con.close.call_count, 0)

    p.dispose()

    eq_(c1_con.close.call_count, 1)
    eq_(c2_con.close.call_count, 0)

    # currently, if a ConnectionFairy is closed
    # after the pool has been disposed, there's no
    # flag that states it should be invalidated
    # immediately - it just gets returned to the
    # pool normally...
    c2.close()
    eq_(c1_con.close.call_count, 1)
    eq_(c2_con.close.call_count, 0)

    # ...and that's the one we'll get back next.
    c3 = p.connect()
    assert c3.connection is c2_con

def test_listen_targets_per_subclass(self):
    """test that listen() called on a subclass remains specific to
    that subclass."""

    canary = []

    def listen_one(*args):
        canary.append("listen_one")

    def listen_two(*args):
        canary.append("listen_two")

    def listen_three(*args):
        canary.append("listen_three")

    event.listen(pool.Pool, 'connect', listen_one)
    event.listen(pool.QueuePool, 'connect', listen_two)
    event.listen(pool.SingletonThreadPool, 'connect', listen_three)

    p1 = pool.QueuePool(creator=MockDBAPI().connect)
    p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)

    assert listen_one in p1.dispatch.connect
    assert listen_two in p1.dispatch.connect
    assert listen_three not in p1.dispatch.connect
    assert listen_one in p2.dispatch.connect
    assert listen_two not in p2.dispatch.connect
    assert listen_three in p2.dispatch.connect

    p1.connect()
    eq_(canary, ["listen_one", "listen_two"])
    p2.connect()
    eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])

def test_dispose_closes_pooled(self):
    dbapi = MockDBAPI()

    def creator():
        return dbapi.connect()

    p = pool.QueuePool(creator=creator, pool_size=2, timeout=None,
                       max_overflow=0)
    c1 = p.connect()
    c2 = p.connect()
    conns = [c1.connection, c2.connection]

    c1.close()
    eq_([c.closed for c in conns], [False, False])

    p.dispose()
    eq_([c.closed for c in conns], [True, False])

    # currently, if a ConnectionFairy is closed
    # after the pool has been disposed, there's no
    # flag that states it should be invalidated
    # immediately - it just gets returned to the
    # pool normally...
    c2.close()
    eq_([c.closed for c in conns], [True, False])

    # ...and that's the one we'll get back next.
    c3 = p.connect()
    assert c3.connection is conns[1]

def __init__(self, logger_=appscale_logger.getLogger("datastore-voldemort")):
    # self.__host = 'localhost'
    # self.__port = 9090
    DHashDatastore.__init__(self, logger_)
    self.__store = STORE
    # self.logger.debug("AppVoldemort is created")
    self.pool = pool.QueuePool(self.__create_connection)

def __init__(self, db_connection_str):
    if db_connection_str is None:
        raise ReferenceError('database connection string is NULL')
    self.connection_str = db_connection_str
    self.connection_pool = pool.QueuePool(self.create_connection, pool_size=1)
    self.engine = create_engine('mssql+pyodbc://', pool=self.connection_pool)

def getPool(dbtype, params, poolParams):
    """Get connection pool. Defines getConnection method based on db type.

    Input: (string) database type,
           (dict) connect() parameters,
           (dict) pool constructor parameters.
    Output: (class) pool instance
    """
    if dbtype == T_POSTGRESQL:
        funct = getPostgresConnection(**params)
        return pool.QueuePool(creator=funct, **poolParams)
    elif dbtype == T_MYSQL:
        funct = getMysqlConnection(**params)
        return pool.QueuePool(creator=funct, **poolParams)
    elif dbtype == T_ORACLE:
        funct = getOracleConnection(**params)
        return pool.QueuePool(creator=funct, **poolParams)
    elif dbtype == T_SQLITE:
        funct = getSqliteConnection(**params)
        return pool.SingletonThreadPool(creator=funct, **poolParams)

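# Hedged usage sketch for getPool() above; the keys passed through to
# getPostgresConnection() and the pool settings are illustrative assumptions,
# not taken from the original source.
def example_postgres_pool():
    params = {'host': 'localhost', 'port': 5432, 'user': 'postgres',
              'password': '', 'database': 'test'}
    poolParams = {'pool_size': 5, 'max_overflow': 10, 'timeout': 30}
    pg_pool = getPool(T_POSTGRESQL, params, poolParams)
    conn = pg_pool.connect()   # check a pooled connection out of the QueuePool
    conn.close()               # return it to the pool rather than closing it
    return pg_pool
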
def query_geographic_database(sql_query):
    mypool = pool.QueuePool(getConnection_db_gis, max_overflow=100,
                            pool_size=5)
    # get a connection
    conn = mypool.connect()
    # use it
    cursor = query(sql_query, conn)
    return cursor

def connect(self):
    global con_pool
    if not con_pool:
        con_pool = pool.QueuePool(lambda *_: self.get_connection(),
                                  max_overflow=10, pool_size=50)
    self._conn = con_pool.connect()
    self._cursor = self._conn.cursor()

def __init__(self, hosts, users, passwords, dbs):
    self.mypool = pool.QueuePool(
        lambda: pymysql.connect(host=hosts, user=users, password=passwords,
                                db=dbs,
                                cursorclass=pymysql.cursors.DictCursor),
        pool_size=5, max_overflow=0)

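# Hedged usage sketch for the pooled pymysql connections set up above; the
# method name, SQL text, and arguments are made up for illustration.
def fetch_one(self, sql, args=None):
    conn = self.mypool.connect()          # check a connection out of the pool
    try:
        with conn.cursor() as cursor:     # DictCursor, per the pool's creator
            cursor.execute(sql, args)
            return cursor.fetchone()
    finally:
        conn.close()                      # return the connection to the pool
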
def __init__(self):
    try:
        DBCode.dbConnectionPool = pool.QueuePool(getconn, max_overflow=20,
                                                 pool_size=15, echo=True)
    except mysql.connector.Error as err:
        log.error(
            'MySql Error: exception occurred because the database does not '
            'exist or the MySql credentials are invalid: %s', str(err))

def test_mixed_close(self):
    p = pool.QueuePool(creator=mock_dbapi.connect, pool_size=3,
                       max_overflow=-1, use_threadlocal=True)
    c1 = p.connect()
    c2 = p.connect()
    assert c1 is c2
    c1.close()
    c2 = None
    assert p.checkedout() == 1
    c1 = None
    assert p.checkedout() == 0

def test_connection_fairy_connection(self):
    dbapi = MockDBAPI()
    p1 = pool.QueuePool(creator=lambda: dbapi.connect("foo.db"))

    fairy = p1.connect()

    with expect_deprecated(
            "The _ConnectionFairy.connection attribute is deprecated; "
            "please use 'driver_connection'"):
        is_(fairy.connection, fairy.dbapi_connection)

def __init__(self): """Connects to the Postgres database.""" self.engine = None self.session = None self.dsn = config("findex:database:connection") self.pool = pool.QueuePool(creator=self._getconn, max_overflow=1, pool_size=300, echo=False) # config("findex:findex:debug")
def test_hanging_connect_within_overflow(self):
    """test that a single connect() call which is hanging
    does not block other connections from proceeding."""

    dbapi = Mock()
    mutex = threading.Lock()

    def hanging_dbapi():
        time.sleep(2)
        with mutex:
            return dbapi.connect()

    def fast_dbapi():
        with mutex:
            return dbapi.connect()

    creator = threading.local()

    def create():
        return creator.mock_connector()

    def run_test(name, pool, should_hang):
        if should_hang:
            creator.mock_connector = hanging_dbapi
        else:
            creator.mock_connector = fast_dbapi

        conn = pool.connect()
        conn.operation(name)
        time.sleep(1)
        conn.close()

    p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)

    threads = [
        threading.Thread(target=run_test, args=("success_one", p, False)),
        threading.Thread(target=run_test, args=("success_two", p, False)),
        threading.Thread(target=run_test, args=("overflow_one", p, True)),
        threading.Thread(target=run_test, args=("overflow_two", p, False)),
        threading.Thread(target=run_test, args=("overflow_three", p, False))
    ]
    for t in threads:
        t.start()
        time.sleep(.2)

    for t in threads:
        t.join(timeout=join_timeout)

    eq_(
        dbapi.connect().operation.mock_calls,
        [
            call("success_one"),
            call("success_two"),
            call("overflow_two"),
            call("overflow_three"),
            call("overflow_one")
        ]
    )

def _pool_fixture(self, pre_ping):
    dialect = url.make_url(
        'postgresql://*****:*****@localhost/test').get_dialect()()
    dialect.dbapi = self.dbapi
    _pool = pool.QueuePool(creator=lambda: self.dbapi.connect('foo.db'),
                           pre_ping=pre_ping, dialect=dialect)
    dialect.is_disconnect = \
        lambda e, conn, cursor: isinstance(e, MockDisconnect)
    return _pool

def test_timeout(self):
    p = pool.QueuePool(creator=mock_dbapi.connect, pool_size=3,
                       max_overflow=0, use_threadlocal=False, timeout=2)
    c1 = p.connect()
    c2 = p.connect()
    c3 = p.connect()
    now = time.time()
    try:
        c4 = p.connect()
        assert False
    except tsa.exc.TimeoutError as e:
        assert int(time.time() - now) == 2

def get_pool(db_args, pool_size=10, max_overflow=25):
    poolkey = ''.join(['%s%s' % item for item in db_args.items()])
    if poolkey not in _POOLS:
        conn_func = _get_connector_func(db_args)
        _POOLS[poolkey] = pool.QueuePool(conn_func, pool_size=pool_size,
                                         max_overflow=max_overflow)
        # add checkout event listener
        event.listen(_POOLS[poolkey], "checkout", checkout)
    return _POOLS[poolkey]

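# Hedged sketch of the `checkout` listener registered above; the original
# source does not show it, so the ping query is an assumption.  The
# three-argument signature is the one SQLAlchemy passes to pool "checkout"
# events.
from sqlalchemy import exc

def checkout(dbapi_connection, connection_record, connection_proxy):
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("SELECT 1")   # cheap liveness check at checkout time
    except Exception:
        # Tell the pool to discard this connection and retry with a fresh one.
        raise exc.DisconnectionError()
    finally:
        cursor.close()
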
def _pool_fixture(self, pre_ping, pool_kw=None):
    dialect = url.make_url(
        "postgresql://*****:*****@localhost/test").get_dialect()()
    dialect.dbapi = self.dbapi
    _pool = pool.QueuePool(creator=lambda: self.dbapi.connect("foo.db"),
                           pre_ping=pre_ping, dialect=dialect,
                           **(pool_kw if pool_kw else {}))
    dialect.is_disconnect = lambda e, conn, cursor: isinstance(
        e, MockDisconnect)
    return _pool

def test_recycle(self):
    p = pool.QueuePool(creator=mock_dbapi.connect, pool_size=1,
                       max_overflow=0, use_threadlocal=False, recycle=3)
    c1 = p.connect()
    c_id = id(c1.connection)
    c1.close()
    c2 = p.connect()
    assert id(c2.connection) == c_id
    c2.close()
    time.sleep(4)
    c3 = p.connect()
    assert id(c3.connection) != c_id

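# Related hedged sketch: outside of pool-level tests like the one above, the
# same recycle behavior is usually configured through create_engine()'s
# pool_recycle argument; the URL below is only a placeholder.
def recycle_engine_example():
    from sqlalchemy import create_engine
    return create_engine("mysql+pymysql://user:pass@localhost/test",
                         pool_recycle=3600)  # replace connections older than 1h
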