def create_pool(self, no_loop=False, **kwargs):
    loop = None if no_loop else self.loop
    pool = yield from aiopg.create_pool(database="aiopg",
                                        user="******",
                                        password="******",
                                        host="127.0.0.1",
                                        loop=loop,
                                        **kwargs)
    self.pool = pool
    return pool

def go(*, no_loop=False, **kwargs):
    nonlocal pool
    params = pg_params.copy()
    params.update(kwargs)
    useloop = None if no_loop else loop
    pool = yield from aiopg.create_pool(loop=useloop, **params)
    return pool

def init(loop):
    app = web.Application(loop=loop)
    # -------------------------------------------------------------------------
    app.router.add_route(
        'GET',
        r'/{thread_id:\d+}/{user_id:\d+}/',
        PrivateMessageHandler()
    )
    # -------------------------------------------------------------------------
    app['pg_pool'] = yield from aiopg.create_pool(dsn)
    app['redis_pool'] = yield from aioredis.create_pool(redis_address)
    # -------------------------------------------------------------------------
    srv = yield from loop.create_server(
        app.make_handler(),
        SERVER['HOST'], SERVER['PORT']
    )
    print("Server started at http://{0}:{1}".format(SERVER['HOST'],
                                                    SERVER['PORT']))
    return srv

def test_select():
    pool = yield from aiopg.create_pool(dsn)
    with (yield from pool.cursor()) as cur:
        yield from cur.execute("SELECT 1")
        ret = yield from cur.fetchone()
        assert ret == (1,)
        print("ALL DONE")

def start(self):
    yield from super().start()
    LOG.info('Starting engines...')
    print('Starting engines...')
    self.engines['pg'] = self.loop.create_task(aiopg.create_pool(
        host=os.environ.get('DBHOST', self.config['engines']['pg']['host']),
        port=int(self.config['engines']['pg']['port']),
        sslmode='disable',
        dbname=self.config['engines']['pg']['dbname'],
        user=self.config['engines']['pg']['user'],
        password=self.config['engines']['pg']['password'],
        cursor_factory=psycopg2.extras.RealDictCursor,
        minsize=int(self.config['engines']['pg']['minsize']),
        maxsize=int(self.config['engines']['pg']['maxsize']),
        loop=self.loop))
    self.engines['mysql'] = self.loop.create_task(aiomysql.create_pool(
        host=self.config['engines']['mysql']['host'],
        port=self.config['engines']['mysql']['port'],
        user=self.config['engines']['mysql']['user'],
        password=self.config['engines']['mysql']['pwd'],
        db=self.config['engines']['mysql']['db'],
        minsize=int(self.config['engines']['mysql']['minsize']),
        maxsize=int(self.config['engines']['mysql']['maxsize']),
        cursorclass=aiomysql.DictCursor,
        charset='utf8',
        use_unicode=True,
        loop=self.loop))
    yield from asyncio.wait([self.engines['pg']],
                            return_when=asyncio.ALL_COMPLETED)
    LOG.info('All engines ready!')

async def test_select():
    async with aiopg.create_pool(connection) as pool:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute("SELECT question, pub_date FROM sanic_polls")
                async for row in cur:
                    result.append({"question": row[0], "pub_date": row[1]})

def connect(self): """Create connection pool asynchronously. """ self._pool = yield from aiopg.create_pool(dsn=self.dsn, loop=self._loop, timeout=self.timeout, **self.connect_kwargs)
async def init_db():
    pool_fut = lambda: aiopg.create_pool(os.getenv('DB_URI'))
    pool = await retry_fut(pool_fut, (psycopg2.OperationalError,))
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            await cur.execute(schema)
    return pool

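# ``retry_fut`` is not defined in the snippet above. A minimal sketch of what
# it might look like: retry an awaitable factory until it stops raising one of
# the given exception types. The name, signature, and fixed-delay policy here
# are assumptions, not the original author's code.
import asyncio

async def retry_fut(fut_factory, exceptions, attempts=5, delay=1.0):
    for attempt in range(attempts):
        try:
            return await fut_factory()
        except exceptions:
            if attempt == attempts - 1:
                raise  # out of attempts; propagate the last error
            await asyncio.sleep(delay)
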
def main():
    pool = yield from aiopg.create_pool(dsn)
    with (yield from pool.cursor()) as cur:
        yield from transaction(cur, IsolationLevel.repeatable_read)
        yield from transaction(cur, IsolationLevel.read_committed)
        yield from transaction(cur, IsolationLevel.serializable)
        yield from cur.execute('select * from tbl')

async def main():
    async with aiopg.create_pool(dsn) as pool:
        async with pool.acquire() as conn1:
            listener = listen(conn1)
            async with pool.acquire() as conn2:
                notifier = notify(conn2)
                await asyncio.gather(listener, notifier)
    print("ALL DONE")

async def main():
    async with aiopg.connect(dsn) as listenConn:
        async with aiopg.create_pool(dsn) as notifyPool:
            async with notifyPool.acquire() as notifyConn:
                listener = listen(listenConn)
                notifier = notify(notifyConn)
                await asyncio.gather(listener, notifier)
    print("ALL DONE")

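# The ``listen``/``notify`` coroutines used by the two snippets above are not
# shown. A minimal sketch following aiopg's LISTEN/NOTIFY pattern
# (``conn.notifies`` is an asyncio queue of psycopg2 Notify objects); the
# channel name and payloads are placeholder assumptions.
async def listen(conn):
    async with conn.cursor() as cur:
        await cur.execute("LISTEN channel")
        while True:
            msg = await conn.notifies.get()
            if msg.payload == "finish":
                return
            print("Receive <-", msg.payload)

async def notify(conn):
    async with conn.cursor() as cur:
        for i in range(5):
            print("Send ->", i)
            await cur.execute("NOTIFY channel, %s", (str(i),))
        await cur.execute("NOTIFY channel, 'finish'")
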
async def build_disjoint_set():
    async with aiopg.create_pool(PG_DSN) as pool:
        ds = AsyncDisjointSet()
        await ds.build(
            objects_ids=load_objects_ids(pool=pool),
            relations=load_relations(pool=pool)
        )
        await write_components(ds.get_components(), pool=pool)

def connect(self):
    try:
        self.pool = yield from aiopg.create_pool(self.dsn,
                                                 maxsize=3,
                                                 on_connect=initConnection)
        logging.debug('db connections pool created')
    except Exception:
        logging.exception('Error creating connection pool')
        logging.error(self.dsn)

def connect(self): """Create connection pool asynchronously. """ self.pool = yield from aiopg.create_pool( loop=self.loop, timeout=self.timeout, database=self.database, **self.connect_kwargs)
async def _to_pg(sql: str, codes: list):
    """Store the data in PostgreSQL."""
    dsn = "host=192.168.43.254 user=pguser password=tofuture dbname=mydata"
    async with aiopg.create_pool(dsn) as pool:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                for i in codes:
                    await cur.execute(sql, i)

def get_pool(self):  # pragma: no cover
    if not hasattr(self, '_pool'):
        self._pool = yield from aiopg.create_pool(
            **parse_pgurl(settings.db_url)
        )
    return self._pool

def main():
    pool = yield from aiopg.create_pool(dsn)
    with (yield from pool) as conn1:
        listener = listen(conn1)
        with (yield from pool) as conn2:
            notifier = notify(conn2)
            yield from asyncio.gather(listener, notifier)
    print("ALL DONE")

def connect(self): """Create connection pool asynchronously. """ self._pool = yield from aiopg.create_pool( dsn=self.dsn, loop=self._loop, timeout=self.timeout, **self.connect_kwargs)
def init(loop):
    # add aiohttp_debugtoolbar middleware to your application
    app = web.Application(loop=loop)
    extra_panels = []
    if 'aiopg' in sys.modules:
        extra_panels.append(RequestPgDebugPanel)
    if 'aioredis' in sys.modules:
        extra_panels.append(RequestRedisDebugPanel)

    # install aiohttp_debugtoolbar
    aiohttp_debugtoolbar.setup(app,
                               extra_panels=extra_panels,
                               extra_templates=str(PATH_PARENT / 'extra_tpl'))

    template = """
    <html>
        <head>
            <title>{{ title }}</title>
        </head>
        <body>
            <h1>{{ text }}</h1>
            <p>
                <a href="{{ app.router['exc_example'].url_for() }}">
                    Exception example</a>
            </p>
        </body>
    </html>
    """
    # install jinja2 templates
    loader = jinja2.DictLoader({'index.html': template})
    aiohttp_jinja2.setup(app, loader=loader)

    # init routes for index page, and page with error
    app.router.add_route('GET', '/', basic_handler, name='index')
    app.router.add_route('GET', '/exc', exception_handler, name='exc_example')

    if 'aiopg' in sys.modules:
        # create connection to the database
        dsn = 'host={host} dbname={db} user={user} password={passw} '.format(
            db='postgres', user='******', passw='1', host='localhost')
        app['db'] = yield from aiopg.create_pool(dsn, loop=loop,
                                                 minsize=1, maxsize=2)
        # Correct PostgreSQL shutdown
        app.on_cleanup.append(close_pg)

    if 'aioredis' in sys.modules:
        # create redis pool
        app['redis'] = yield from create_pool(('127.0.0.1', '6379'))
        # Correct Redis shutdown
        app.on_cleanup.append(close_redis)

    handler = app.make_handler()
    srv = yield from loop.create_server(handler, '127.0.0.1', 9000)
    print("Server started at http://127.0.0.1:9000")
    return srv, handler

async def go(): async with aiopg.create_pool(dsn) as pool: async with pool.acquire() as conn: async with conn.cursor() as cur: await cur.execute("SELECT 1") ret = [] async for row in cur: ret.append(row) assert ret == [(1, )]
async def start():
    async with aiopg.create_pool(**config.postgreSetttings) as postgres_db:
        redis_db = await aioredis.create_pool("redis://" + config.REDIS_HOST)
        await initdb(postgres_db)
        options.parse_command_line()
        app = Application(postgres_db, redis_db)
        app.listen(options.port)
        shutdown_event = tornado.locks.Event()
        await shutdown_event.wait()

async def db_events(self, data: dict, channel: str):
    async with aiopg.create_pool(dsn) as pool:
        async with pool.acquire() as conn:
            try:
                await asyncio.gather(
                    self.listen(conn, channel),
                    return_exceptions=False
                )
            except Exception:
                print("releasing connection")

async def go(): async with aiopg.create_pool(dsn) as pool: async with pool.acquire() as conn: async with conn.cursor() as cur: await cur.execute("SELECT * from posts") ret = [] async for row in cur: ret.append(row) print(row)
async def go():
    async with aiopg.create_pool(dsn) as pool:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(table_account_sql)
                ret = []
                async for row in cur:
                    ret.append(row)
                return ret

def connect(self):
    try:
        self.pool = yield from aiopg.create_pool(self.dsn,
                                                 timeout=18000,
                                                 on_connect=init_connection)
        logging.debug('db connections pool created')
    except Exception:
        logging.exception('Error creating connection pool')
        logging.error(self.dsn)

def init(store, db, api_url, loop=None):
    loop = loop or asyncio.get_event_loop()
    db = loop.run_until_complete(aiopg.create_pool(loop=loop, **db))
    store = pyced.store.init(store['url'], loop=loop)
    loop.run_until_complete(store['username'])
    model = pyced.saga.model.Model(db)
    logger.info("Saga server initialized")
    return Server(loop, model, store, api_url)

async def select_one(dsn):
    async with aiopg.create_pool(dsn) as pool:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute('SELECT 1')
                result = []
                async for row in cur:
                    result.append(row)
                return result

def test___del__(loop, pg_params, warning):
    pool = yield from aiopg.create_pool(loop=loop, **pg_params)
    with warning(ResourceWarning):
        del pool
        if IS_PYPY:
            # PyPy's GC is not based on reference counting, and the objects
            # are not freed instantly when they are no longer reachable.
            # Therefore, we explicitly collect unreachable objects here.
            gc.collect()

def create_pool(self, no_loop=False, **kwargs):
    loop = None if no_loop else self.loop
    pool = yield from aiopg.create_pool(database='aiopg',
                                        user='******',
                                        password='******',
                                        host='127.0.0.1',
                                        loop=loop,
                                        **kwargs)
    return pool

def init(loop):
    # add aiohttp_debugtoolbar middleware to your application
    app = web.Application(loop=loop)
    extra_panels = []
    if 'aiopg' in sys.modules:
        extra_panels.append(RequestPgDebugPanel)
    if 'aioredis' in sys.modules:
        extra_panels.append(RequestRedisDebugPanel)

    # install aiohttp_debugtoolbar
    aiohttp_debugtoolbar.setup(
        app,
        extra_panels=extra_panels,
        extra_templates=str(PATH_PARENT / 'extra_tpl'))

    template = """
    <html>
        <head>
            <title>{{ title }}</title>
        </head>
        <body>
            <h1>{{ text }}</h1>
            <p>
                <a href="{{ app.router['exc_example'].url_for() }}">
                    Exception example</a>
            </p>
        </body>
    </html>
    """
    # install jinja2 templates
    loader = jinja2.DictLoader({'index.html': template})
    aiohttp_jinja2.setup(app, loader=loader)

    # init routes for index page, and page with error
    app.router.add_route('GET', '/', basic_handler, name='index')
    app.router.add_route('GET', '/exc', exception_handler, name='exc_example')

    if 'aiopg' in sys.modules:
        # create connection to the database
        dsn = 'host={host} dbname={db} user={user} password={passw} '.format(
            db='postgres', user='******', passw='1', host='localhost')
        app['db'] = yield from aiopg.create_pool(
            dsn, loop=loop, minsize=1, maxsize=2)
        # Correct PostgreSQL shutdown
        app.on_cleanup.append(close_pg)

    if 'aioredis' in sys.modules:
        # create redis pool
        app['redis'] = yield from create_pool(('127.0.0.1', '6379'))
        # Correct Redis shutdown
        app.on_cleanup.append(close_redis)

    handler = app.make_handler()
    srv = yield from loop.create_server(handler, '127.0.0.1', 9000)
    print("Server started at http://127.0.0.1:9000")
    return srv, handler

async def test_select():
    async with aiopg.create_pool(dsn) as pool:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute("SELECT 1")
                ret = []
                async for row in cur:
                    ret.append(row)
                assert ret == [(1,)]
    print("ALL DONE")

def create_pool():  # pragma: no cover
    """Creates a connection pool to the database."""
    pool = yield from aiopg.create_pool(
        minsize=settings.db_pool_min,
        maxsize=settings.db_pool_max,
        **parse_pgurl(settings.db_url)
    )
    return pool

async def main():
    async with aiopg.create_pool(dsn) as pool:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute('CREATE TABLE tbl (id int)')
                await transaction(cur, IsolationLevel.repeatable_read)
                await transaction(cur, IsolationLevel.read_committed)
                await transaction(cur, IsolationLevel.serializable)
                await cur.execute('select * from tbl')

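# Neither isolation-level snippet defines the ``transaction`` coroutine it
# calls. A minimal sketch, assuming aiopg's Transaction class; the INSERT it
# runs is a placeholder, not the original author's code.
import psycopg2
from aiopg.transaction import Transaction

async def transaction(cur, isolation_level):
    tr = Transaction(cur, isolation_level)
    await tr.begin()
    try:
        await cur.execute('insert into tbl values (1)')
        await tr.commit()
    except psycopg2.Error:
        await tr.rollback()
        raise
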
def aiopg_pool(loop, postgresql_params):
    pool = loop.run_until_complete(
        aiopg.create_pool(host=postgresql_params['host'],
                          port=postgresql_params['port'],
                          database='postgres',
                          user='******',
                          loop=loop))
    yield pool
    pool.terminate()
    loop.run_until_complete(pool.wait_closed())

def create_pool(self, no_loop=False, **kwargs):
    loop = None if no_loop else self.loop
    pool = yield from aiopg.create_pool(database='aiopg',
                                        user='******',
                                        password='******',
                                        host='127.0.0.1',
                                        loop=loop,
                                        **kwargs)
    self.pool = pool
    return pool

async def test_pubsub():
    async with create_pool(get_db_url()) as e:
        async with e.acquire() as listen_conn:
            listener = listen_helper(listen_conn)
            db = Database()
            await db.startup()
            await asyncio.gather(
                listener,
                db.insert_post(parent_id=290, content='testing notify'))
            print("listen/notify done!")

def db_setup():
    print("DB Setup")
    notorm.db = yield from aiopg.create_pool("dbname=notorm_example user=dbuser")
    # We have to use a regular psycopg connection to register the extensions.
    # This can be done through Momoko, I just haven't spent enough time on it.
    conn = psycopg2.connect(dsn="dbname=notorm_example user=dbuser")
    psycopg2.extras.register_composite("game", conn, globally=True,
                                       factory=GameComposite)
    conn.close()

def get_pool(cls) -> Pool: """ Yields: existing db connection pool """ if len(cls._connection_params) < 5: raise ConnectionError('Please call SQLStore.connect before calling this method') if not cls._pool: cls._pool = yield from create_pool(**cls._connection_params) return cls._pool
async def _test():
    async with aiopg.create_pool(PG_DSN) as pool:
        ctx_mgr_1 = PoolCtxManagerProxy(pool)
        ctx_mgr_2 = PoolCtxManagerProxy(pool)
        async with ctx_mgr_1:
            async with ctx_mgr_2:
                self.assertIs(ctx_mgr_1._pool, ctx_mgr_2._pool)
                self.assertFalse(ctx_mgr_2._pool.closed)
            self.assertFalse(ctx_mgr_1._pool.closed)
        self.assertTrue(pool.closed)

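# ``PoolCtxManagerProxy`` is not shown in the source. A plausible sketch
# consistent with the assertions above, assuming reference-counting
# semantics: the shared pool is closed only when the last proxy exits.
class PoolCtxManagerProxy:
    _refcounts = {}  # id(pool) -> number of proxies currently entered

    def __init__(self, pool):
        self._pool = pool

    async def __aenter__(self):
        key = id(self._pool)
        self._refcounts[key] = self._refcounts.get(key, 0) + 1
        return self._pool

    async def __aexit__(self, exc_type, exc, tb):
        key = id(self._pool)
        self._refcounts[key] -= 1
        if not self._refcounts[key]:
            del self._refcounts[key]
            self._pool.close()
            await self._pool.wait_closed()
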
async def insert_to_db(new_entity: tuple):
    """
    Insert a new entity into the database.

    :param new_entity: new entity from user
    """
    async with aiopg.create_pool(db_params) as pool:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                insert_query = ('INSERT INTO services (ip, port, available) '
                                'VALUES (%s, %s, %s)')
                await cur.execute(insert_query, new_entity)

async def ws_files_current_operation(request, ws, user):
    try:
        async with aiopg.create_pool(apfell.config['DB_POOL_CONNECT_STRING']) as pool:
            async with pool.acquire() as conn:
                async with conn.cursor() as cur:
                    await cur.execute('LISTEN "newfilemeta";')
                    # BEFORE WE START GETTING NEW THINGS, UPDATE WITH ALL OF THE OLD DATA
                    operation = await db_objects.get(Operation, name=user['current_operation'])
                    files = await db_objects.execute(FileMeta.select().where(
                        (FileMeta.operation == operation) & (FileMeta.deleted == False)).order_by(FileMeta.id))
                    for f in files:
                        if "/screenshots/" not in f.path:
                            if "/{}/downloads/".format(user['current_operation']) not in f.path:
                                # this means it's an upload, so supply additional information as well
                                # two kinds of uploads: via task or manual
                                if f.task is not None:
                                    # this is an upload via agent tasking
                                    await ws.send(js.dumps(
                                        {**f.to_json(), 'host': f.task.callback.host, "upload": f.task.params}))
                                else:
                                    # this is a manual upload
                                    await ws.send(js.dumps({**f.to_json(), 'host': 'MANUAL FILE UPLOAD',
                                                            "upload": "{\"remote_path\": \"Apfell\", \"file_id\": " + str(f.id) + "}",
                                                            "task": "null"}))
                            else:
                                await ws.send(js.dumps({**f.to_json(), 'host': f.task.callback.host,
                                                        'params': f.task.params}))
                    await ws.send("")
                    # now pull off any new payloads we got queued up while processing old data
                    while True:
                        try:
                            msg = conn.notifies.get_nowait()
                            id = msg.payload
                            f = await db_objects.get(FileMeta, id=id, operation=operation, deleted=False)
                            if "/screenshots" not in f.path:
                                try:
                                    if "/{}/downloads/".format(user['current_operation']) not in f.path:
                                        # this means it's an upload, so supply additional information as well
                                        # could be upload via task or manual
                                        if f.task is not None:
                                            # this is an upload via agent tasking
                                            await ws.send(js.dumps(
                                                {**f.to_json(), 'host': f.task.callback.host, "upload": f.task.params}))
                                        else:
                                            # this is a manual upload
                                            await ws.send(js.dumps({**f.to_json(), 'host': 'MANUAL FILE UPLOAD',
                                                                    "upload": "{\"remote_path\": \"Apfell\", \"file_id\": " + str(f.id) + "}",
                                                                    "task": "null"}))
                                    else:
                                        await ws.send(js.dumps({**f.to_json(), 'host': f.task.callback.host,
                                                                'params': f.task.params}))
                                except Exception:
                                    # we got a file that's just not part of our current operation, so move on
                                    pass
                        except asyncio.QueueEmpty:
                            await asyncio.sleep(1)
                            await ws.send("")  # this is our test to see if the client is still there
                            continue
                        except Exception as e:
                            print(e)
                            continue
    finally:
        pool.close()

async def test_select():
    async with aiopg.create_pool(dsn) as pool:
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute("SELECT * from management where no=%s", ('jwk',))
                ret = []
                async for row in cur:
                    ret.append(row)
                    print(row)
    print("ALL DONE")

async def test_create_pool_context_manager(pg_params):
    async with aiopg.create_pool(**pg_params) as pool:
        async with pool.acquire() as conn:
            async with conn.cursor() as cursor:
                await cursor.execute('SELECT 42;')
                resp = await cursor.fetchone()
                assert resp == (42,)
            assert cursor.closed
        assert conn.closed
    assert pool.closed

async def ws_screenshots(request, ws, user):
    try:
        async with aiopg.create_pool(apfell.config['DB_POOL_CONNECT_STRING']) as pool:
            async with pool.acquire() as conn:
                async with conn.cursor() as cur:
                    await cur.execute('LISTEN "newfilemeta";')
                    # BEFORE WE START GETTING NEW THINGS, UPDATE WITH ALL OF THE OLD DATA
                    operation = await db_objects.get(Operation, name=user['current_operation'])
                    files = await db_objects.execute(FileMeta.select().where(
                        FileMeta.operation == operation).order_by(FileMeta.id))
                    for f in files:
                        if "{}/downloads/".format(user['current_operation']) in f.path \
                                and "/screenshots/" in f.path:
                            await ws.send(js.dumps({**f.to_json(),
                                                    'callback_id': f.task.callback.id,
                                                    'operator': f.task.operator.username}))
                    await ws.send("")
                    # now pull off any new payloads we got queued up while processing old data
                    while True:
                        try:
                            msg = conn.notifies.get_nowait()
                            blob = js.loads(msg.payload)
                            if "{}/downloads/".format(user['current_operation']) in blob['path'] \
                                    and "/screenshots" in blob['path']:
                                f = await db_objects.get(FileMeta, id=blob['id'])
                                callback_id = f.task.callback.id
                                await ws.send(js.dumps({**f.to_json(),
                                                        'callback_id': callback_id,
                                                        'operator': f.task.operator.username}))
                        except asyncio.QueueEmpty:
                            await asyncio.sleep(2)
                            await ws.send("")  # this is our test to see if the client is still there
                            continue
                        except Exception as e:
                            print(e)
                            return
    finally:
        pool.close()

async def test_create_pool_context_manager(pg_params, loop):
    async with aiopg.create_pool(loop=loop, **pg_params) as pool:
        async with pool.acquire() as conn:
            async with conn.cursor() as cursor:
                await cursor.execute('SELECT 42;')
                resp = await cursor.fetchone()
                assert resp == (42,)
            assert cursor.closed
        assert conn.closed
    assert pool.closed

def get_pool(**kwargs):  # pragma: no cover
    '''
    Gets the postgresql connection pool.

    :return: pool
    '''
    db_url = kwargs.pop('db_url')
    kwargs.update(parse_pgurl(db_url))
    pool = yield from aiopg.create_pool(**kwargs)
    return pool

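# ``parse_pgurl`` (used by several snippets above) is not shown in the source.
# A minimal sketch, assuming it maps a postgres:// URL onto the keyword
# arguments aiopg.create_pool accepts; the fallback defaults are assumptions.
from urllib.parse import urlparse

def parse_pgurl(url):
    parsed = urlparse(url)
    return {
        'host': parsed.hostname or 'localhost',
        'port': parsed.port or 5432,
        'user': parsed.username,
        'password': parsed.password,
        'database': parsed.path.lstrip('/'),
    }
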
def _create_engine(dsn=None, *, minsize=10, maxsize=10, loop=None,
                   dialect=_dialect, timeout=TIMEOUT, **kwargs):
    if loop is None:
        loop = asyncio.get_event_loop()
    pool = yield from aiopg.create_pool(dsn, minsize=minsize, maxsize=maxsize,
                                        loop=loop, timeout=timeout, **kwargs)
    conn = yield from pool.acquire()
    try:
        real_dsn = conn.dsn
        return Engine(dialect, pool, real_dsn)
    finally:
        pool.release(conn)

def connect(self, url):
    """Create the driver and connect from the given url."""
    url = urlparse(url)
    if url.scheme not in ('postgresql', 'aiopg+postgresql'):
        raise ValueError('Invalid scheme')
    self.database = url.path[1:]
    self.pool = yield from aiopg.create_pool(
        host=url.hostname or 'localhost',
        port=url.port or 5432,
        user=url.username or 'postgres',
        password=url.password or 'secret',
        database=self.database)

def get_pool(cls) -> Pool: """ Yields: existing db connection pool """ if len(cls._connection_params) < 5: raise ConnectionError('Please call SQLStore.connect before calling this method') if not cls._pool: with (yield from cls._pool_pending): if not cls._pool: cls._pool = yield from create_pool(**cls._connection_params) asyncio.async(cls._periodic_cleansing()) return cls._pool
def _connect(self):
    conn_kwargs = {
        "database": self.database,
    }
    conn_kwargs.update(self.connect_params)
    if 'passwd' in conn_kwargs:
        conn_kwargs['password'] = conn_kwargs.pop('passwd')
    if "db" in conn_kwargs:
        conn_kwargs["database"] = conn_kwargs.pop("db")
    if "maxsize" not in conn_kwargs:
        conn_kwargs["maxsize"] = 32
    return aiopg.create_pool(None, **conn_kwargs)

def validation_part(self, request):
    session_key = request.cookies.get(settings.SESSION_COOKIE_NAME)
    session = session_engine.SessionStore(session_key)
    try:
        self.user_id = session["_auth_user_id"]
    except KeyError:
        raise aiohttp.web.HTTPBadRequest(reason="KeyError")

    pool = yield from aiopg.create_pool(dsn)
    with (yield from pool.cursor()) as cur:
        yield from cur.execute("SELECT username FROM auth_user WHERE id = %s",
                               (self.user_id,))
        sql_res = yield from cur.fetchone()
        if sql_res is None:
            raise aiohttp.web.HTTPBadRequest(reason="Wrong user_id")
        self.sender_name = sql_res[0]
    # ---------------------------------------------------------------------
    self.thread_id = request.match_info.get("thread_id")
    pool = yield from aiopg.create_pool(dsn)
    with (yield from pool.cursor()) as cur:
        yield from cur.execute(
            """
            SELECT pt.id
            FROM privatemessages_thread AS pt
            INNER JOIN privatemessages_thread_participants AS ptp
                ON (pt.id = ptp.thread_id)
            WHERE (pt.id = %s AND ptp.user_id = %s)
            """,
            (self.thread_id, self.user_id),
        )
        sql_res = yield from cur.fetchone()
        if sql_res is None:
            raise aiohttp.web.HTTPBadRequest(reason="Wrong thread_id")

async def go():
    async with aiopg.create_pool(host=self.host,
                                 user=self.user,
                                 database=self.database,
                                 password=self.password,
                                 loop=self.loop) as pool:
        async with pool.acquire() as conn:
            async with conn.cursor() as cursor:
                await cursor.execute('SELECT 42;')
                resp = await cursor.fetchone()
                assert resp == (42,)
            assert cursor.closed
        assert conn.closed
    assert pool.closed

def init(loop):
    config = SafeConfigParser()
    config.read([path.join(MY_DIR, "config.ini")])
    pool = yield from aiopg.create_pool(maxsize=50,
                                        cursor_factory=DictCursor,
                                        **dict(config.items("db")))
    sv = ScrumVoter(loop, pool)
    webapp = web.Application(loop=loop)
    webapp.router.add_route("GET", "/", sv.index)
    webapp.router.add_route("*", "/sprint/new", sv.sprint_edit)
    webapp.router.add_route("*", "/sprint/edit/{id}", sv.sprint_edit)
    webapp.router.add_static("/static", path.join(MY_DIR, "static"))
    srv = yield from loop.create_server(webapp.make_handler(), "0.0.0.0", 8095)
    print("Server started at http://0.0.0.0:8095")
    return srv

def exec_tx(self, tx_block, aggname_prefix, conn_i):
    aggname = "%s_%i" % (aggname_prefix, conn_i)
    agg = self.aggregates[aggname] = MtmTxAggregate(aggname)
    pool = yield from aiopg.create_pool(self.dsns[conn_i])
    conn = yield from pool.acquire()
    cur = yield from conn.cursor()
    while self.running:
        agg.start_tx()
        try:
            yield from cur.execute("commit")
            yield from tx_block(conn, cur, agg)
            agg.finish_tx("commit")
        except psycopg2.Error as e:
            agg.finish_tx(e.pgerror)
    print("We've counted to infinity!")