def command_psql(self, args):
    """Spawn an interactive ``psql`` shell against the backend Postgres.

    Only available when EdgeDB runs in DEV mode, i.e. when the server
    advertises its Postgres socket address in the connection settings.
    """
    settings = self.connection.get_settings()
    pgaddr = settings.get('pgaddr')
    if not pgaddr:
        print('\\psql requires EdgeDB to run in DEV mode')
        return

    # pgaddr is a Unix-socket path of the form "<dir>/.s.PGSQL.<port>";
    # the directory doubles as the host and the suffix is the port.
    host = os.path.dirname(pgaddr)
    port = pgaddr.rpartition('.')[2]

    pg_config = buildmeta.get_pg_config_path()
    psql = pg_config.parent / 'psql'

    cmd = [
        str(psql),
        '-h', host,
        '-p', port,
        '-d', self.connection.dbname,
        '-U', 'postgres',
    ]

    def _run(argv):
        child = subprocess.Popen(argv)
        # Keep waiting even through Ctrl-C: the interrupt is meant
        # for psql itself, not for this shell.
        while child.returncode is None:
            try:
                child.wait()
            except KeyboardInterrupt:
                pass
        return child.returncode

    pt_app.run_in_terminal(lambda: _run(cmd) == 0)
    self.prompt.app.current_buffer.reset()
    print('\r ')
def get_remote_pg_cluster(dsn: str) -> RemoteCluster:
    """Return a cluster handle for the remote Postgres behind *dsn*.

    Probes the server for the ``rds_superuser`` role; if present, the
    server is assumed to be Amazon RDS and an ``RDSCluster`` is returned
    instead of a plain ``RemoteCluster``.

    Raises:
        ValueError: if the DSN specifies more than one host.
    """
    addrs, params = pgconnparams.parse_dsn(dsn)
    if len(addrs) > 1:
        raise ValueError('multiple hosts in Postgres DSN are not supported')
    pg_config = buildmeta.get_pg_config_path()
    rcluster = RemoteCluster(addrs[0], params, pg_config_path=str(pg_config))

    async def _probe_rds() -> bool:
        conn = await rcluster.connect()
        try:
            rows = await conn.fetch(
                "SELECT * FROM pg_roles WHERE rolname = 'rds_superuser'"
            )
        finally:
            await conn.close()
        return bool(rows)

    loop = asyncio.new_event_loop()
    try:
        is_rds = loop.run_until_complete(_probe_rds())
    finally:
        loop.close()

    if is_rds:
        return RDSCluster(addrs[0], params, pg_config_path=str(pg_config))
    return rcluster
def _command_psql(self, *, flags: AbstractSet[str], arg: Optional[str]) -> None:
    """Launch ``psql`` connected to the backend Postgres.

    Only works when EdgeDB runs in DEV mode and exposes its backend
    address via ``get_server_pgaddr()``.
    """
    pgaddr = self.get_server_pgaddr()
    if not pgaddr:
        print('\\psql requires EdgeDB to run in DEV mode')
        return

    pg_config = buildmeta.get_pg_config_path()
    psql = pg_config.parent / 'psql'

    cmd = [
        str(psql),
        '-h', pgaddr['host'],
        '-p', str(pgaddr['port']),
        '-d', self.connection.dbname,
        '-U', pgaddr['user'],
    ]

    def _run(argv: List[str]) -> int:
        child = subprocess.Popen(argv)
        # Swallow Ctrl-C here so the interrupt goes to psql,
        # not to this REPL.
        while child.returncode is None:
            try:
                child.wait()
            except KeyboardInterrupt:
                pass
        return child.returncode

    pt_app.run_in_terminal(lambda: _run(cmd) == 0)
    self.prompt.app.current_buffer.reset()
    # Fix 'psql' command stdout artefacts:
    print('\r ')
def get_local_pg_cluster(
    data_dir: os.PathLike,
    *,
    max_connections: Optional[int] = None,
) -> Cluster:
    """Create a ``Cluster`` for the local Postgres data directory.

    If *max_connections* is given, derive the backend instance
    parameters from it; otherwise leave them unset.
    """
    pg_config = buildmeta.get_pg_config_path()
    if max_connections is None:
        instance_params = None
    else:
        instance_params = get_default_runtime_params(
            max_connections=max_connections,
        ).instance_params
    return Cluster(
        data_dir=data_dir,
        pg_config_path=str(pg_config),
        instance_params=instance_params,
    )
def get_local_pg_cluster(
    data_dir: os.PathLike,
    *,
    max_connections: Optional[int] = None,
    tenant_id: Optional[str] = None,
    log_level: str = 'i',
) -> Cluster:
    """Create a ``Cluster`` for the local Postgres data directory.

    Falls back to the build's default tenant id when *tenant_id* is not
    supplied.  Instance parameters are derived only when
    *max_connections* is given.
    """
    pg_config = buildmeta.get_pg_config_path()
    if tenant_id is None:
        tenant_id = buildmeta.get_default_tenant_id()
    if max_connections is None:
        instance_params = None
    else:
        instance_params = get_default_runtime_params(
            max_connections=max_connections,
            tenant_id=tenant_id,
        ).instance_params
    return Cluster(
        data_dir=data_dir,
        pg_config_path=str(pg_config),
        instance_params=instance_params,
        log_level=log_level,
    )
def get_local_pg_cluster(data_dir: os.PathLike) -> Cluster:
    """Return a ``Cluster`` for *data_dir* using the bundled pg_config."""
    pg_config_path = str(buildmeta.get_pg_config_path())
    return Cluster(data_dir=data_dir, pg_config_path=pg_config_path)
def get_remote_pg_cluster(dsn: str) -> RemoteCluster:
    """Inspect the remote Postgres at *dsn* and return a cluster handle.

    Connects once to the server to determine:

      * whether it is a managed cloud offering (Amazon RDS or GCP
        Cloud SQL, recognized by the roles their control planes
        pre-create);
      * which backend capabilities are available (ALTER SYSTEM access,
        ability to create SUPERUSER roles, presence of a C.UTF-8
        collation);
      * connection limits (max_connections and reserved slots).

    Raises:
        ValueError: if the DSN specifies more than one host.
    """
    addrs, params = pgconnparams.parse_dsn(dsn)
    if len(addrs) > 1:
        raise ValueError('multiple hosts in Postgres DSN are not supported')
    pg_config = buildmeta.get_pg_config_path()
    rcluster = RemoteCluster(addrs[0], params, pg_config_path=str(pg_config))

    loop = asyncio.new_event_loop()

    async def _get_cluster_type(
        conn,
    ) -> Tuple[Type[RemoteCluster], Optional[str]]:
        # Managed cloud flavors are identified by well-known roles.
        managed_clouds = {
            'rds_superuser': RemoteCluster,      # Amazon RDS
            'cloudsqlsuperuser': RemoteCluster,  # GCP Cloud SQL
        }

        managed_cloud_super = await conn.fetchval(
            """
                SELECT
                    rolname
                FROM
                    pg_roles
                WHERE
                    rolname = any($1::text[])
                LIMIT
                    1
            """,
            list(managed_clouds),
        )

        if managed_cloud_super is not None:
            return managed_clouds[managed_cloud_super], managed_cloud_super
        else:
            return RemoteCluster, None

    async def _detect_capabilities(conn) -> BackendCapabilities:
        caps = BackendCapabilities.NONE

        # Probe ALTER SYSTEM access with a bogus GUC: lacking the
        # privilege raises InsufficientPrivilegeError; having it
        # raises UndefinedObjectError (unknown setting) or succeeds.
        # NOTE: was a pointless f-string (no placeholders); fixed.
        try:
            await conn.execute('ALTER SYSTEM SET foo = 10')
        except asyncpg.InsufficientPrivilegeError:
            configfile_access = False
        except asyncpg.UndefinedObjectError:
            configfile_access = True
        else:
            configfile_access = True

        if configfile_access:
            caps |= BackendCapabilities.CONFIGFILE_ACCESS

        # Probe SUPERUSER grants inside a transaction that is always
        # rolled back, so no role is actually left behind.
        tx = conn.transaction()
        await tx.start()
        rname = str(uuidgen.uuid1mc())
        try:
            await conn.execute(f'CREATE ROLE "{rname}" WITH SUPERUSER')
        except asyncpg.InsufficientPrivilegeError:
            can_make_superusers = False
        else:
            can_make_superusers = True
        finally:
            await tx.rollback()
        if can_make_superusers:
            caps |= BackendCapabilities.SUPERUSER_ACCESS

        coll = await conn.fetchval('''
            SELECT collname FROM pg_collation
            WHERE lower(collname) = 'c.utf8';
        ''')
        if coll is not None:
            caps |= BackendCapabilities.C_UTF8_LOCALE

        return caps

    async def _get_pg_settings(conn, name):
        return await conn.fetchval(
            'SELECT setting FROM pg_settings WHERE name = $1', name)

    async def _get_reserved_connections(conn):
        rv = await _get_pg_settings(conn, 'superuser_reserved_connections')
        rv = int(rv)
        # Managed clouds reserve additional slots via vendor GUCs.
        for name in [
            'rds.rds_superuser_reserved_connections',
        ]:
            value = await _get_pg_settings(conn, name)
            if value:
                rv += int(value)
        return rv

    async def _get_cluster_info(
    ) -> Tuple[Type[RemoteCluster], BackendInstanceParams]:
        conn = await rcluster.connect()
        try:
            cluster_type, superuser_name = await _get_cluster_type(conn)
            max_connections = await _get_pg_settings(conn, 'max_connections')
            instance_params = BackendInstanceParams(
                capabilities=await _detect_capabilities(conn),
                base_superuser=superuser_name,
                max_connections=int(max_connections),
                reserved_connections=await _get_reserved_connections(conn),
            )
            return (cluster_type, instance_params)
        finally:
            await conn.close()

    try:
        cluster_type, instance_params = (loop.run_until_complete(
            _get_cluster_info()))
    finally:
        loop.close()

    return cluster_type(
        addrs[0],
        params,
        pg_config_path=str(pg_config),
        instance_params=instance_params,
    )