async def introspect_global_schema(self, conn=None):
    """Run the global introspection query and parse it into a schema.

    If *conn* is not supplied, a system pgcon is acquired for the
    duration of the query and released afterwards; a caller-supplied
    connection is left open.
    """
    borrowed_syscon = conn is None
    if borrowed_syscon:
        conn = await self._acquire_sys_pgcon()
    try:
        json_data = await conn.parse_execute_json(
            self._global_intro_query,
            b'__global_intro_db',
            dbver=0,
            use_prep_stmt=True,
            args=(),
        )
    finally:
        # Only release what we acquired ourselves.
        if borrowed_syscon:
            self._release_sys_pgcon()
    return s_refl.parse_into(
        base_schema=self._std_schema,
        schema=s_schema.FlatSchema(),
        data=json_data,
        schema_class_layout=self._schema_class_layout,
    )
async def _get_dbs_and_roles(
    pgconn: asyncpg.Connection,
) -> Tuple[List[str], List[str]]:
    """Return (database names, role names) introspected via EdgeQL.

    Databases are ordered so that the template database sorts last;
    roles are returned in topological order of the role-membership
    graph (parents before members).
    """
    compiler = edbcompiler.Compiler()
    await compiler.initialize_from_pg(pgconn)
    ctx = edbcompiler.new_compiler_context(
        user_schema=s_schema.FlatSchema(),
        global_schema=s_schema.FlatSchema(),
        expected_cardinality_one=False,
        single_statement=True,
        output_format=edbcompiler.IoFormat.JSON,
        bootstrap_mode=True,
    )

    _, db_query_sql = edbcompiler.compile_edgeql_script(
        compiler,
        ctx,
        'SELECT sys::Database.name',
    )
    db_names = json.loads(await pgconn.fetchval(db_query_sql))
    # False sorts before True, so the template DB ends up last.
    databases = sorted(
        db_names,
        key=lambda dname: edbdef.EDGEDB_TEMPLATE_DB in dname,
    )

    _, role_query_sql = edbcompiler.compile_edgeql_script(
        compiler,
        ctx,
        '''SELECT sys::Role { name, parents := .member_of.name, }''',
    )
    roles = json.loads(await pgconn.fetchval(role_query_sql))
    dep_graph = {
        role['name']: topological.DepGraphEntry(
            item=role['name'],
            deps=role['parents'],
            extra=False,
        )
        for role in roles
    }
    sorted_roles = list(topological.sort(dep_graph))

    return databases, sorted_roles
async def introspect_user_schema(self, conn):
    """Run the local introspection query on *conn* and parse the result
    into a fresh user schema, layered over std + global schemas."""
    raw_json = await conn.parse_execute_json(
        self._local_intro_query,
        b'__local_intro_db',
        dbver=0,
        use_prep_stmt=True,
        args=(),
    )
    # Resolution order for names during parsing: std, (empty) user,
    # then the current global schema.
    return s_refl.parse_into(
        base_schema=s_schema.ChainedSchema(
            self._std_schema,
            s_schema.FlatSchema(),
            self.get_global_schema(),
        ),
        schema=s_schema.FlatSchema(),
        data=raw_json,
        schema_class_layout=self._schema_class_layout,
    )
def _load_std_schema():
    """Return the standard-library schema, computing and caching it
    (module-global, plus an on-disk pickle in dev mode) on first use."""
    global _std_schema
    if _std_schema is not None:
        return _std_schema

    std_dirs_hash = buildmeta.hash_dirs(s_std.CACHE_SRC_DIRS)

    # In dev mode, try the on-disk cache keyed by the source-dir hash.
    schema = None
    if devmode.is_in_dev_mode():
        schema = buildmeta.read_data_cache(
            std_dirs_hash, 'transient-stdschema.pickle')

    if schema is None:
        schema = s_schema.FlatSchema()
        for modname in s_schema.STD_LIB + ('stdgraphql',):
            schema = s_std.load_std_module(schema, modname)
        if devmode.is_in_dev_mode():
            buildmeta.write_data_cache(
                schema, std_dirs_hash, 'transient-stdschema.pickle')

    _std_schema = schema
    return _std_schema
async def compile(server, query):
    """Compile a single EdgeQL *query* against the system database and
    return the resulting query unit (JSON output, many cardinality)."""
    pool = server.get_compiler_pool()
    source = edgeql.Source.from_string(query)
    units, _ = await pool.compile(
        edbdef.EDGEDB_SYSTEM_DB,
        s_schema.FlatSchema(),                  # user schema
        server.get_global_schema(),
        immu.Map(),                             # reflection cache
        immu.Map(),                             # database config
        server.get_compilation_system_config(),
        source,
        None,                                   # modaliases
        None,                                   # session config
        IoFormat.JSON_ELEMENTS,                 # json mode
        False,                                  # expected cardinality is MANY
        0,                                      # no implicit limit
        False,                                  # no inlining of type IDs
        False,                                  # no inlining of type names
        compiler.CompileStatementMode.SINGLE,
        True,                                   # json parameters
    )
    return units[0]
async def _make_stdlib(testmode: bool, global_ids) -> StdlibBits:
    """Compile the EdgeDB standard library into a StdlibBits bundle.

    Parses and applies all std module DDL, builds the schema-reflection
    structure, precompiles schema-storage SQL for every delta, and
    assembles the local/global introspection queries.

    :param testmode: also load the ``_testmode`` std module.
    :param global_ids: mapping of well-known names to fixed UUIDs
        (at least the superuser role id is read here).
    """
    # Three layers: std, user, global — all start empty.
    schema = s_schema.ChainedSchema(
        s_schema.FlatSchema(),
        s_schema.FlatSchema(),
        s_schema.FlatSchema(),
    )
    schema, _ = s_mod.Module.create_in_schema(
        schema,
        name=sn.UnqualName('__derived__'),
    )

    current_block = dbops.PLTopBlock()

    # Concatenate all std module sources into one DDL script.
    std_texts = []
    for modname in s_schema.STD_SOURCES:
        std_texts.append(s_std.get_std_module_text(modname))
    if testmode:
        std_texts.append(s_std.get_std_module_text(sn.UnqualName('_testmode')))
    ddl_text = '\n'.join(std_texts)

    types: Set[uuid.UUID] = set()
    std_plans: List[sd.Command] = []

    for ddl_cmd in edgeql.parse_block(ddl_text):
        assert isinstance(ddl_cmd, qlast.DDLCommand)
        delta_command = s_ddl.delta_from_ddl(
            ddl_cmd, modaliases={}, schema=schema, stdmode=True)
        if debug.flags.delta_plan_input:
            debug.header('Delta Plan Input')
            debug.dump(delta_command)
        # Apply and adapt delta, build native delta plan, which
        # will also update the schema.
        schema, plan = _process_delta(delta_command, schema)
        std_plans.append(delta_command)
        types.update(plan.new_types)
        plan.generate(current_block)

    # Record the std schema version as its own delta.
    _, schema_version = s_std.make_schema_version(schema)
    schema, plan = _process_delta(schema_version, schema)
    std_plans.append(schema_version)
    plan.generate(current_block)

    # Bootstrap globals: the superuser role with a pinned UUID.
    stdglobals = '\n'.join([
        f'''CREATE SUPERUSER ROLE {edbdef.EDGEDB_SUPERUSER} {{ SET id := <uuid>'{global_ids[edbdef.EDGEDB_SUPERUSER]}' }};''',
    ])
    schema = await _execute_edgeql_ddl(schema, stdglobals)

    _, global_schema_version = s_std.make_global_schema_version(schema)
    schema, plan = _process_delta(global_schema_version, schema)
    std_plans.append(global_schema_version)
    plan.generate(current_block)

    # Build the schema-reflection structure and apply its delta.
    reflection = s_refl.generate_structure(schema)
    reflschema, reflplan = _process_delta(reflection.intro_schema_delta, schema)
    assert current_block is not None
    reflplan.generate(current_block)
    subblock = current_block.add_block()

    compiler = edbcompiler.new_compiler(
        std_schema=schema.get_top_schema(),
        reflection_schema=reflschema.get_top_schema(),
        schema_class_layout=reflection.class_layout,  # type: ignore
    )

    # Precompile schema-storage SQL for each std plan.
    compilerctx = edbcompiler.new_compiler_context(
        user_schema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        bootstrap_mode=True,
    )
    for std_plan in std_plans:
        compiler._compile_schema_storage_in_delta(
            ctx=compilerctx,
            delta=std_plan,
            block=subblock,
        )

    # The reflection delta itself is compiled in internal-schema mode.
    compilerctx = edbcompiler.new_compiler_context(
        user_schema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        bootstrap_mode=True,
        internal_schema_mode=True,
    )
    compiler._compile_schema_storage_in_delta(
        ctx=compilerctx,
        delta=reflection.intro_schema_delta,
        block=subblock,
    )

    sqltext = current_block.to_string()

    compilerctx = edbcompiler.new_compiler_context(
        user_schema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        schema_reflection_mode=True,
        output_format=edbcompiler.IoFormat.JSON_ELEMENTS,
    )

    # The introspection query bits are returned in chunks
    # because it's a large UNION and we currently generate SQL
    # that is much harder for Postgres to plan as opposed to a
    # straight flat UNION.
    sql_intro_local_parts = []
    sql_intro_global_parts = []
    for intropart in reflection.local_intro_parts:
        sql_intro_local_parts.append(
            compile_single_query(
                intropart,
                compiler=compiler,
                compilerctx=compilerctx,
            ),
        )
    for intropart in reflection.global_intro_parts:
        sql_intro_global_parts.append(
            compile_single_query(
                intropart,
                compiler=compiler,
                compilerctx=compilerctx,
            ),
        )

    # Wrap each UNION in a CTE aggregating the rows into one JSON array.
    local_intro_sql = ' UNION ALL '.join(sql_intro_local_parts)
    local_intro_sql = f''' WITH intro(c) AS ({local_intro_sql}) SELECT json_agg(intro.c) FROM intro '''
    global_intro_sql = ' UNION ALL '.join(sql_intro_global_parts)
    global_intro_sql = f''' WITH intro(c) AS ({global_intro_sql}) SELECT json_agg(intro.c) FROM intro '''

    return StdlibBits(
        stdschema=schema.get_top_schema(),
        reflschema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        sqltext=sqltext,
        types=types,
        classlayout=reflection.class_layout,
        local_intro_query=local_intro_sql,
        global_intro_query=global_intro_sql,
    )
async def _bootstrap(
    cluster: pgcluster.BaseCluster,
    pgconn: asyncpg_con.Connection,
    args: Dict[str, Any],
) -> None:
    """Bootstrap an EdgeDB instance on a fresh Postgres cluster.

    Creates the EdgeDB roles, the template/system/superuser databases,
    initializes the stdlib and configuration, and optionally creates
    the user-requested default role and database from *args*.
    """
    await _ensure_edgedb_supergroup(
        cluster, pgconn, edbdef.EDGEDB_SUPERGROUP,
    )
    superuser_uid = await _ensure_edgedb_role(
        cluster, pgconn, edbdef.EDGEDB_SUPERUSER,
        superuser=True,
        builtin=True,
    )
    # Perform the rest of the bootstrap as the EdgeDB superuser.
    await _execute(
        pgconn,
        f'SET ROLE {qi(edbdef.EDGEDB_SUPERUSER)};',
    )
    cluster.set_default_session_authorization(edbdef.EDGEDB_SUPERUSER)

    new_template_db_id = await _create_edgedb_template_database(
        cluster, pgconn)

    # Populate the template database: stdlib, config, defaults.
    conn = await cluster.connect(database=edbdef.EDGEDB_TEMPLATE_DB)
    try:
        conn.add_log_listener(_pg_log_listener)
        await _populate_misc_instance_data(cluster, conn)
        stdlib, compiler = await _init_stdlib(
            cluster, conn,
            testmode=args['testmode'],
            global_ids={
                edbdef.EDGEDB_SUPERUSER: superuser_uid,
                edbdef.EDGEDB_TEMPLATE_DB: new_template_db_id,
            })
        await _bootstrap_config_spec(stdlib.stdschema, cluster)
        await _compile_sys_queries(stdlib.reflschema, compiler, cluster)
        schema = s_schema.FlatSchema()
        schema = await _init_defaults(schema, compiler, conn)
        schema = await _populate_data(schema, compiler, conn)
    finally:
        await conn.close()

    # Create and configure the system database (cloned off the template).
    await _create_edgedb_database(
        pgconn,
        edbdef.EDGEDB_SYSTEM_DB,
        edbdef.EDGEDB_SUPERUSER,
        builtin=True,
    )
    conn = await cluster.connect(database=edbdef.EDGEDB_SYSTEM_DB)
    try:
        conn.add_log_listener(_pg_log_listener)
        await _configure(schema, compiler, conn, insecure=args['insecure'])
    finally:
        await conn.close()

    await _create_edgedb_database(
        pgconn,
        edbdef.EDGEDB_SUPERUSER_DB,
        edbdef.EDGEDB_SUPERUSER,
    )

    # Optional user-requested default role, distinct from the builtin one.
    if (args['default_database_user']
            and args['default_database_user'] != edbdef.EDGEDB_SUPERUSER):
        await _ensure_edgedb_role(
            cluster, pgconn, args['default_database_user'], superuser=True,
        )
        # Switch so the default database below is owned by that role.
        await _execute(
            pgconn,
            f"SET ROLE {qi(args['default_database_user'])};",
        )

    # Optional user-requested default database.
    if (args['default_database']
            and args['default_database'] != edbdef.EDGEDB_SUPERUSER_DB):
        await _create_edgedb_database(
            pgconn,
            args['default_database'],
            args['default_database_user'] or edbdef.EDGEDB_SUPERUSER,
        )
async def _bootstrap(
    ctx: BootstrapContext,
) -> None:
    """Bootstrap an EdgeDB instance using a BootstrapContext.

    Same overall flow as the legacy variant, but all helpers take the
    context, role/db names are mapped through the cluster, and dev-mode
    bootstraps are serialized with a Postgres advisory lock.
    """
    args = ctx.args

    await _ensure_edgedb_supergroup(
        ctx,
        edbdef.EDGEDB_SUPERGROUP,
    )
    superuser_uid = await _ensure_edgedb_role(
        ctx,
        edbdef.EDGEDB_SUPERUSER,
        superuser=True,
        builtin=True,
    )

    # Map the logical superuser name to the backend role name.
    superuser = ctx.cluster.get_role_name(edbdef.EDGEDB_SUPERUSER)
    cluster = ctx.cluster
    await _execute(ctx.conn, f'SET ROLE {qi(superuser)}')
    cluster.set_default_session_authorization(superuser)

    in_dev_mode = devmode.is_in_dev_mode()
    # Protect against multiple EdgeDB tenants from trying to bootstrap
    # on the same cluster in devmode, as that is both a waste of resources
    # and might result in broken stdlib cache.
    if in_dev_mode:
        bootstrap_lock = 0xEDB00001
        await ctx.conn.execute('SELECT pg_advisory_lock($1)', bootstrap_lock)

    new_template_db_id = await _create_edgedb_template_database(ctx)
    tpl_db = cluster.get_db_name(edbdef.EDGEDB_TEMPLATE_DB)
    conn = await cluster.connect(database=tpl_db)

    try:
        # Re-point the context at the template-db connection.
        tpl_ctx = ctx._replace(conn=conn)
        conn.add_log_listener(_pg_log_listener)
        await _populate_misc_instance_data(tpl_ctx)
        stdlib, compiler = await _init_stdlib(
            tpl_ctx,
            testmode=args.testmode,
            global_ids={
                edbdef.EDGEDB_SUPERUSER: superuser_uid,
                edbdef.EDGEDB_TEMPLATE_DB: new_template_db_id,
            })
        await _bootstrap_config_spec(tpl_ctx, stdlib.stdschema)
        await _compile_sys_queries(tpl_ctx, stdlib.reflschema, compiler)
        schema = s_schema.FlatSchema()
        schema = await _init_defaults(schema, compiler, conn)
        schema = await _populate_data(schema, compiler, conn)
    finally:
        # Release the advisory lock before closing the template conn.
        if in_dev_mode:
            await ctx.conn.execute(
                'SELECT pg_advisory_unlock($1)',
                bootstrap_lock,
            )
        await conn.close()

    await _create_edgedb_database(
        ctx,
        edbdef.EDGEDB_SYSTEM_DB,
        edbdef.EDGEDB_SUPERUSER,
        builtin=True,
    )
    conn = await cluster.connect(
        database=cluster.get_db_name(edbdef.EDGEDB_SYSTEM_DB))
    try:
        conn.add_log_listener(_pg_log_listener)
        await _configure(
            ctx._replace(conn=conn),
            schema=schema,
            compiler=compiler,
            insecure=args.insecure,
        )
    finally:
        await conn.close()

    await _create_edgedb_database(
        ctx,
        edbdef.EDGEDB_SUPERUSER_DB,
        edbdef.EDGEDB_SUPERUSER,
    )

    # Optional user-requested default role.
    if (args.default_database_user
            and args.default_database_user != edbdef.EDGEDB_SUPERUSER):
        await _ensure_edgedb_role(
            ctx,
            args.default_database_user,
            superuser=True,
        )
        def_role = ctx.cluster.get_role_name(args.default_database_user)
        # Switch so the default database below is owned by that role.
        await _execute(ctx.conn, f"SET ROLE {qi(def_role)}")

    # Optional user-requested default database.
    if (args.default_database
            and args.default_database != edbdef.EDGEDB_SUPERUSER_DB):
        await _create_edgedb_database(
            ctx,
            args.default_database,
            args.default_database_user or edbdef.EDGEDB_SUPERUSER,
        )
async def _bootstrap(ctx: BootstrapContext) -> None:
    """Bootstrap an EdgeDB instance, adapting to backend capabilities.

    Validates the backend Postgres version, applies any capability
    overrides requested via *args*, and then performs the bootstrap,
    skipping role/database creation when the backend does not support
    CREATE ROLE / CREATE DATABASE (e.g. single-database hosted setups).
    """
    args = ctx.args
    cluster = ctx.cluster
    backend_params = cluster.get_runtime_params()

    # Refuse to bootstrap on an unsupported Postgres version.
    if backend_params.instance_params.version < edbdef.MIN_POSTGRES_VERSION:
        min_ver = '.'.join(str(v) for v in edbdef.MIN_POSTGRES_VERSION)
        raise errors.ConfigurationError(
            'unsupported backend',
            details=(
                f'EdgeDB requires PostgreSQL version {min_ver} or later, '
                f'while the specified backend reports itself as '
                f'{backend_params.instance_params.version.string}.'
            )
        )

    # Mask off capabilities the caller explicitly wants absent.
    if args.backend_capability_sets.must_be_absent:
        caps = backend_params.instance_params.capabilities
        disabled = []
        for cap in args.backend_capability_sets.must_be_absent:
            if caps & cap:
                caps &= ~cap
                disabled.append(cap)
        if disabled:
            logger.info(f"the following backend capabilities are disabled: "
                        f"{', '.join(str(cap.name) for cap in disabled)}")
            cluster.overwrite_capabilities(caps)

    _check_capabilities(ctx)

    if backend_params.has_create_role:
        superuser_uid = await _bootstrap_edgedb_super_roles(ctx)
    else:
        # No role management on this backend; synthesize an id.
        superuser_uid = uuidgen.uuid1mc()

    in_dev_mode = devmode.is_in_dev_mode()
    # Protect against multiple EdgeDB tenants from trying to bootstrap
    # on the same cluster in devmode, as that is both a waste of resources
    # and might result in broken stdlib cache.
    if in_dev_mode:
        bootstrap_lock = 0xEDB00001
        await ctx.conn.execute('SELECT pg_advisory_lock($1)', bootstrap_lock)

    if backend_params.has_create_database:
        new_template_db_id = await _create_edgedb_template_database(ctx)
        tpl_db = cluster.get_db_name(edbdef.EDGEDB_TEMPLATE_DB)
        conn = await cluster.connect(database=tpl_db)
    else:
        # Single-database backend: work in the current database.
        new_template_db_id = uuidgen.uuid1mc()

    try:
        if backend_params.has_create_database:
            tpl_ctx = ctx._replace(conn=conn)
            conn.add_log_listener(_pg_log_listener)
        else:
            tpl_ctx = ctx

        await _populate_misc_instance_data(tpl_ctx)

        stdlib, config_spec, compiler = await _init_stdlib(
            tpl_ctx,
            testmode=args.testmode,
            global_ids={
                edbdef.EDGEDB_SUPERUSER: superuser_uid,
                edbdef.EDGEDB_TEMPLATE_DB: new_template_db_id,
            }
        )
        await _compile_sys_queries(
            tpl_ctx,
            stdlib.reflschema,
            compiler,
            config_spec,
        )
        schema = s_schema.FlatSchema()
        schema = await _init_defaults(schema, compiler, tpl_ctx.conn)
    finally:
        # Release the advisory lock; only close conn if we opened one.
        if in_dev_mode:
            await ctx.conn.execute(
                'SELECT pg_advisory_unlock($1)',
                bootstrap_lock,
            )
        if backend_params.has_create_database:
            await conn.close()

    if backend_params.has_create_database:
        await _create_edgedb_database(
            ctx,
            edbdef.EDGEDB_SYSTEM_DB,
            edbdef.EDGEDB_SUPERUSER,
            builtin=True,
        )
        conn = await cluster.connect(
            database=cluster.get_db_name(edbdef.EDGEDB_SYSTEM_DB))
        try:
            conn.add_log_listener(_pg_log_listener)
            await _configure(
                ctx._replace(conn=conn),
                config_spec=config_spec,
                schema=schema,
                compiler=compiler,
            )
        finally:
            await conn.close()
    else:
        # Configure in place when a separate system DB cannot be created.
        await _configure(
            ctx,
            config_spec=config_spec,
            schema=schema,
            compiler=compiler,
        )

    if backend_params.has_create_database:
        await _create_edgedb_database(
            ctx,
            edbdef.EDGEDB_SUPERUSER_DB,
            edbdef.EDGEDB_SUPERUSER,
        )
    else:
        await _set_edgedb_database_metadata(
            ctx,
            edbdef.EDGEDB_SUPERUSER_DB,
        )

    # Optional user-requested default role (needs CREATE ROLE support).
    if (
        backend_params.has_create_role
        and args.default_database_user
        and args.default_database_user != edbdef.EDGEDB_SUPERUSER
    ):
        await _ensure_edgedb_role(
            ctx,
            args.default_database_user,
            superuser=True,
        )
        def_role = ctx.cluster.get_role_name(args.default_database_user)
        # Switch so the default database below is owned by that role.
        await _execute(ctx.conn, f"SET ROLE {qi(def_role)}")

    # Optional user-requested default database (needs CREATE DATABASE).
    if (
        backend_params.has_create_database
        and args.default_database
        and args.default_database != edbdef.EDGEDB_SUPERUSER_DB
    ):
        await _create_edgedb_database(
            ctx,
            args.default_database,
            args.default_database_user or edbdef.EDGEDB_SUPERUSER,
        )