async def _amend_stdlib(
    ddl_text: str,
    stdlib: StdlibBits,
) -> Tuple[StdlibBits, str]:
    schema = stdlib.stdschema
    reflschema = stdlib.reflschema

    topblock = dbops.PLTopBlock()
    plans = []

    context = sd.CommandContext()
    context.stdmode = True

    for ddl_cmd in edgeql.parse_block(ddl_text):
        assert isinstance(ddl_cmd, qlast.DDLCommand)
        delta_command = s_ddl.delta_from_ddl(
            ddl_cmd, modaliases={}, schema=schema, stdmode=True)

        if debug.flags.delta_plan_input:
            debug.header('Delta Plan Input')
            debug.dump(delta_command)

        # Apply and adapt delta, build native delta plan, which
        # will also update the schema.
        schema, plan = _process_delta(delta_command, schema)
        reflschema = delta_command.apply(reflschema, context)
        plan.generate(topblock)
        plans.append(plan)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=reflschema,
        schema_class_layout=stdlib.classlayout,
    )

    compilerctx = edbcompiler.new_compiler_context(
        schema,
        bootstrap_mode=True,
    )
    for plan in plans:
        compiler._compile_schema_storage_in_delta(
            ctx=compilerctx,
            delta=plan,
            block=topblock,
        )

    sqltext = topblock.to_string()

    return stdlib._replace(stdschema=schema, reflschema=reflschema), sqltext
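# A hedged usage sketch (not from the original source): driving
# _amend_stdlib() above to patch an already-built standard library.  The
# DDL text is illustrative only, and `stdlib` is assumed to be a StdlibBits
# produced by _make_stdlib() further below; the helper name is hypothetical.
async def _example_amend(stdlib: StdlibBits) -> Tuple[StdlibBits, str]:
    # Returns the updated stdlib bits and the SQL text that applies the
    # amendment to the backend.
    return await _amend_stdlib(
        "CREATE FUNCTION std::_example() -> std::int64 USING (1);",
        stdlib,
    )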
def test_server_compiler_compile_edgeql_script(self):
    compiler = tb.new_compiler()
    context = edbcompiler.new_compiler_context(
        user_schema=self.schema,
        modaliases={None: 'default'},
    )
    edbcompiler.compile_edgeql_script(
        compiler=compiler,
        ctx=context,
        eql='''
            SELECT Foo {
                bar
            }
        ''',
    )
async def _get_dbs_and_roles(
    pgconn: asyncpg.Connection,
) -> Tuple[List[str], List[str]]:
    compiler = edbcompiler.Compiler()
    await compiler.initialize_from_pg(pgconn)
    compilerctx = edbcompiler.new_compiler_context(
        user_schema=s_schema.FlatSchema(),
        global_schema=s_schema.FlatSchema(),
        expected_cardinality_one=False,
        single_statement=True,
        output_format=edbcompiler.IoFormat.JSON,
        bootstrap_mode=True,
    )

    _, get_databases_sql = edbcompiler.compile_edgeql_script(
        compiler,
        compilerctx,
        'SELECT sys::Database.name',
    )

    databases = list(
        sorted(
            json.loads(await pgconn.fetchval(get_databases_sql)),
            key=lambda dname: edbdef.EDGEDB_TEMPLATE_DB in dname,
        ))

    _, get_roles_sql = edbcompiler.compile_edgeql_script(
        compiler,
        compilerctx,
        '''
        SELECT sys::Role {
            name,
            parents := .member_of.name,
        }
        ''',
    )

    roles = json.loads(await pgconn.fetchval(get_roles_sql))
    sorted_roles = list(
        topological.sort({
            r['name']: topological.DepGraphEntry(
                item=r['name'],
                deps=r['parents'],
                extra=False,
            ) for r in roles
        }))

    return databases, sorted_roles
def compile_bootstrap_script(
    compiler: edbcompiler.Compiler,
    schema: s_schema.Schema,
    eql: str,
    *,
    single_statement: bool = False,
    expected_cardinality_one: bool = False,
) -> Tuple[s_schema.Schema, str]:
    ctx = edbcompiler.new_compiler_context(
        schema=schema,
        single_statement=single_statement,
        expected_cardinality_one=expected_cardinality_one,
        json_parameters=True,
        output_format=edbcompiler.IoFormat.JSON,
    )
    return edbcompiler.compile_edgeql_script(compiler, ctx, eql)
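# A minimal sketch of calling compile_bootstrap_script() as defined above,
# mirroring the pattern used by _compile_sys_queries() at the end of this
# section.  The `compiler` and `schema` arguments are assumed to come from
# the bootstrap machinery (edbcompiler.new_compiler() plus the stdlib
# schema); the wrapper name is hypothetical.
def _example_compile(compiler, schema):
    schema, sql = compile_bootstrap_script(
        compiler,
        schema,
        'SELECT cfg::get_config_json()',
        expected_cardinality_one=True,  # the query yields one JSON value
        single_statement=True,
    )
    return schema, sql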
async def _get_dbs_and_roles(pgconn) -> Tuple[List[str], List[str]]:
    compiler = edbcompiler.Compiler({})
    await compiler.ensure_initialized(pgconn)
    schema = compiler.get_std_schema()
    compilerctx = edbcompiler.new_compiler_context(
        schema,
        expected_cardinality_one=False,
        single_statement=True,
        output_format=edbcompiler.IoFormat.JSON,
    )

    schema, get_databases_sql = edbcompiler.compile_edgeql_script(
        compiler,
        compilerctx,
        'SELECT sys::Database.name',
    )

    databases = list(
        sorted(
            json.loads(await pgconn.fetchval(get_databases_sql)),
            key=lambda dname: dname == edbdef.EDGEDB_TEMPLATE_DB,
        ))

    schema, get_roles_sql = edbcompiler.compile_edgeql_script(
        compiler,
        compilerctx,
        '''
        SELECT sys::Role {
            name,
            parents := .member_of.name,
        }
        ''',
    )

    roles = json.loads(await pgconn.fetchval(get_roles_sql))
    sorted_roles = list(
        topological.sort({
            r['name']: topological.DepGraphEntry(
                item=r['name'],
                deps=r['parents'],
                extra=False,
            ) for r in roles
        }))

    return databases, sorted_roles
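# An illustration, with hypothetical role data, of the dependency ordering
# used by both _get_dbs_and_roles() variants above: topological.sort()
# yields each role only after all roles it is a member of, so parent roles
# can be recreated before their members.
from edb.common import topological

example_roles = [
    {'name': 'admin', 'parents': []},
    {'name': 'editor', 'parents': ['admin']},
    {'name': 'writer', 'parents': ['editor', 'admin']},
]
ordered = list(topological.sort({
    r['name']: topological.DepGraphEntry(
        item=r['name'],
        deps=r['parents'],
        extra=False,
    ) for r in example_roles
}))
# ordered == ['admin', 'editor', 'writer']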
async def test_server_compiler_pool_disconnect_queue(self):
    with tempfile.TemporaryDirectory() as td:
        pool_ = await pool.create_compiler_pool(
            runstate_dir=td,
            pool_size=2,
            dbindex=dbview.DatabaseIndex(
                None,
                std_schema=self._std_schema,
                global_schema=None,
                sys_config={},
            ),
            backend_runtime_params=None,
            std_schema=self._std_schema,
            refl_schema=self._refl_schema,
            schema_class_layout=self._schema_class_layout,
        )
        try:
            w1 = await pool_._acquire_worker()
            w2 = await pool_._acquire_worker()
            with self.assertRaises(AttributeError):
                await w1.call('nonexist')
            with self.assertRaises(AttributeError):
                await w2.call('nonexist')
            pool_._release_worker(w1)
            pool_._release_worker(w2)

            pool_._ready_evt.clear()
            os.kill(w1.get_pid(), signal.SIGTERM)
            os.kill(w2.get_pid(), signal.SIGTERM)
            await asyncio.wait_for(pool_._ready_evt.wait(), 10)

            context = edbcompiler.new_compiler_context(
                user_schema=self._std_schema,
                modaliases={None: 'default'},
            )
            await asyncio.gather(*(
                pool_.compile_in_tx(
                    context.state.current_tx().id,
                    pickle.dumps(context.state),
                    0,
                    edgeql.Source.from_string('SELECT 123'),
                    edbcompiler.IoFormat.BINARY,
                    False,
                    101,
                    False,
                    True,
                    'single',
                    (0, 12),
                    True,
                ) for _ in range(4)
            ))
        finally:
            await pool_.stop()
async def _make_stdlib(testmode: bool, global_ids) -> StdlibBits:
    schema = s_schema.ChainedSchema(
        s_schema.FlatSchema(),
        s_schema.FlatSchema(),
        s_schema.FlatSchema(),
    )
    schema, _ = s_mod.Module.create_in_schema(
        schema,
        name=sn.UnqualName('__derived__'),
    )

    current_block = dbops.PLTopBlock()

    std_texts = []
    for modname in s_schema.STD_SOURCES:
        std_texts.append(s_std.get_std_module_text(modname))

    if testmode:
        std_texts.append(s_std.get_std_module_text(sn.UnqualName('_testmode')))

    ddl_text = '\n'.join(std_texts)
    types: Set[uuid.UUID] = set()
    std_plans: List[sd.Command] = []

    for ddl_cmd in edgeql.parse_block(ddl_text):
        assert isinstance(ddl_cmd, qlast.DDLCommand)
        delta_command = s_ddl.delta_from_ddl(
            ddl_cmd, modaliases={}, schema=schema, stdmode=True)

        if debug.flags.delta_plan_input:
            debug.header('Delta Plan Input')
            debug.dump(delta_command)

        # Apply and adapt delta, build native delta plan, which
        # will also update the schema.
        schema, plan = _process_delta(delta_command, schema)
        std_plans.append(delta_command)
        types.update(plan.new_types)
        plan.generate(current_block)

    _, schema_version = s_std.make_schema_version(schema)
    schema, plan = _process_delta(schema_version, schema)
    std_plans.append(schema_version)
    plan.generate(current_block)

    stdglobals = '\n'.join([
        f'''CREATE SUPERUSER ROLE {edbdef.EDGEDB_SUPERUSER} {{
            SET id := <uuid>'{global_ids[edbdef.EDGEDB_SUPERUSER]}'
        }};''',
    ])

    schema = await _execute_edgeql_ddl(schema, stdglobals)

    _, global_schema_version = s_std.make_global_schema_version(schema)
    schema, plan = _process_delta(global_schema_version, schema)
    std_plans.append(global_schema_version)
    plan.generate(current_block)

    reflection = s_refl.generate_structure(schema)
    reflschema, reflplan = _process_delta(
        reflection.intro_schema_delta, schema)

    assert current_block is not None
    reflplan.generate(current_block)
    subblock = current_block.add_block()

    compiler = edbcompiler.new_compiler(
        std_schema=schema.get_top_schema(),
        reflection_schema=reflschema.get_top_schema(),
        schema_class_layout=reflection.class_layout,  # type: ignore
    )

    compilerctx = edbcompiler.new_compiler_context(
        user_schema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        bootstrap_mode=True,
    )

    for std_plan in std_plans:
        compiler._compile_schema_storage_in_delta(
            ctx=compilerctx,
            delta=std_plan,
            block=subblock,
        )

    compilerctx = edbcompiler.new_compiler_context(
        user_schema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        bootstrap_mode=True,
        internal_schema_mode=True,
    )
    compiler._compile_schema_storage_in_delta(
        ctx=compilerctx,
        delta=reflection.intro_schema_delta,
        block=subblock,
    )

    sqltext = current_block.to_string()

    compilerctx = edbcompiler.new_compiler_context(
        user_schema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        schema_reflection_mode=True,
        output_format=edbcompiler.IoFormat.JSON_ELEMENTS,
    )

    # The introspection query bits are returned in chunks
    # because it's a large UNION and we currently generate SQL
    # that is much harder for Postgres to plan as opposed to a
    # straight flat UNION.
    sql_intro_local_parts = []
    sql_intro_global_parts = []
    for intropart in reflection.local_intro_parts:
        sql_intro_local_parts.append(
            compile_single_query(
                intropart,
                compiler=compiler,
                compilerctx=compilerctx,
            ),
        )

    for intropart in reflection.global_intro_parts:
        sql_intro_global_parts.append(
            compile_single_query(
                intropart,
                compiler=compiler,
                compilerctx=compilerctx,
            ),
        )

    local_intro_sql = ' UNION ALL '.join(sql_intro_local_parts)
    local_intro_sql = f'''
        WITH intro(c) AS ({local_intro_sql})
        SELECT json_agg(intro.c) FROM intro
    '''

    global_intro_sql = ' UNION ALL '.join(sql_intro_global_parts)
    global_intro_sql = f'''
        WITH intro(c) AS ({global_intro_sql})
        SELECT json_agg(intro.c) FROM intro
    '''

    return StdlibBits(
        stdschema=schema.get_top_schema(),
        reflschema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        sqltext=sqltext,
        types=types,
        classlayout=reflection.class_layout,
        local_intro_query=local_intro_sql,
        global_intro_query=global_intro_sql,
    )
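# A hedged sketch of how the wrapped introspection query built above is
# meant to be consumed: the WITH/json_agg wrapper collapses the UNION ALL
# chunks into a single JSON array, fetchable as one value.  The
# `pgconn.fetchval()` pattern follows _get_dbs_and_roles() above; the
# function name is hypothetical.
async def _example_run_intro(pgconn, local_intro_sql: str):
    # One JSON array with one element per reflected schema object.
    return json.loads(await pgconn.fetchval(local_intro_sql))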
async def _make_stdlib(testmode: bool, global_ids) -> StdlibBits:
    schema = s_schema.Schema()
    schema, _ = s_mod.Module.create_in_schema(schema, name='__derived__')

    current_block = dbops.PLTopBlock()

    std_texts = []
    for modname in s_schema.STD_LIB + ('stdgraphql',):
        std_texts.append(s_std.get_std_module_text(modname))

    if testmode:
        std_texts.append(s_std.get_std_module_text('_testmode'))

    ddl_text = '\n'.join(std_texts)
    types: Set[uuid.UUID] = set()
    std_plans: List[sd.Command] = []

    for ddl_cmd in edgeql.parse_block(ddl_text):
        delta_command = s_ddl.delta_from_ddl(
            ddl_cmd, modaliases={}, schema=schema, stdmode=True)

        if debug.flags.delta_plan_input:
            debug.header('Delta Plan Input')
            debug.dump(delta_command)

        # Apply and adapt delta, build native delta plan, which
        # will also update the schema.
        schema, plan = _process_delta(delta_command, schema)
        std_plans.append(delta_command)
        types.update(plan.new_types)
        plan.generate(current_block)

    stdglobals = '\n'.join([
        f'''CREATE SUPERUSER ROLE {edbdef.EDGEDB_SUPERUSER} {{
            SET id := <uuid>'{global_ids[edbdef.EDGEDB_SUPERUSER]}'
        }};''',
        f'''CREATE DATABASE {edbdef.EDGEDB_TEMPLATE_DB} {{
            SET id := <uuid>'{global_ids[edbdef.EDGEDB_TEMPLATE_DB]}'
        }};''',
        f'CREATE DATABASE {edbdef.EDGEDB_SUPERUSER_DB};',
    ])

    context = sd.CommandContext(stdmode=True)

    for ddl_cmd in edgeql.parse_block(stdglobals):
        delta_command = s_ddl.delta_from_ddl(
            ddl_cmd, modaliases={}, schema=schema, stdmode=True)
        schema = delta_command.apply(schema, context)

    refldelta, classlayout, introparts = s_refl.generate_structure(schema)
    reflschema, reflplan = _process_delta(refldelta, schema)

    std_plans.append(refldelta)

    assert current_block is not None
    reflplan.generate(current_block)
    subblock = current_block.add_block()

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=reflschema,
        schema_class_layout=classlayout,
        bootstrap_mode=True,
    )

    compilerctx = edbcompiler.new_compiler_context(reflschema)

    for std_plan in std_plans:
        compiler._compile_schema_storage_in_delta(
            ctx=compilerctx,
            delta=std_plan,
            block=subblock,
            is_internal_reflection=std_plan is refldelta,
            stdmode=True,
        )

    sqltext = current_block.to_string()

    compilerctx = edbcompiler.new_compiler_context(
        reflschema,
        schema_reflection_mode=True,
        output_format=edbcompiler.IoFormat.JSON_ELEMENTS,
    )

    # The introspection query bits are returned in chunks
    # because it's a large UNION and we currently generate SQL
    # that is much harder for Postgres to plan as opposed to a
    # straight flat UNION.
    sql_introparts = []

    for intropart in introparts:
        introtokens = tokenizer.tokenize(intropart.encode())
        units = compiler._compile(ctx=compilerctx, tokens=introtokens)
        assert len(units) == 1 and len(units[0].sql) == 1
        sql_intropart = units[0].sql[0].decode()
        sql_introparts.append(sql_intropart)

    introsql = ' UNION ALL '.join(sql_introparts)

    return StdlibBits(
        stdschema=schema,
        reflschema=reflschema,
        sqltext=sqltext,
        types=types,
        classlayout=classlayout,
        introquery=introsql,
    )
async def _compile_sys_queries(
    ctx: BootstrapContext,
    schema: s_schema.Schema,
    compiler: edbcompiler.Compiler,
    config_spec: config.Spec,
) -> None:
    queries = {}

    _, sql = compile_bootstrap_script(
        compiler,
        schema,
        'SELECT cfg::get_config_json()',
        expected_cardinality_one=True,
        single_statement=True,
    )
    queries['config'] = sql

    _, sql = compile_bootstrap_script(
        compiler,
        schema,
        "SELECT cfg::get_config_json(sources := ['database'])",
        expected_cardinality_one=True,
        single_statement=True,
    )
    queries['dbconfig'] = sql

    _, sql = compile_bootstrap_script(
        compiler,
        schema,
        "SELECT cfg::get_config_json(max_source := 'system override')",
        expected_cardinality_one=True,
        single_statement=True,
    )
    queries['sysconfig'] = sql

    _, sql = compile_bootstrap_script(
        compiler,
        schema,
        'SELECT (SELECT sys::Database FILTER NOT .builtin).name',
        expected_cardinality_one=False,
        single_statement=True,
    )
    queries['listdbs'] = sql

    role_query = '''
        SELECT sys::Role {
            name,
            superuser,
            password,
        };
    '''
    _, sql = compile_bootstrap_script(
        compiler,
        schema,
        role_query,
        expected_cardinality_one=False,
        single_statement=True,
    )
    queries['roles'] = sql

    tids_query = '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .id IN <uuid>json_array_unpack(<json>$ids);
    '''
    _, sql = compile_bootstrap_script(
        compiler,
        schema,
        tids_query,
        expected_cardinality_one=False,
        single_statement=True,
    )
    queries['backend_tids'] = sql

    report_settings: list[str] = []
    for setname in config_spec:
        setting = config_spec[setname]
        if setting.report:
            report_settings.append(setname)

    report_configs_query = f'''
        SELECT assert_single(cfg::Config {{
            {', '.join(report_settings)}
        }});
    '''

    units = compiler._compile(
        ctx=edbcompiler.new_compiler_context(
            user_schema=schema,
            single_statement=True,
            expected_cardinality_one=True,
            json_parameters=False,
            output_format=edbcompiler.IoFormat.BINARY,
            bootstrap_mode=True,
        ),
        source=edgeql.Source.from_string(report_configs_query),
    )
    assert len(units) == 1 and len(units[0].sql) == 1

    report_configs_typedesc = units[0].out_type_id + units[0].out_type_data
    queries['report_configs'] = units[0].sql[0].decode()

    await _store_static_json_cache(
        ctx,
        'sysqueries',
        json.dumps(queries),
    )

    await _store_static_bin_cache(
        ctx,
        'report_configs_typedesc',
        report_configs_typedesc,
    )
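# For reference: the 'sysqueries' cache written above is a JSON object
# mapping query names to compiled SQL; given the code above, its keys are
# 'config', 'dbconfig', 'sysconfig', 'listdbs', 'roles', 'backend_tids'
# and 'report_configs'.  The 'report_configs_typedesc' cache holds the
# binary output-type descriptor for the report_configs query (the type id
# bytes followed by the type data, as concatenated above).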