def new_compiler():
    std_schema = _load_std_schema()
    refl_schema, layout = _load_reflection_schema()

    return edbcompiler.new_compiler(
        std_schema=std_schema,
        reflection_schema=refl_schema,
        schema_class_layout=layout,
    )
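# Hedged usage sketch (not part of the module proper): how a compiler
# built by new_compiler() is fed to compile_bootstrap_script().  The
# keyword arguments mirror the real call sites further down in this
# file; the query text is a placeholder of our own.
def _example_new_compiler_usage():
    compiler = new_compiler()
    refl_schema, _layout = _load_reflection_schema()
    _, sql = compile_bootstrap_script(
        compiler,
        refl_schema,
        'SELECT 1;',  # placeholder query, not from the original module
        expected_cardinality_one=False,
        single_statement=True,
    )
    return sql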
async def _amend_stdlib(
    ddl_text: str,
    stdlib: StdlibBits,
) -> Tuple[StdlibBits, str]:
    schema = stdlib.stdschema
    reflschema = stdlib.reflschema

    topblock = dbops.PLTopBlock()
    plans = []

    context = sd.CommandContext()
    context.stdmode = True

    for ddl_cmd in edgeql.parse_block(ddl_text):
        assert isinstance(ddl_cmd, qlast.DDLCommand)
        delta_command = s_ddl.delta_from_ddl(
            ddl_cmd, modaliases={}, schema=schema, stdmode=True)

        if debug.flags.delta_plan_input:
            debug.header('Delta Plan Input')
            debug.dump(delta_command)

        # Apply and adapt delta, build native delta plan, which
        # will also update the schema.
        schema, plan = _process_delta(delta_command, schema)
        reflschema = delta_command.apply(reflschema, context)
        plan.generate(topblock)
        plans.append(plan)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=reflschema,
        schema_class_layout=stdlib.classlayout,
    )

    compilerctx = edbcompiler.new_compiler_context(
        schema,
        bootstrap_mode=True,
    )

    for plan in plans:
        compiler._compile_schema_storage_in_delta(
            ctx=compilerctx,
            delta=plan,
            block=topblock,
        )

    sqltext = topblock.to_string()

    return stdlib._replace(stdschema=schema, reflschema=reflschema), sqltext
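# Hedged usage sketch: _amend_stdlib() is invoked like this from
# _init_stdlib() below to append the `_testmode` module on production
# builds.  `conn` is assumed to be an open backend connection.
async def _example_amend_with_testmode(conn, stdlib):
    stdlib, testmode_sql = await _amend_stdlib(
        s_std.get_std_module_text('_testmode'),
        stdlib,
    )
    await conn.execute(testmode_sql)
    return stdlib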
async def _init_stdlib(cluster, conn, testmode, global_ids):
    in_dev_mode = devmode.is_in_dev_mode()

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if specified_cache_dir:
        cache_dir = pathlib.Path(specified_cache_dir)
    else:
        cache_dir = None

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'

    src_hash = buildmeta.hash_dirs(
        buildmeta.get_cache_src_dirs(),
        extra_files=[__file__],
    )

    stdlib = buildmeta.read_data_cache(
        src_hash, stdlib_cache, source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(
        src_hash, tpldbdump_cache, source_dir=cache_dir, pickled=False)

    if stdlib is None:
        logger.info('Compiling the standard library...')
        stdlib = await _make_stdlib(in_dev_mode or testmode, global_ids)

    logger.info('Creating the necessary PostgreSQL extensions...')
    await metaschema.create_pg_extensions(conn)

    if tpldbdump is None:
        logger.info('Populating internal SQL structures...')
        await metaschema.bootstrap(conn)
        logger.info('Executing the standard library...')
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpldbdump = cluster.dump_database(
                edbdef.EDGEDB_TEMPLATE_DB,
                exclude_schemas=['edgedbinstdata', 'edgedbext'],
            )

            # Excluding the "edgedbext" schema above apparently
            # doesn't apply to extensions created in that schema,
            # so we have to resort to commenting out extension
            # statements in the dump.
            tpldbdump = re.sub(
                rb'^(CREATE|COMMENT ON) EXTENSION.*$',
                rb'-- \g<0>',
                tpldbdump,
                flags=re.MULTILINE,
            )

            global_metadata = await conn.fetchval(f'''\
                SELECT edgedb.shobj_metadata(
                    (SELECT oid FROM pg_database
                     WHERE datname = {ql(edbdef.EDGEDB_TEMPLATE_DB)}),
                    'pg_database'
                )''')

            pl_block = dbops.PLTopBlock()

            dbops.SetMetadata(
                dbops.Database(name=edbdef.EDGEDB_TEMPLATE_DB),
                json.loads(global_metadata),
            ).generate(pl_block)

            tpldbdump += b'\n' + pl_block.to_string().encode('utf-8')

            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )

            buildmeta.write_data_cache(
                stdlib,
                src_hash,
                stdlib_cache,
                target_dir=cache_dir,
            )
    else:
        logger.info('Initializing the standard library...')
        await metaschema._execute_sql_script(conn, tpldbdump.decode('utf-8'))

        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.
        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::ScalarType
            FILTER .builtin AND NOT (.abstract ?? False)
            SET {
                backend_id := sys::_get_pg_type_for_scalar_type(.id)
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            s_std.get_std_module_text('_testmode'),
            stdlib,
        )
        await conn.execute(testmode_sql)
        await metaschema.generate_support_views(
            conn,
            stdlib.reflschema,
        )

    # Make sure that schema backend_id properties are in sync with
    # the database.
    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT (.abstract ?? False);
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(
            schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    await _store_static_bin_cache(
        cluster,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'global_schema',
        pickle.dumps(stdlib.global_schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_text_cache(
        cluster,
        'local_intro_query',
        stdlib.local_intro_query,
    )

    await _store_static_text_cache(
        cluster,
        'global_intro_query',
        stdlib.global_intro_query,
    )

    await metaschema.generate_support_views(conn, stdlib.reflschema)
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )

    await metaschema.generate_more_support_functions(
        conn, compiler, stdlib.reflschema, testmode)

    return stdlib, compiler
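# Hedged sketch (an assumption, not the module's actual helper): what
# _store_static_bin_cache() used above plausibly does -- persist the
# pickled blob inside the template database so a running server can
# load it without recompiling the stdlib.  The `edgedbinstdata` schema
# name is real (it is excluded from the template dump above), but the
# instdata table, its columns, and the cluster.connect() signature are
# assumptions for illustration only.
async def _example_store_static_bin_cache(cluster, key: str, data: bytes):
    conn = await cluster.connect(database=edbdef.EDGEDB_TEMPLATE_DB)
    try:
        # Hypothetical key/value table holding the static caches.
        await conn.execute(
            'INSERT INTO edgedbinstdata.instdata (key, bin)'
            ' VALUES ($1, $2)',
            key, data,
        )
    finally:
        await conn.close()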
async def _make_stdlib(testmode: bool, global_ids) -> StdlibBits:
    schema = s_schema.ChainedSchema(
        s_schema.FlatSchema(),
        s_schema.FlatSchema(),
        s_schema.FlatSchema(),
    )
    schema, _ = s_mod.Module.create_in_schema(
        schema,
        name=sn.UnqualName('__derived__'),
    )

    current_block = dbops.PLTopBlock()

    std_texts = []
    for modname in s_schema.STD_SOURCES:
        std_texts.append(s_std.get_std_module_text(modname))

    if testmode:
        std_texts.append(
            s_std.get_std_module_text(sn.UnqualName('_testmode')))

    ddl_text = '\n'.join(std_texts)
    types: Set[uuid.UUID] = set()
    std_plans: List[sd.Command] = []

    for ddl_cmd in edgeql.parse_block(ddl_text):
        assert isinstance(ddl_cmd, qlast.DDLCommand)
        delta_command = s_ddl.delta_from_ddl(
            ddl_cmd, modaliases={}, schema=schema, stdmode=True)

        if debug.flags.delta_plan_input:
            debug.header('Delta Plan Input')
            debug.dump(delta_command)

        # Apply and adapt delta, build native delta plan, which
        # will also update the schema.
        schema, plan = _process_delta(delta_command, schema)
        std_plans.append(delta_command)
        types.update(plan.new_types)
        plan.generate(current_block)

    _, schema_version = s_std.make_schema_version(schema)
    schema, plan = _process_delta(schema_version, schema)
    std_plans.append(schema_version)
    plan.generate(current_block)

    stdglobals = '\n'.join([
        f'''CREATE SUPERUSER ROLE {edbdef.EDGEDB_SUPERUSER} {{
            SET id := <uuid>'{global_ids[edbdef.EDGEDB_SUPERUSER]}'
        }};''',
    ])

    schema = await _execute_edgeql_ddl(schema, stdglobals)

    _, global_schema_version = s_std.make_global_schema_version(schema)
    schema, plan = _process_delta(global_schema_version, schema)
    std_plans.append(global_schema_version)
    plan.generate(current_block)

    reflection = s_refl.generate_structure(schema)
    reflschema, reflplan = _process_delta(
        reflection.intro_schema_delta, schema)

    assert current_block is not None
    reflplan.generate(current_block)
    subblock = current_block.add_block()

    compiler = edbcompiler.new_compiler(
        std_schema=schema.get_top_schema(),
        reflection_schema=reflschema.get_top_schema(),
        schema_class_layout=reflection.class_layout,  # type: ignore
    )

    compilerctx = edbcompiler.new_compiler_context(
        user_schema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        bootstrap_mode=True,
    )

    for std_plan in std_plans:
        compiler._compile_schema_storage_in_delta(
            ctx=compilerctx,
            delta=std_plan,
            block=subblock,
        )

    compilerctx = edbcompiler.new_compiler_context(
        user_schema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        bootstrap_mode=True,
        internal_schema_mode=True,
    )
    compiler._compile_schema_storage_in_delta(
        ctx=compilerctx,
        delta=reflection.intro_schema_delta,
        block=subblock,
    )

    sqltext = current_block.to_string()

    compilerctx = edbcompiler.new_compiler_context(
        user_schema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        schema_reflection_mode=True,
        output_format=edbcompiler.IoFormat.JSON_ELEMENTS,
    )

    # The introspection query bits are returned in chunks
    # because it's a large UNION and we currently generate SQL
    # that is much harder for Postgres to plan as opposed to a
    # straight flat UNION.
    sql_intro_local_parts = []
    sql_intro_global_parts = []

    for intropart in reflection.local_intro_parts:
        sql_intro_local_parts.append(
            compile_single_query(
                intropart,
                compiler=compiler,
                compilerctx=compilerctx,
            ),
        )

    for intropart in reflection.global_intro_parts:
        sql_intro_global_parts.append(
            compile_single_query(
                intropart,
                compiler=compiler,
                compilerctx=compilerctx,
            ),
        )

    local_intro_sql = ' UNION ALL '.join(sql_intro_local_parts)
    local_intro_sql = f'''
        WITH intro(c) AS ({local_intro_sql})
        SELECT json_agg(intro.c) FROM intro
    '''

    global_intro_sql = ' UNION ALL '.join(sql_intro_global_parts)
    global_intro_sql = f'''
        WITH intro(c) AS ({global_intro_sql})
        SELECT json_agg(intro.c) FROM intro
    '''

    return StdlibBits(
        stdschema=schema.get_top_schema(),
        reflschema=reflschema.get_top_schema(),
        global_schema=schema.get_global_schema(),
        sqltext=sqltext,
        types=types,
        classlayout=reflection.class_layout,
        local_intro_query=local_intro_sql,
        global_intro_query=global_intro_sql,
    )
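# For reference, the shape of StdlibBits as inferred from the
# constructor call above -- a sketch only; the actual NamedTuple
# definition and precise field types live elsewhere in this module:
#
# class StdlibBits(NamedTuple):
#     stdschema: s_schema.FlatSchema      # user-visible std schema
#     reflschema: s_schema.FlatSchema     # std schema plus reflection schema
#     global_schema: s_schema.FlatSchema  # instance-global objects (roles)
#     sqltext: str                        # SQL to create the stdlib
#     types: Set[uuid.UUID]               # IDs of all types created
#     classlayout: Dict                   # schema class reflection layout
#     local_intro_query: str              # per-database introspection SQL
#     global_intro_query: str             # global introspection SQL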
async def _init_stdlib(cluster, conn, testmode, global_ids):
    in_dev_mode = devmode.is_in_dev_mode()

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if specified_cache_dir:
        cache_dir = pathlib.Path(specified_cache_dir)
    else:
        cache_dir = None

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'
    src_hash = buildmeta.hash_dirs(CACHE_SRC_DIRS)
    stdlib = buildmeta.read_data_cache(
        src_hash, stdlib_cache, source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(
        src_hash, tpldbdump_cache, source_dir=cache_dir, pickled=False)

    if stdlib is None:
        stdlib = await _make_stdlib(in_dev_mode or testmode, global_ids)
        cache_hit = False
    else:
        cache_hit = True

    if tpldbdump is None:
        await _ensure_meta_schema(conn)
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpldbdump = cluster.dump_database(
                edbdef.EDGEDB_TEMPLATE_DB,
                exclude_schema='edgedbinstdata')
            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )
    else:
        cluster.restore_database(edbdef.EDGEDB_TEMPLATE_DB, tpldbdump)

        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.
        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
            bootstrap_mode=True,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::ScalarType
            FILTER .builtin AND NOT .is_abstract
            SET {
                backend_id := sys::_get_pg_type_for_scalar_type(.id)
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            s_std.get_std_module_text('_testmode'),
            stdlib,
        )
        await conn.execute(testmode_sql)
        await metaschema.generate_support_views(
            cluster,
            conn,
            stdlib.reflschema,
        )

    # Make sure that schema backend_id properties are in sync with
    # the database.
    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
        bootstrap_mode=True,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT .is_abstract;
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(
            schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    if not cache_hit and (in_dev_mode or specified_cache_dir):
        buildmeta.write_data_cache(
            stdlib,
            src_hash,
            stdlib_cache,
            target_dir=cache_dir,
        )

    await _store_static_bin_cache(
        cluster,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_json_cache(
        cluster,
        'introquery',
        json.dumps(stdlib.introquery),
    )

    await metaschema.generate_support_views(cluster, conn, stdlib.reflschema)
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
        bootstrap_mode=True,
    )

    return schema, stdlib.reflschema, compiler
async def _make_stdlib(testmode: bool, global_ids) -> StdlibBits:
    schema = s_schema.Schema()
    schema, _ = s_mod.Module.create_in_schema(schema, name='__derived__')

    current_block = dbops.PLTopBlock()

    std_texts = []
    for modname in s_schema.STD_LIB + ('stdgraphql',):
        std_texts.append(s_std.get_std_module_text(modname))

    if testmode:
        std_texts.append(s_std.get_std_module_text('_testmode'))

    ddl_text = '\n'.join(std_texts)
    types: Set[uuid.UUID] = set()
    std_plans: List[sd.Command] = []

    for ddl_cmd in edgeql.parse_block(ddl_text):
        delta_command = s_ddl.delta_from_ddl(
            ddl_cmd, modaliases={}, schema=schema, stdmode=True)

        if debug.flags.delta_plan_input:
            debug.header('Delta Plan Input')
            debug.dump(delta_command)

        # Apply and adapt delta, build native delta plan, which
        # will also update the schema.
        schema, plan = _process_delta(delta_command, schema)
        std_plans.append(delta_command)
        types.update(plan.new_types)
        plan.generate(current_block)

    stdglobals = '\n'.join([
        f'''CREATE SUPERUSER ROLE {edbdef.EDGEDB_SUPERUSER} {{
            SET id := <uuid>'{global_ids[edbdef.EDGEDB_SUPERUSER]}'
        }};''',
        f'''CREATE DATABASE {edbdef.EDGEDB_TEMPLATE_DB} {{
            SET id := <uuid>'{global_ids[edbdef.EDGEDB_TEMPLATE_DB]}'
        }};''',
        f'CREATE DATABASE {edbdef.EDGEDB_SUPERUSER_DB};',
    ])

    context = sd.CommandContext(stdmode=True)

    for ddl_cmd in edgeql.parse_block(stdglobals):
        delta_command = s_ddl.delta_from_ddl(
            ddl_cmd, modaliases={}, schema=schema, stdmode=True)
        schema = delta_command.apply(schema, context)

    refldelta, classlayout, introparts = s_refl.generate_structure(schema)
    reflschema, reflplan = _process_delta(refldelta, schema)

    std_plans.append(refldelta)

    assert current_block is not None
    reflplan.generate(current_block)
    subblock = current_block.add_block()

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=reflschema,
        schema_class_layout=classlayout,
        bootstrap_mode=True,
    )

    compilerctx = edbcompiler.new_compiler_context(reflschema)

    for std_plan in std_plans:
        compiler._compile_schema_storage_in_delta(
            ctx=compilerctx,
            delta=std_plan,
            block=subblock,
            is_internal_reflection=std_plan is refldelta,
            stdmode=True,
        )

    sqltext = current_block.to_string()

    compilerctx = edbcompiler.new_compiler_context(
        reflschema,
        schema_reflection_mode=True,
        output_format=edbcompiler.IoFormat.JSON_ELEMENTS,
    )

    # The introspection query bits are returned in chunks
    # because it's a large UNION and we currently generate SQL
    # that is much harder for Postgres to plan as opposed to a
    # straight flat UNION.
    sql_introparts = []

    for intropart in introparts:
        introtokens = tokenizer.tokenize(intropart.encode())
        units = compiler._compile(ctx=compilerctx, tokens=introtokens)
        assert len(units) == 1 and len(units[0].sql) == 1
        sql_intropart = units[0].sql[0].decode()
        sql_introparts.append(sql_intropart)

    introsql = ' UNION ALL '.join(sql_introparts)

    return StdlibBits(
        stdschema=schema,
        reflschema=reflschema,
        sqltext=sqltext,
        types=types,
        classlayout=classlayout,
        introquery=introsql,
    )
async def _init_stdlib(
    ctx: BootstrapContext,
    testmode: bool,
    global_ids: Mapping[str, uuid.UUID],
) -> Tuple[StdlibBits, config.Spec, edbcompiler.Compiler]:
    in_dev_mode = devmode.is_in_dev_mode()
    conn = ctx.conn
    cluster = ctx.cluster

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if not specified_cache_dir:
        cache_dir = None
    else:
        cache_dir = pathlib.Path(specified_cache_dir)

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'

    src_hash = buildmeta.hash_dirs(
        buildmeta.get_cache_src_dirs(),
        extra_files=[__file__],
    )

    stdlib = buildmeta.read_data_cache(
        src_hash, stdlib_cache, source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(
        src_hash, tpldbdump_cache, source_dir=cache_dir, pickled=False)

    if stdlib is None:
        logger.info('Compiling the standard library...')
        stdlib = await _make_stdlib(ctx, in_dev_mode or testmode, global_ids)

    logger.info('Creating the necessary PostgreSQL extensions...')
    await metaschema.create_pg_extensions(conn)

    config_spec = config.load_spec_from_schema(stdlib.stdschema)
    config.set_settings(config_spec)

    if tpldbdump is None:
        logger.info('Populating internal SQL structures...')
        await metaschema.bootstrap(conn, config_spec)
        logger.info('Executing the standard library...')
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpl_db_name = edbdef.EDGEDB_TEMPLATE_DB
            tpl_pg_db_name = cluster.get_db_name(tpl_db_name)
            tpl_pg_db_name_dyn = (
                f"edgedb.get_database_backend_name({ql(tpl_db_name)})")
            tpldbdump = await cluster.dump_database(
                tpl_pg_db_name,
                exclude_schemas=['edgedbinstdata', 'edgedbext'],
                dump_object_owners=False,
            )

            # Excluding the "edgedbext" schema above apparently
            # doesn't apply to extensions created in that schema,
            # so we have to resort to commenting out extension
            # statements in the dump.
            tpldbdump = re.sub(
                rb'^(CREATE|COMMENT ON) EXTENSION.*$',
                rb'-- \g<0>',
                tpldbdump,
                flags=re.MULTILINE,
            )

            global_metadata = await conn.fetchval(
                f'SELECT edgedb.get_database_metadata({ql(tpl_db_name)})',
            )
            global_metadata = json.loads(global_metadata)

            pl_block = dbops.PLTopBlock()

            set_metadata_text = dbops.SetMetadata(
                dbops.Database(name='__dummy_placeholder_database__'),
                global_metadata,
            ).code(pl_block)
            set_metadata_text = set_metadata_text.replace(
                '__dummy_placeholder_database__',
                f"' || quote_ident({tpl_pg_db_name_dyn}) || '",
            )

            set_single_db_metadata_text = dbops.SetSingleDBMetadata(
                edbdef.EDGEDB_TEMPLATE_DB, global_metadata
            ).code(pl_block)

            pl_block.add_command(textwrap.dedent(f"""\
                IF (edgedb.get_backend_capabilities()
                    & {int(params.BackendCapabilities.CREATE_DATABASE)}) != 0
                THEN
                {textwrap.indent(set_metadata_text, '    ')}
                ELSE
                {textwrap.indent(set_single_db_metadata_text, '    ')}
                END IF
                """))

            text = pl_block.to_string()

            tpldbdump += b'\n' + text.encode('utf-8')

            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )

            buildmeta.write_data_cache(
                stdlib,
                src_hash,
                stdlib_cache,
                target_dir=cache_dir,
            )
    else:
        logger.info('Initializing the standard library...')
        await metaschema._execute_sql_script(conn, tpldbdump.decode('utf-8'))
        # Restore the search_path as the dump might have altered it.
        await conn.execute(
            "SELECT pg_catalog.set_config('search_path', 'edgedb', false)")

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            ctx,
            s_std.get_std_module_text(sn.UnqualName('_testmode')),
            stdlib,
        )
        await conn.execute(testmode_sql)
        # _testmode includes extra config settings, so make sure
        # those are picked up.
        config_spec = config.load_spec_from_schema(stdlib.stdschema)
        config.set_settings(config_spec)

    # Make sure that schema backend_id properties are in sync with
    # the database.
    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT (.abstract ?? False);
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(
            schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    await _store_static_bin_cache(
        ctx,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'global_schema',
        pickle.dumps(stdlib.global_schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_text_cache(
        ctx,
        'local_intro_query',
        stdlib.local_intro_query,
    )

    await _store_static_text_cache(
        ctx,
        'global_intro_query',
        stdlib.global_intro_query,
    )

    await metaschema.generate_support_views(
        conn, stdlib.reflschema, cluster.get_runtime_params())
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )

    await metaschema.generate_more_support_functions(
        conn, compiler, stdlib.reflschema, testmode)

    if tpldbdump is not None:
        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.
        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::Type
            FILTER
                .builtin
                AND NOT (.abstract ?? False)
                AND schema::Type IS schema::ScalarType | schema::Tuple
            SET {
                backend_id := sys::_get_pg_type_for_edgedb_type(
                    .id,
                    <uuid>{}
                )
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::Array
            FILTER
                .builtin
                AND NOT (.abstract ?? False)
            SET {
                backend_id := sys::_get_pg_type_for_edgedb_type(
                    .id,
                    .element_type.id,
                )
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    await _store_static_json_cache(
        ctx,
        'configspec',
        config.spec_to_json(config_spec),
    )

    return stdlib, config_spec, compiler