Example 1
async def _create_edgedb_template_database(
    ctx: BootstrapContext,
) -> uuid.UUID:
    backend_params = ctx.cluster.get_runtime_params()
    have_c_utf8 = backend_params.has_c_utf8_locale

    logger.info('Creating template database...')
    block = dbops.SQLBlock()
    dbid = uuidgen.uuid1mc()
    db = dbops.Database(
        ctx.cluster.get_db_name(edbdef.EDGEDB_TEMPLATE_DB),
        owner=ctx.cluster.get_role_name(edbdef.EDGEDB_SUPERUSER),
        is_template=True,
        lc_collate='C',
        lc_ctype='C.UTF-8' if have_c_utf8 else 'en_US.UTF-8',
        encoding='UTF8',
        metadata=dict(
            id=str(dbid),
            tenant_id=backend_params.tenant_id,
            name=edbdef.EDGEDB_TEMPLATE_DB,
            builtin=True,
        ),
    )

    dbops.CreateDatabase(db, template='template0').generate(block)
    await _execute_block(ctx.conn, block)
    return dbid
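
All of the bootstrap helpers in these examples follow the same accumulate-then-execute pattern: DDL operations are rendered into a block object, and the whole block is then sent to the backend in one call. Below is a minimal sketch of that pattern; SQLBlock here is a hypothetical stand-in for dbops.SQLBlock, not the real implementation.

# Hypothetical stand-in for dbops.SQLBlock, only to illustrate the
# accumulate-then-execute flow used throughout these examples.
class SQLBlock:
    def __init__(self) -> None:
        self._commands: list[str] = []

    def add_command(self, sql: str) -> None:
        self._commands.append(sql)

    def to_string(self) -> str:
        # Render all accumulated statements as a single SQL script.
        return '\n'.join(c.rstrip().rstrip(';') + ';' for c in self._commands)

block = SQLBlock()
block.add_command('CREATE DATABASE demo')
block.add_command("COMMENT ON DATABASE demo IS 'bootstrap'")
print(block.to_string())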
Example 2
async def _create_edgedb_template_database(cluster, conn):
    instance_params = cluster.get_runtime_params().instance_params
    capabilities = instance_params.capabilities
    have_c_utf8 = (capabilities & pgcluster.BackendCapabilities.C_UTF8_LOCALE)

    logger.info('Creating template database...')
    block = dbops.SQLBlock()
    dbid = uuidgen.uuid1mc()
    db = dbops.Database(
        edbdef.EDGEDB_TEMPLATE_DB,
        owner=edbdef.EDGEDB_SUPERUSER,
        is_template=True,
        template='template0',
        lc_collate='C',
        lc_ctype='C.UTF-8' if have_c_utf8 else 'en_US.UTF-8',
        encoding='UTF8',
        metadata=dict(
            id=str(dbid),
            builtin=True,
        ),
    )

    dbops.CreateDatabase(db).generate(block)
    await _execute_block(conn, block)
    return dbid
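
Example 2 derives have_c_utf8 from a capabilities bitmask rather than the boolean accessor used in Example 1. A minimal sketch of how such a bitwise test works, using hypothetical flag values (the real pgcluster.BackendCapabilities members may differ):

import enum

# Hypothetical flag values; the real pgcluster.BackendCapabilities
# enumeration may define different members and bit positions.
class BackendCapabilities(enum.IntFlag):
    NONE = 0
    C_UTF8_LOCALE = 1 << 0
    CREATE_DATABASE = 1 << 1

capabilities = (
    BackendCapabilities.C_UTF8_LOCALE | BackendCapabilities.CREATE_DATABASE
)
# The bitwise AND is non-zero iff the flag is present.
have_c_utf8 = bool(capabilities & BackendCapabilities.C_UTF8_LOCALE)
print(have_c_utf8)  # True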
Example 3
async def _ensure_edgedb_database(
    conn,
    database,
    owner,
    *,
    cluster,
    builtin: bool = False,
    objid: Optional[uuid.UUID] = None,
):
    result = await _get_db_info(conn, database)
    if not result:
        logger.info(f'Creating database: {database}')

        block = dbops.SQLBlock()
        if objid is None:
            objid = uuidgen.uuid1mc()
        db = dbops.Database(
            database,
            owner=owner,
            metadata=dict(
                id=str(objid),
                builtin=builtin,
            ),
        )
        dbops.CreateDatabase(db).generate(block)
        await _execute_block(conn, block)
Example 4
async def _create_edgedb_database(
    ctx: BootstrapContext,
    database: str,
    owner: str,
    *,
    builtin: bool = False,
    objid: Optional[uuid.UUID] = None,
) -> uuid.UUID:
    logger.info(f'Creating database: {database}')
    block = dbops.SQLBlock()
    if objid is None:
        objid = uuidgen.uuid1mc()
    instance_params = ctx.cluster.get_runtime_params().instance_params
    db = dbops.Database(
        ctx.cluster.get_db_name(database),
        owner=ctx.cluster.get_role_name(owner),
        metadata=dict(
            id=str(objid),
            tenant_id=instance_params.tenant_id,
            name=database,
            builtin=builtin,
        ),
    )
    tpl_db = ctx.cluster.get_db_name(edbdef.EDGEDB_TEMPLATE_DB)
    dbops.CreateDatabase(db, template=tpl_db).generate(block)
    await _execute_block(ctx.conn, block)
    return objid
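
Note how Example 4, unlike Example 3, routes every name through ctx.cluster.get_db_name() and get_role_name() and records a tenant_id in the metadata. Conceptually this maps a logical EdgeDB name to a tenant-scoped physical Postgres name; the prefix scheme below is purely hypothetical:

# Hypothetical illustration of tenant-scoped naming; the actual mapping
# implemented by cluster.get_db_name() may differ.
def get_db_name(tenant_id: str, database: str) -> str:
    return f'{tenant_id}_{database}'

print(get_db_name('t42', 'edgedb'))  # t42_edgedb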
Example 5
async def _ensure_edgedb_database(conn, database, owner, *, cluster):
    result = await _get_db_info(conn, database)
    if not result:
        logger.info(f'Creating database: {database}')

        block = dbops.SQLBlock()
        db = dbops.Database(database, owner=owner)
        dbops.CreateDatabase(db).generate(block)
        await _execute_block(conn, block)
Example 6
async def _ensure_edgedb_template_database(cluster, conn):
    result = await _get_db_info(conn, edbdef.EDGEDB_TEMPLATE_DB)

    if not result:
        logger.info('Creating template database...')
        block = dbops.SQLBlock()
        dbid = uuidgen.uuid1mc()
        db = dbops.Database(
            edbdef.EDGEDB_TEMPLATE_DB,
            owner=edbdef.EDGEDB_SUPERUSER,
            is_template=True,
            template='template0',
            lc_collate='C',
            lc_ctype=('C.UTF-8' if cluster.supports_c_utf8_locale()
                      else 'en_US.UTF-8'),
            encoding='UTF8',
            metadata=dict(
                id=str(dbid),
                builtin=True,
            ),
        )
        dbops.CreateDatabase(db).generate(block)
        await _execute_block(conn, block)

        return dbid
    else:
        alter = []
        alter_owner = False

        if not result['datistemplate']:
            alter.append('IS_TEMPLATE = true')

        if result['rolname'] != edbdef.EDGEDB_SUPERUSER:
            alter_owner = True

        if alter or alter_owner:
            logger.info('Altering template database parameters...')
            if alter:
                await _execute(
                    conn,
                    'ALTER DATABASE {} WITH {}'.format(
                        edbdef.EDGEDB_TEMPLATE_DB,
                        ' '.join(alter)))

            if alter_owner:
                await _execute(
                    conn,
                    'ALTER DATABASE {} OWNER TO {}'.format(
                        edbdef.EDGEDB_TEMPLATE_DB,
                        edbdef.EDGEDB_SUPERUSER))

        return None
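
Example 6 is the only variant that reconciles an already-existing template database instead of assuming a clean slate: it inspects the current catalog row and emits only the ALTER DATABASE statements needed to converge on the desired state. A condensed sketch of that planning step, with the row represented as a plain dict:

# Sketch of the converge-don't-recreate logic; `row` stands in for the
# record returned by _get_db_info(), and the database name is assumed
# to be a safe identifier here (production code should quote it).
def plan_template_fixups(row: dict, dbname: str, superuser: str) -> list:
    stmts = []
    if not row['datistemplate']:
        stmts.append(f'ALTER DATABASE {dbname} WITH IS_TEMPLATE = true')
    if row['rolname'] != superuser:
        stmts.append(f'ALTER DATABASE {dbname} OWNER TO {superuser}')
    return stmts

print(plan_template_fixups(
    {'datistemplate': False, 'rolname': 'postgres'},
    'edgedb', 'edgedb_superuser',
))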
Example 7
async def _configure(schema, conn, cluster, *, insecure=False, testmode=False):
    scripts = []

    if not testmode:
        memory_kb = psutil.virtual_memory().total // 1024
        settings = {
            'shared_buffers': f'"{int(memory_kb * 0.2)}kB"',
            'effective_cache_size': f'"{int(memory_kb * 0.5)}kB"',
            'query_work_mem': f'"{6 * (2 ** 10)}kB"',
        }

        for setting, value in settings.items():
            scripts.append(f'''
                CONFIGURE SYSTEM SET {setting} := {value};
            ''')
    else:
        settings = {}

    if insecure:
        scripts.append('''
            CONFIGURE SYSTEM INSERT Auth {
                priority := 0,
                method := (INSERT Trust),
            };
        ''')

    config_spec = config.get_settings()

    for script in scripts:
        _, sql = compiler.compile_bootstrap_script(
            schema, schema, script, single_statement=True)

        if debug.flags.bootstrap:
            debug.header('Bootstrap')
            debug.dump_code(sql, lexer='sql')

        config_op_data = await conn.fetchval(sql)
        if config_op_data is not None and isinstance(config_op_data, str):
            config_op = config.Operation.from_json(config_op_data)
            settings = config_op.apply(config_spec, immutables.Map())

    config_json = config.to_json(config_spec, settings)
    block = dbops.PLTopBlock()
    dbops.UpdateMetadata(
        dbops.Database(name=edbdef.EDGEDB_TEMPLATE_DB),
        {'sysconfig': json.loads(config_json)},
    ).generate(block)

    await _execute_block(conn, block)
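
The sizing arithmetic in Example 7 is easy to sanity-check by hand. For instance, on a host with 8 GiB of RAM (a hypothetical figure standing in for psutil.virtual_memory().total):

# Worked example of the Example 7 sizing math for an 8 GiB host.
memory_kb = (8 * 2 ** 30) // 1024              # 8388608 kB
shared_buffers = int(memory_kb * 0.2)          # 1677721 kB, ~1.6 GiB
effective_cache_size = int(memory_kb * 0.5)    # 4194304 kB, 4 GiB
query_work_mem = 6 * (2 ** 10)                 # 6144 kB, 6 MiB
print(f'CONFIGURE SYSTEM SET shared_buffers := "{shared_buffers}kB";')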
Example 8
async def _configure(
    schema: s_schema.Schema,
    compiler: edbcompiler.Compiler,
    conn: asyncpg_con.Connection,
    cluster: pgcluster.BaseCluster,
    *,
    insecure: bool = False,
) -> None:
    config_spec = config.get_settings()

    scripts = []
    settings: Mapping[str, config.SettingValue] = {}

    if insecure:
        scripts.append('''
            CONFIGURE SYSTEM INSERT Auth {
                priority := 0,
                method := (INSERT Trust),
            };
        ''')

    for script in scripts:
        _, sql = compile_bootstrap_script(
            compiler,
            schema,
            script,
            single_statement=True,
        )

        if debug.flags.bootstrap:
            debug.header('Bootstrap')
            debug.dump_code(sql, lexer='sql')

        config_op_data = await conn.fetchval(sql)
        if config_op_data is not None and isinstance(config_op_data, str):
            config_op = config.Operation.from_json(config_op_data)
            settings = config_op.apply(config_spec, immutables.Map())

    config_json = config.to_json(config_spec, settings, include_source=False)
    block = dbops.PLTopBlock()
    dbops.UpdateMetadata(
        dbops.Database(name=edbdef.EDGEDB_TEMPLATE_DB),
        {
            'sysconfig': json.loads(config_json)
        },
    ).generate(block)

    await _execute_block(conn, block)
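
Both _configure variants rebind settings on each loop iteration because immutables.Map is a persistent (immutable) mapping: updates return a new map rather than mutating in place. A self-contained demonstration using the same third-party library the code above already imports:

import immutables

# immutables.Map never mutates in place; set() returns a new map.
settings = immutables.Map()
updated = settings.set('auth_method', 'Trust')
print(dict(settings))  # {}
print(dict(updated))   # {'auth_method': 'Trust'}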
Example 9
async def _configure(
    ctx: BootstrapContext,
    config_spec: config.Spec,
    schema: s_schema.Schema,
    compiler: edbcompiler.Compiler,
) -> None:
    settings: Mapping[str, config.SettingValue] = {}

    config_json = config.to_json(config_spec, settings, include_source=False)
    block = dbops.PLTopBlock()
    metadata = {'sysconfig': json.loads(config_json)}
    if ctx.cluster.get_runtime_params().has_create_database:
        dbops.UpdateMetadata(
            dbops.Database(
                name=ctx.cluster.get_db_name(edbdef.EDGEDB_SYSTEM_DB)
            ),
            metadata,
        ).generate(block)
    else:
        dbops.UpdateSingleDBMetadata(
            edbdef.EDGEDB_SYSTEM_DB, metadata,
        ).generate(block)

    await _execute_block(ctx.conn, block)

    backend_params = ctx.cluster.get_runtime_params()
    for setname in config_spec:
        setting = config_spec[setname]
        if (
            setting.backend_setting
            and setting.default is not None
            and (
                # Do not attempt to run CONFIGURE INSTANCE on
                # backends that don't support it.
                # TODO: this should be replaced by instance-wide
                #       emulation at backend connection time.
                backend_params.has_configfile_access
            )
        ):
            if isinstance(setting.default, statypes.Duration):
                val = f'<std::duration>"{setting.default.to_iso8601()}"'
            else:
                val = repr(setting.default)
            script = f'''
                CONFIGURE INSTANCE SET {setting.name} := {val};
            '''
            schema, sql = compile_bootstrap_script(compiler, schema, script)
            await _execute_ddl(ctx.conn, sql)
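
The value-rendering branch at the end of Example 9 can be isolated into a tiny helper. The sketch below uses duck typing in place of the isinstance check on statypes.Duration:

# Sketch of Example 9's default-value rendering; any object with a
# to_iso8601() method stands in for statypes.Duration.
def render_default(default) -> str:
    if hasattr(default, 'to_iso8601'):
        return f'<std::duration>"{default.to_iso8601()}"'
    return repr(default)

print(render_default(10))    # 10
print(render_default(True))  # True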
Example 10
async def _set_edgedb_database_metadata(
    ctx: BootstrapContext,
    database: str,
    *,
    objid: Optional[uuid.UUID] = None,
) -> uuid.UUID:
    logger.info(f'Configuring database: {database}')
    block = dbops.SQLBlock()
    if objid is None:
        objid = uuidgen.uuid1mc()
    instance_params = ctx.cluster.get_runtime_params().instance_params
    db = dbops.Database(ctx.cluster.get_db_name(database))
    metadata = dict(
        id=str(objid),
        tenant_id=instance_params.tenant_id,
        name=database,
        builtin=False,
    )
    dbops.SetMetadata(db, metadata).generate(block)
    await _execute_block(ctx.conn, block)
    return objid
Example 11
async def _create_edgedb_database(
    conn,
    database,
    owner,
    *,
    builtin: bool = False,
    objid: Optional[uuid.UUID] = None,
) -> uuid.UUID:
    logger.info(f'Creating database: {database}')
    block = dbops.SQLBlock()
    if objid is None:
        objid = uuidgen.uuid1mc()
    db = dbops.Database(
        database,
        owner=owner,
        metadata=dict(
            id=str(objid),
            builtin=builtin,
        ),
    )
    dbops.CreateDatabase(db).generate(block)
    await _execute_block(conn, block)
    return objid
Example 12
async def _ensure_edgedb_database(conn, database, owner, *, cluster):
    result = await _get_db_info(conn, database)
    if not result:
        logger.info(f'Creating database: {database}')

        block = dbops.SQLBlock()
        db = dbops.Database(database, owner=owner)
        dbops.CreateDatabase(db).generate(block)
        await _execute_block(conn, block)

        if owner != edgedb_defines.EDGEDB_SUPERUSER:
            block = dbops.SQLBlock()
            reassign = dbops.ReassignOwned(edgedb_defines.EDGEDB_SUPERUSER,
                                           owner)
            reassign.generate(block)

            dbconn = await cluster.connect(
                database=database, user=edgedb_defines.EDGEDB_SUPERUSER)

            try:
                await _execute_block(dbconn, block)
            finally:
                await dbconn.close()
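
Example 12 opens a second connection because REASSIGN OWNED operates on objects inside the current database, so it must be executed while connected to the newly created database. The statement produced by dbops.ReassignOwned is, in essence, the following (a sketch; the real class also handles identifier quoting):

# Conceptual form of the statement emitted by dbops.ReassignOwned;
# identifier quoting is omitted for brevity.
def reassign_owned(old_role: str, new_role: str) -> str:
    return f'REASSIGN OWNED BY {old_role} TO {new_role}'

print(reassign_owned('edgedb_superuser', 'app_owner'))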
Example 13
async def _init_stdlib(cluster, conn, testmode, global_ids):
    in_dev_mode = devmode.is_in_dev_mode()

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if specified_cache_dir:
        cache_dir = pathlib.Path(specified_cache_dir)
    else:
        cache_dir = None

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'
    src_hash = buildmeta.hash_dirs(
        buildmeta.get_cache_src_dirs(),
        extra_files=[__file__],
    )

    stdlib = buildmeta.read_data_cache(src_hash,
                                       stdlib_cache,
                                       source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(src_hash,
                                          tpldbdump_cache,
                                          source_dir=cache_dir,
                                          pickled=False)

    if stdlib is None:
        logger.info('Compiling the standard library...')
        stdlib = await _make_stdlib(in_dev_mode or testmode, global_ids)

    logger.info('Creating the necessary PostgreSQL extensions...')
    await metaschema.create_pg_extensions(conn)

    if tpldbdump is None:
        logger.info('Populating internal SQL structures...')
        await metaschema.bootstrap(conn)
        logger.info('Executing the standard library...')
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpldbdump = cluster.dump_database(
                edbdef.EDGEDB_TEMPLATE_DB,
                exclude_schemas=['edgedbinstdata', 'edgedbext'],
            )

            # Excluding the "edgedbext" schema above apparently
            # doesn't apply to extensions created in that schema,
            # so we have to resort to commenting out extension
            # statements in the dump.
            tpldbdump = re.sub(
                rb'^(CREATE|COMMENT ON) EXTENSION.*$',
                rb'-- \g<0>',
                tpldbdump,
                flags=re.MULTILINE,
            )

            global_metadata = await conn.fetchval(f'''\
                SELECT edgedb.shobj_metadata(
                    (SELECT oid FROM pg_database
                     WHERE datname = {ql(edbdef.EDGEDB_TEMPLATE_DB)}),
                    'pg_database'
                )''')

            pl_block = dbops.PLTopBlock()

            dbops.SetMetadata(
                dbops.Database(name=edbdef.EDGEDB_TEMPLATE_DB),
                json.loads(global_metadata),
            ).generate(pl_block)

            tpldbdump += b'\n' + pl_block.to_string().encode('utf-8')

            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )

            buildmeta.write_data_cache(
                stdlib,
                src_hash,
                stdlib_cache,
                target_dir=cache_dir,
            )
    else:
        logger.info('Initializing the standard library...')
        await metaschema._execute_sql_script(conn, tpldbdump.decode('utf-8'))

        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.

        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::ScalarType
            FILTER .builtin AND NOT (.abstract ?? False)
            SET {
                backend_id := sys::_get_pg_type_for_scalar_type(.id)
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            s_std.get_std_module_text('_testmode'),
            stdlib,
        )
        await conn.execute(testmode_sql)
        await metaschema.generate_support_views(
            conn,
            stdlib.reflschema,
        )

    # Make sure that schema backend_id properties are in sync with
    # the database.

    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT (.abstract ?? False);
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    await _store_static_bin_cache(
        cluster,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'global_schema',
        pickle.dumps(stdlib.global_schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_text_cache(
        cluster,
        'local_intro_query',
        stdlib.local_intro_query,
    )

    await _store_static_text_cache(
        cluster,
        'global_intro_query',
        stdlib.global_intro_query,
    )

    await metaschema.generate_support_views(conn, stdlib.reflschema)
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )

    await metaschema.generate_more_support_functions(conn, compiler,
                                                     stdlib.reflschema,
                                                     testmode)

    return stdlib, compiler
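
Examples 13 and 14 both key their caches on a hash of the source tree, so a cached artifact is reused only while the inputs that produced it are unchanged. A minimal sketch of that scheme, with hash_files standing in for buildmeta.hash_dirs and the read/write helpers standing in for their buildmeta counterparts:

import hashlib
import pathlib
import pickle

# Hypothetical stand-in for buildmeta.hash_dirs(): hash the inputs that
# the cached artifact was built from.
def hash_files(paths) -> str:
    h = hashlib.sha256()
    for p in sorted(paths):
        h.update(pathlib.Path(p).read_bytes())
    return h.hexdigest()

def read_data_cache(src_hash: str, cache_file: pathlib.Path):
    # Return the cached payload only if it was built from identical sources.
    if cache_file.exists():
        cached_hash, payload = pickle.loads(cache_file.read_bytes())
        if cached_hash == src_hash:
            return payload
    return None  # missing or stale: the caller recompiles and rewrites

def write_data_cache(payload, src_hash: str, cache_file: pathlib.Path) -> None:
    cache_file.write_bytes(pickle.dumps((src_hash, payload)))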
Example 14
async def _init_stdlib(
    ctx: BootstrapContext,
    testmode: bool,
    global_ids: Mapping[str, uuid.UUID],
) -> Tuple[StdlibBits, config.Spec, edbcompiler.Compiler]:
    in_dev_mode = devmode.is_in_dev_mode()
    conn = ctx.conn
    cluster = ctx.cluster

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if not specified_cache_dir:
        cache_dir = None
    else:
        cache_dir = pathlib.Path(specified_cache_dir)

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'

    src_hash = buildmeta.hash_dirs(
        buildmeta.get_cache_src_dirs(), extra_files=[__file__],
    )

    stdlib = buildmeta.read_data_cache(
        src_hash, stdlib_cache, source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(
        src_hash, tpldbdump_cache, source_dir=cache_dir, pickled=False)

    if stdlib is None:
        logger.info('Compiling the standard library...')
        stdlib = await _make_stdlib(ctx, in_dev_mode or testmode, global_ids)

    logger.info('Creating the necessary PostgreSQL extensions...')
    await metaschema.create_pg_extensions(conn)

    config_spec = config.load_spec_from_schema(stdlib.stdschema)
    config.set_settings(config_spec)

    if tpldbdump is None:
        logger.info('Populating internal SQL structures...')
        await metaschema.bootstrap(conn, config_spec)
        logger.info('Executing the standard library...')
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpl_db_name = edbdef.EDGEDB_TEMPLATE_DB
            tpl_pg_db_name = cluster.get_db_name(tpl_db_name)
            tpl_pg_db_name_dyn = (
                f"edgedb.get_database_backend_name({ql(tpl_db_name)})")
            tpldbdump = await cluster.dump_database(
                tpl_pg_db_name,
                exclude_schemas=['edgedbinstdata', 'edgedbext'],
                dump_object_owners=False,
            )

            # Excluding the "edgedbext" schema above apparently
            # doesn't apply to extensions created in that schema,
            # so we have to resort to commenting out extension
            # statements in the dump.
            tpldbdump = re.sub(
                rb'^(CREATE|COMMENT ON) EXTENSION.*$',
                rb'-- \g<0>',
                tpldbdump,
                flags=re.MULTILINE,
            )

            global_metadata = await conn.fetchval(
                f'SELECT edgedb.get_database_metadata({ql(tpl_db_name)})',
            )
            global_metadata = json.loads(global_metadata)

            pl_block = dbops.PLTopBlock()

            set_metadata_text = dbops.SetMetadata(
                dbops.Database(name='__dummy_placeholder_database__'),
                global_metadata,
            ).code(pl_block)
            set_metadata_text = set_metadata_text.replace(
                '__dummy_placeholder_database__',
                f"' || quote_ident({tpl_pg_db_name_dyn}) || '",
            )

            set_single_db_metadata_text = dbops.SetSingleDBMetadata(
                edbdef.EDGEDB_TEMPLATE_DB, global_metadata
            ).code(pl_block)

            pl_block.add_command(textwrap.dedent(f"""\
                IF (edgedb.get_backend_capabilities()
                    & {int(params.BackendCapabilities.CREATE_DATABASE)}) != 0
                THEN
                {textwrap.indent(set_metadata_text, '    ')}
                ELSE
                {textwrap.indent(set_single_db_metadata_text, '    ')}
                END IF
                """))

            text = pl_block.to_string()

            tpldbdump += b'\n' + text.encode('utf-8')

            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )

            buildmeta.write_data_cache(
                stdlib,
                src_hash,
                stdlib_cache,
                target_dir=cache_dir,
            )
    else:
        logger.info('Initializing the standard library...')
        await metaschema._execute_sql_script(conn, tpldbdump.decode('utf-8'))
        # Restore the search_path as the dump might have altered it.
        await conn.execute(
            "SELECT pg_catalog.set_config('search_path', 'edgedb', false)")

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            ctx,
            s_std.get_std_module_text(sn.UnqualName('_testmode')),
            stdlib,
        )
        await conn.execute(testmode_sql)
        # _testmode includes extra config settings, so make sure
        # those are picked up.
        config_spec = config.load_spec_from_schema(stdlib.stdschema)
        config.set_settings(config_spec)

    # Make sure that schema backend_id properties are in sync with
    # the database.

    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT (.abstract ?? False);
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(
            schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    await _store_static_bin_cache(
        ctx,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'global_schema',
        pickle.dumps(stdlib.global_schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_text_cache(
        ctx,
        'local_intro_query',
        stdlib.local_intro_query,
    )

    await _store_static_text_cache(
        ctx,
        'global_intro_query',
        stdlib.global_intro_query,
    )

    await metaschema.generate_support_views(
        conn, stdlib.reflschema, cluster.get_runtime_params()
    )
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )

    await metaschema.generate_more_support_functions(
        conn, compiler, stdlib.reflschema, testmode)

    if tpldbdump is not None:
        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.

        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::Type
            FILTER
                .builtin
                AND NOT (.abstract ?? False)
                AND schema::Type IS schema::ScalarType | schema::Tuple
            SET {
                backend_id := sys::_get_pg_type_for_edgedb_type(
                    .id,
                    <uuid>{}
                )
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::Array
            FILTER
                .builtin
                AND NOT (.abstract ?? False)
            SET {
                backend_id := sys::_get_pg_type_for_edgedb_type(
                    .id,
                    .element_type.id,
                )
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    await _store_static_json_cache(
        ctx,
        'configspec',
        config.spec_to_json(config_spec),
    )

    return stdlib, config_spec, compiler
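
Finally, the backend_id re-sync loop that both stdlib initializers share relies on the schema being immutable: set_field_value() returns a new schema, so the loop must thread schema through every iteration. A toy model of that threading (the class below is a stand-in, not the real s_schema API):

import json

# Toy immutable schema; with_backend_id() returns a new instance, just as
# set_field_value() returns a new schema object in the real code.
class ToySchema:
    def __init__(self, backend_ids=None):
        self._ids = dict(backend_ids or {})

    def with_backend_id(self, type_id, backend_id):
        new_ids = dict(self._ids)
        new_ids[type_id] = backend_id
        return ToySchema(new_ids)

typemap = json.dumps([
    {'id': 'std::bigint', 'backend_id': 16384},
    {'id': 'std::str', 'backend_id': 25},
])
schema = ToySchema()
for entry in json.loads(typemap):
    # Rebind on every iteration; dropping the assignment would lose updates.
    schema = schema.with_backend_id(entry['id'], entry['backend_id'])
print(schema._ids)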