Example #1
0
async def _bootstrap_config_spec(schema, cluster):
    config_spec = config.load_spec_from_schema(schema)
    config.set_settings(config_spec)

    await _store_static_json_cache(
        cluster,
        'configspec',
        config.spec_to_json(config_spec),
    )
Example #2
0
async def _bootstrap_config_spec(
    ctx: BootstrapContext,
    schema: s_schema.Schema,
) -> None:
    config_spec = config.load_spec_from_schema(schema)
    config.set_settings(config_spec)

    await _store_static_json_cache(
        ctx,
        'configspec',
        config.spec_to_json(config_spec),
    )
Example #3
0
    def __init__(self, connect_args: dict, data_dir: Optional[str]):
        self._connect_args = connect_args
        self._dbname = None
        self._cached_db = None

        if data_dir is not None:
            self._data_dir = pathlib.Path(data_dir)
            self._std_schema = stdschema.load_std_schema(self._data_dir)
            config_spec = config.load_spec_from_schema(self._std_schema)
            config.set_settings(config_spec)
        else:
            self._data_dir = None
            self._std_schema = None
Example #4
0
async def _init_config(cluster: pgcluster.BaseCluster) -> None:
    conn = await cluster.connect(database=edbdef.EDGEDB_TEMPLATE_DB)
    try:
        await _check_data_dir_compatibility(conn)
        compiler = edbcompiler.Compiler()
        await compiler.initialize_from_pg(conn)
        std_schema = compiler.get_std_schema()
        config_spec = config.load_spec_from_schema(std_schema)

        # Initialize global config
        config.set_settings(config_spec)

    finally:
        await conn.close()
Example #5
0
async def _bootstrap_config_spec(schema, cluster):
    config_spec = config.load_spec_from_schema(schema)
    config.set_settings(config_spec)

    data_dir = cluster.get_data_dir()
    spec_fn = os.path.join(data_dir, 'config_spec.json')
    sys_overrides_fn = os.path.join(data_dir, 'config_sys.json')

    with open(spec_fn, 'wt') as f:
        f.write(config.spec_to_json(config_spec))

    if not os.path.exists(sys_overrides_fn):
        with open(sys_overrides_fn, 'wt') as f:
            f.write('{}')
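The variant above persists the spec to config_spec.json alongside a config_sys.json overrides file in the data directory. A minimal sketch of reading those files back at startup, assuming only the file names used above and the standard library; the _read_config_files helper is hypothetical and the actual deserialization of the spec is not part of these examples:

import json
import os


def _read_config_files(data_dir):
    # Hypothetical helper: load the JSON spec and the system-level overrides
    # written by the _bootstrap_config_spec variant above.
    with open(os.path.join(data_dir, 'config_spec.json'), 'rt') as f:
        spec_data = json.load(f)

    overrides_path = os.path.join(data_dir, 'config_sys.json')
    if os.path.exists(overrides_path):
        with open(overrides_path, 'rt') as f:
            overrides = json.load(f)
    else:
        overrides = {}

    return spec_data, overrides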
Example #6
0
async def _start(ctx: BootstrapContext) -> None:
    conn = await _check_catalog_compatibility(ctx)

    try:
        compiler = edbcompiler.Compiler()
        await compiler.initialize_from_pg(conn)
        std_schema = compiler.get_std_schema()
        config_spec = config.load_spec_from_schema(std_schema)

        # Initialize global config
        config.set_settings(config_spec)

    finally:
        await conn.close()
Example #7
0
async def _start(ctx: BootstrapContext) -> None:
    conn = await _check_catalog_compatibility(ctx)

    try:
        caps = await conn.fetchval("SELECT edgedb.get_backend_capabilities()")
        ctx.cluster.overwrite_capabilities(caps)
        _check_capabilities(ctx)

        compiler = edbcompiler.Compiler()
        await compiler.initialize_from_pg(conn)
        std_schema = compiler.get_std_schema()
        config_spec = config.load_spec_from_schema(std_schema)

        # Initialize global config
        config.set_settings(config_spec)

    finally:
        await conn.close()
Example #8
0
async def bootstrap(cluster, args) -> bool:
    pgconn = await cluster.connect()
    pgconn.add_log_listener(_pg_log_listener)
    std_schema = None

    try:
        await _ensure_edgedb_user(pgconn,
                                  edgedb_defines.EDGEDB_SUPERUSER,
                                  is_superuser=True)
        need_meta_bootstrap = await _ensure_edgedb_template_database(pgconn)

        if need_meta_bootstrap:
            conn = await cluster.connect(
                database=edgedb_defines.EDGEDB_TEMPLATE_DB,
                user=edgedb_defines.EDGEDB_SUPERUSER)

            try:
                conn.add_log_listener(_pg_log_listener)

                await _ensure_meta_schema(conn)

                std_schema = await _init_stdlib(cluster,
                                                conn,
                                                testmode=args['testmode'])
                await _bootstrap_config_spec(std_schema, cluster)
                await _compile_sys_queries(std_schema, cluster)
                await _populate_misc_instance_data(std_schema, cluster)
                schema = await _init_defaults(std_schema, std_schema, conn)
                schema = await _populate_data(std_schema, schema, conn)
                await _configure(std_schema,
                                 conn,
                                 cluster,
                                 insecure=args['insecure'])
            finally:
                await conn.close()
        else:
            std_schema = compiler.load_std_schema(
                pathlib.Path(cluster.get_data_dir()))
            config_spec = config.load_spec_from_schema(std_schema)
            config.set_settings(config_spec)

        await _ensure_edgedb_database(pgconn,
                                      edgedb_defines.EDGEDB_SUPERUSER_DB,
                                      edgedb_defines.EDGEDB_SUPERUSER,
                                      cluster=cluster)

        await _ensure_edgedb_template_not_connectable(pgconn)

        await _ensure_edgedb_user(pgconn,
                                  args['default_database_user'],
                                  is_superuser=True)

        await _ensure_edgedb_database(pgconn,
                                      args['default_database'],
                                      args['default_database_user'],
                                      cluster=cluster)

    finally:
        await pgconn.close()

    return need_meta_bootstrap
Example #9
0
 def tearDown(self):
     config.set_settings(self._cfgspec)
     self._cfgspec = None  # some settings cannot be pickled by runner.py
Example #10
0
 def setUp(self):
     self._cfgspec = config.get_settings()
     config.set_settings(testspec1)
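Examples #9 and #10 save the process-global spec in setUp and restore it in tearDown. The same pattern can also be written as a context manager; this is a minimal sketch that assumes only the config.get_settings()/config.set_settings() calls shown above (the edb.server.config import path is an assumption):

import contextlib

from edb.server import config  # assumed import path for the config module


@contextlib.contextmanager
def temporary_config_spec(spec):
    # Swap in `spec` for the duration of the block, then restore the
    # previously installed global spec, mirroring setUp/tearDown above.
    saved = config.get_settings()
    config.set_settings(spec)
    try:
        yield
    finally:
        config.set_settings(saved)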
Example #11
0
async def bootstrap(cluster, args) -> bool:
    pgconn = await cluster.connect()
    pgconn.add_log_listener(_pg_log_listener)
    std_schema = None

    try:
        membership = set()
        session_user = cluster.get_connection_params().user
        if session_user != edbdef.EDGEDB_SUPERUSER:
            membership.add(session_user)

        await _ensure_edgedb_role(
            cluster,
            pgconn,
            edbdef.EDGEDB_SUPERUSER,
            membership=membership,
            is_superuser=True,
        )

        if session_user != edbdef.EDGEDB_SUPERUSER:
            await _execute(
                pgconn,
                f'SET ROLE {edbdef.EDGEDB_SUPERUSER};',
            )
            cluster.set_default_session_authorization(edbdef.EDGEDB_SUPERUSER)

        need_meta_bootstrap = await _ensure_edgedb_template_database(
            cluster, pgconn)

        if need_meta_bootstrap:
            conn = await cluster.connect(database=edbdef.EDGEDB_TEMPLATE_DB)
            conn.add_log_listener(_pg_log_listener)

            await _execute(
                conn,
                f'ALTER SCHEMA public OWNER TO {edbdef.EDGEDB_SUPERUSER}',
            )

            try:
                conn.add_log_listener(_pg_log_listener)

                await _ensure_meta_schema(conn)
                instancedata = await _populate_misc_instance_data(cluster)

                std_schema = await _init_stdlib(cluster,
                                                conn,
                                                testmode=args['testmode'])
                await _bootstrap_config_spec(std_schema, cluster)
                await _compile_sys_queries(std_schema, cluster)
                schema = await _init_defaults(std_schema, std_schema, conn)
                schema = await _populate_data(std_schema, schema, conn)
                await _configure(std_schema,
                                 conn,
                                 cluster,
                                 insecure=args['insecure'],
                                 testmode=args['testmode'])
            finally:
                await conn.close()

            await _ensure_edgedb_database(pgconn,
                                          edbdef.EDGEDB_SUPERUSER_DB,
                                          edbdef.EDGEDB_SUPERUSER,
                                          cluster=cluster)

        else:
            conn = await cluster.connect(database=edbdef.EDGEDB_SUPERUSER_DB)

            try:
                std_schema = await compiler.load_std_schema(conn)
                config_spec = config.load_spec_from_schema(std_schema)
                config.set_settings(config_spec)
                instancedata = await _get_instance_data(conn)
            finally:
                await conn.close()

        datadir_version = instancedata.get('version')
        datadir_major = None
        if datadir_version:
            datadir_major = datadir_version.get('major')

        expected_ver = buildmeta.get_version()

        if datadir_major != expected_ver.major:
            raise errors.ConfigurationError(
                'database instance incompatible with this version of EdgeDB',
                details=(f'The database instance was initialized with '
                         f'EdgeDB version {datadir_major}, '
                         f'which is incompatible with this version '
                         f'{expected_ver.major}'),
                hint=(f'You need to recreate the instance and upgrade '
                      f'using dump/restore.'))

        datadir_catver = instancedata.get('catver')
        expected_catver = edbdef.EDGEDB_CATALOG_VERSION

        if datadir_catver != expected_catver:
            raise errors.ConfigurationError(
                'database instance incompatible with this version of EdgeDB',
                details=(f'The database instance was initialized with '
                         f'EdgeDB format version {datadir_catver}, '
                         f'but this version of the server expects '
                         f'format version {expected_catver}'),
                hint=(f'You need to recreate the instance and upgrade '
                      f'using dump/restore.'))

        await _ensure_edgedb_template_not_connectable(pgconn)

        await _ensure_edgedb_role(
            cluster,
            pgconn,
            args['default_database_user'],
            membership=membership,
            is_superuser=True,
        )

        await _execute(
            pgconn,
            f"SET ROLE {args['default_database_user']};",
        )

        await _ensure_edgedb_database(
            pgconn,
            args['default_database'],
            args['default_database_user'],
            cluster=cluster,
        )

    finally:
        await pgconn.close()

    return need_meta_bootstrap
Example #12
0
 def tearDown(self):
     config.set_settings(self._cfgspec)
Example #13
0
async def bootstrap(cluster, args) -> bool:
    pgconn = await cluster.connect()
    pgconn.add_log_listener(_pg_log_listener)
    std_schema = None

    try:
        membership = set()
        session_user = cluster.get_connection_params().user
        if session_user != edbdef.EDGEDB_SUPERUSER:
            membership.add(session_user)

        superuser_uid = await _ensure_edgedb_role(
            cluster,
            pgconn,
            edbdef.EDGEDB_SUPERUSER,
            membership=membership,
            is_superuser=True,
            builtin=True,
        )

        if session_user != edbdef.EDGEDB_SUPERUSER:
            await _execute(
                pgconn,
                f'SET ROLE {edbdef.EDGEDB_SUPERUSER};',
            )
            cluster.set_default_session_authorization(edbdef.EDGEDB_SUPERUSER)

        new_template_db_id = await _ensure_edgedb_template_database(
            cluster, pgconn)

        if new_template_db_id:
            conn = await cluster.connect(database=edbdef.EDGEDB_TEMPLATE_DB)
            conn.add_log_listener(_pg_log_listener)

            await _execute(
                conn,
                f'ALTER SCHEMA public OWNER TO {edbdef.EDGEDB_SUPERUSER}',
            )

            try:
                conn.add_log_listener(_pg_log_listener)

                await _populate_misc_instance_data(cluster, conn)

                std_schema, refl_schema, compiler = await _init_stdlib(
                    cluster,
                    conn,
                    testmode=args['testmode'],
                    global_ids={
                        edbdef.EDGEDB_SUPERUSER: superuser_uid,
                        edbdef.EDGEDB_TEMPLATE_DB: new_template_db_id,
                    })
                await _bootstrap_config_spec(std_schema, cluster)
                await _compile_sys_queries(refl_schema, compiler, cluster)
                schema = await _init_defaults(std_schema, compiler, conn)
                schema = await _populate_data(std_schema, compiler, conn)
                await _configure(schema,
                                 compiler,
                                 conn,
                                 cluster,
                                 insecure=args['insecure'])
            finally:
                await conn.close()

            superuser_db = schema.get_global(s_db.Database,
                                             edbdef.EDGEDB_SUPERUSER_DB)

            await _ensure_edgedb_database(
                pgconn,
                edbdef.EDGEDB_SUPERUSER_DB,
                edbdef.EDGEDB_SUPERUSER,
                cluster=cluster,
                builtin=True,
                objid=superuser_db.id,
            )

        else:
            conn = await cluster.connect(database=edbdef.EDGEDB_SUPERUSER_DB)

            try:
                await _check_data_dir_compatibility(conn)
                compiler = edbcompiler.Compiler({})
                await compiler.ensure_initialized(conn)
                std_schema = compiler.get_std_schema()
                config_spec = config.load_spec_from_schema(std_schema)
                config.set_settings(config_spec)
            finally:
                await conn.close()

        await _ensure_edgedb_template_not_connectable(pgconn)

        await _ensure_edgedb_role(
            cluster,
            pgconn,
            args['default_database_user'],
            membership=membership,
            is_superuser=True,
        )

        await _execute(
            pgconn,
            f"SET ROLE {args['default_database_user']};",
        )

        await _ensure_edgedb_database(
            pgconn,
            args['default_database'],
            args['default_database_user'],
            cluster=cluster,
        )

    finally:
        await pgconn.close()

    return new_template_db_id is not None
Example #14
0
async def _init_stdlib(
    ctx: BootstrapContext,
    testmode: bool,
    global_ids: Mapping[str, uuid.UUID],
) -> Tuple[StdlibBits, config.Spec, edbcompiler.Compiler]:
    in_dev_mode = devmode.is_in_dev_mode()
    conn = ctx.conn
    cluster = ctx.cluster

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if not specified_cache_dir:
        cache_dir = None
    else:
        cache_dir = pathlib.Path(specified_cache_dir)

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'

    src_hash = buildmeta.hash_dirs(
        buildmeta.get_cache_src_dirs(), extra_files=[__file__],
    )

    stdlib = buildmeta.read_data_cache(
        src_hash, stdlib_cache, source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(
        src_hash, tpldbdump_cache, source_dir=cache_dir, pickled=False)

    if stdlib is None:
        logger.info('Compiling the standard library...')
        stdlib = await _make_stdlib(ctx, in_dev_mode or testmode, global_ids)

    logger.info('Creating the necessary PostgreSQL extensions...')
    await metaschema.create_pg_extensions(conn)

    config_spec = config.load_spec_from_schema(stdlib.stdschema)
    config.set_settings(config_spec)

    if tpldbdump is None:
        logger.info('Populating internal SQL structures...')
        await metaschema.bootstrap(conn, config_spec)
        logger.info('Executing the standard library...')
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpl_db_name = edbdef.EDGEDB_TEMPLATE_DB
            tpl_pg_db_name = cluster.get_db_name(tpl_db_name)
            tpl_pg_db_name_dyn = (
                f"edgedb.get_database_backend_name({ql(tpl_db_name)})")
            tpldbdump = await cluster.dump_database(
                tpl_pg_db_name,
                exclude_schemas=['edgedbinstdata', 'edgedbext'],
                dump_object_owners=False,
            )

            # Excluding the "edgedbext" schema above apparently
            # doesn't apply to extensions created in that schema,
            # so we have to resort to commenting out extension
            # statements in the dump.
            tpldbdump = re.sub(
                rb'^(CREATE|COMMENT ON) EXTENSION.*$',
                rb'-- \g<0>',
                tpldbdump,
                flags=re.MULTILINE,
            )

            global_metadata = await conn.fetchval(
                f'SELECT edgedb.get_database_metadata({ql(tpl_db_name)})',
            )
            global_metadata = json.loads(global_metadata)

            pl_block = dbops.PLTopBlock()

            set_metadata_text = dbops.SetMetadata(
                dbops.Database(name='__dummy_placeholder_database__'),
                global_metadata,
            ).code(pl_block)
            set_metadata_text = set_metadata_text.replace(
                '__dummy_placeholder_database__',
                f"' || quote_ident({tpl_pg_db_name_dyn}) || '",
            )

            set_single_db_metadata_text = dbops.SetSingleDBMetadata(
                edbdef.EDGEDB_TEMPLATE_DB, global_metadata
            ).code(pl_block)

            pl_block.add_command(textwrap.dedent(f"""\
                IF (edgedb.get_backend_capabilities()
                    & {int(params.BackendCapabilities.CREATE_DATABASE)}) != 0
                THEN
                {textwrap.indent(set_metadata_text, '    ')}
                ELSE
                {textwrap.indent(set_single_db_metadata_text, '    ')}
                END IF
                """))

            text = pl_block.to_string()

            tpldbdump += b'\n' + text.encode('utf-8')

            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )

            buildmeta.write_data_cache(
                stdlib,
                src_hash,
                stdlib_cache,
                target_dir=cache_dir,
            )
    else:
        logger.info('Initializing the standard library...')
        await metaschema._execute_sql_script(conn, tpldbdump.decode('utf-8'))
        # Restore the search_path as the dump might have altered it.
        await conn.execute(
            "SELECT pg_catalog.set_config('search_path', 'edgedb', false)")

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            ctx,
            s_std.get_std_module_text(sn.UnqualName('_testmode')),
            stdlib,
        )
        await conn.execute(testmode_sql)
        # _testmode includes extra config settings, so make sure
        # those are picked up.
        config_spec = config.load_spec_from_schema(stdlib.stdschema)
        config.set_settings(config_spec)

    # Make sure that schema backend_id properties are in sync with
    # the database.

    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT (.abstract ?? False);
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(
            schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    await _store_static_bin_cache(
        ctx,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'global_schema',
        pickle.dumps(stdlib.global_schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_text_cache(
        ctx,
        'local_intro_query',
        stdlib.local_intro_query,
    )

    await _store_static_text_cache(
        ctx,
        'global_intro_query',
        stdlib.global_intro_query,
    )

    await metaschema.generate_support_views(
        conn, stdlib.reflschema, cluster.get_runtime_params()
    )
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )

    await metaschema.generate_more_support_functions(
        conn, compiler, stdlib.reflschema, testmode)

    if tpldbdump is not None:
        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.

        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::Type
            FILTER
                .builtin
                AND NOT (.abstract ?? False)
                AND schema::Type IS schema::ScalarType | schema::Tuple
            SET {
                backend_id := sys::_get_pg_type_for_edgedb_type(
                    .id,
                    <uuid>{}
                )
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::Array
            FILTER
                .builtin
                AND NOT (.abstract ?? False)
            SET {
                backend_id := sys::_get_pg_type_for_edgedb_type(
                    .id,
                    .element_type.id,
                )
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    await _store_static_json_cache(
        ctx,
        'configspec',
        config.spec_to_json(config_spec),
    )

    return stdlib, config_spec, compiler
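Across all of the variants above, the config bootstrap reduces to the same three steps: derive the spec from the schema, install it as the process-global settings, and persist its JSON form. A minimal sketch of that shared shape, assuming only the config API already used in the examples and a hypothetical store_json coroutine standing in for _store_static_json_cache:

async def bootstrap_config(schema, store_json):
    # 1. Derive the config spec from the (standard library) schema.
    config_spec = config.load_spec_from_schema(schema)

    # 2. Install it as the process-global settings spec.
    config.set_settings(config_spec)

    # 3. Persist the JSON form under the 'configspec' key so that later
    #    server starts can load it without recompiling the schema.
    await store_json('configspec', config.spec_to_json(config_spec))

    return config_spec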