Code Example #1
    def run(self):
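        # Compute and print a build-cache key for the artifact selected by
        # --type (assumes module-level imports of binascii and pathlib).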
        import edb as _edb
        from edb.server.buildmeta import hash_dirs, get_cache_src_dirs

        build = self.get_finalized_command('build')
        pkg_dir = pathlib.Path(_edb.__path__[0])

        if self.type == 'parsers':
            parser_hash = hash_dirs(
                [(pkg_dir / 'edgeql/parser/grammar', '.py')],
                extra_files=[pkg_dir / 'edgeql-parser/src/keywords.rs'],
            )
            print(binascii.hexlify(parser_hash).decode())

        elif self.type == 'postgres':
            print(_get_pg_source_stamp().strip())

        elif self.type == 'bootstrap':
            bootstrap_hash = hash_dirs(
                get_cache_src_dirs(),
                extra_files=[pkg_dir / 'server/bootstrap.py'],
            )
            print(binascii.hexlify(bootstrap_hash).decode())

        elif self.type == 'rust':
            rust_hash = hash_dirs(
                [
                    (pkg_dir / 'edgeql-parser', '.rs'),
                    (pkg_dir / 'edgeql-rust', '.rs'),
                    (pkg_dir / 'graphql-rewrite', '.rs'),
                ],
                extra_files=[
                    pkg_dir / 'edgeql-parser/Cargo.toml',
                    pkg_dir / 'edgeql-rust/Cargo.toml',
                    pkg_dir / 'graphql-rewrite/Cargo.toml',
                ],
            )
            print(binascii.hexlify(rust_hash).decode())

        elif self.type == 'ext':
            ext_hash = hash_dirs([
                (pkg_dir, '.pyx'),
                (pkg_dir, '.pyi'),
                (pkg_dir, '.pxd'),
                (pkg_dir, '.pxi'),
            ])
            print(binascii.hexlify(ext_hash).decode())

        elif self.type == 'cli':
            print(_get_edgedbcli_rev())

        elif self.type == 'build_temp':
            print(pathlib.Path(build.build_temp).resolve())

        elif self.type == 'build_lib':
            print(pathlib.Path(build.build_lib).resolve())

        else:
            raise RuntimeError(f'Illegal --type={self.type}; can only be: '
                               'cli, rust, ext, postgres, bootstrap, parsers, '
                               'build_temp or build_lib')
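
hash_dirs itself comes from edb.server.buildmeta and is not shown in these excerpts. As a mental model, here is a minimal sketch of what such a helper might look like, assuming SHA-256 and a sorted recursive walk; the real implementation may differ:

import hashlib
import pathlib

def hash_dirs(dirs, *, extra_files=()):
    # Fold the path and contents of every matching file into one digest,
    # so the key changes whenever any source file changes.
    h = hashlib.sha256()
    for dirpath, ext in dirs:
        for f in sorted(pathlib.Path(dirpath).rglob(f'*{ext}')):
            if f.is_file():
                h.update(str(f).encode('utf-8'))
                h.update(f.read_bytes())
    for f in extra_files:
        h.update(pathlib.Path(f).read_bytes())
    return h.digest()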
Code Example #2
def _load_reflection_schema():
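    # Build the reflection schema at most once per process; in dev mode a
    # pickled copy keyed by a hash of the schema source dirs is reused.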
    global _refl_schema
    global _schema_class_layout

    if _refl_schema is None:
        std_dirs_hash = buildmeta.hash_dirs(s_std.CACHE_SRC_DIRS)

        cache = None
        if devmode.is_in_dev_mode():
            cache = buildmeta.read_data_cache(std_dirs_hash,
                                              'transient-reflschema.pickle')

        if cache is not None:
            reflschema, classlayout = cache
        else:
            std_schema = _load_std_schema()
            reflection = s_refl.generate_structure(std_schema)
            classlayout = reflection.class_layout
            context = sd.CommandContext()
            context.stdmode = True
            reflschema = reflection.intro_schema_delta.apply(
                std_schema, context)

            if devmode.is_in_dev_mode():
                buildmeta.write_data_cache(
                    (reflschema, classlayout),
                    std_dirs_hash,
                    'transient-reflschema.pickle',
                )

        _refl_schema = reflschema
        _schema_class_layout = classlayout

    return _refl_schema, _schema_class_layout
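
The read_data_cache / write_data_cache pair (also from edb.server.buildmeta) is what ties the source hash to the pickled artifact. Their implementations are not part of these excerpts; below is a minimal sketch of the contract the callers rely on, where the sidecar .hash file and the default cache directory are assumptions of this sketch:

import pathlib
import pickle

CACHE_DIR = pathlib.Path('build/cache')  # hypothetical default location

def read_data_cache(src_hash, name, *, source_dir=None, pickled=True):
    # Return the cached payload only if it was written for the same
    # source hash; a missing or stale cache yields None.
    base = pathlib.Path(source_dir) if source_dir else CACHE_DIR
    path, hash_path = base / name, base / (name + '.hash')
    if not path.exists() or not hash_path.exists():
        return None
    if hash_path.read_bytes() != src_hash:
        return None
    data = path.read_bytes()
    return pickle.loads(data) if pickled else data

def write_data_cache(obj, src_hash, name, *, pickled=True, target_dir=None):
    # Store the payload next to the hash it was built from.
    base = pathlib.Path(target_dir) if target_dir else CACHE_DIR
    base.mkdir(parents=True, exist_ok=True)
    payload = (pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
               if pickled else obj)
    (base / name).write_bytes(payload)
    (base / (name + '.hash')).write_bytes(src_hash)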
Code Example #3
    def run(self, *args, **kwargs):
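        # Older variant of the cache-key command: combine the grammar hash
        # with the Postgres submodule revision (assumes module-level imports
        # of os, subprocess and binascii).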
        import edb as _edb
        from edb.server.buildmeta import hash_dirs

        parser_hash = hash_dirs([(
            os.path.join(_edb.__path__[0], 'edgeql/parser/grammar'),
            '.py')])

        # `git submodule status` prefixes the commit SHA with a one-character
        # state flag (' ', '+' or '-'); proc.stdout[1:] strips it before the
        # revision is split off.
        proc = subprocess.run(
            ['git', 'submodule', 'status', 'postgres'],
            stdout=subprocess.PIPE, universal_newlines=True, check=True)
        postgres_revision, _, _ = proc.stdout[1:].partition(' ')

        print(f'{binascii.hexlify(parser_hash).decode()}-{postgres_revision}')
Code Example #4
def _load_std_schema():
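    # Load the standard-library schema once per process, using the same
    # hash-keyed dev-mode pickle cache as the reflection schema loader.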
    global _std_schema
    if _std_schema is None:
        std_dirs_hash = buildmeta.hash_dirs(s_std.CACHE_SRC_DIRS)
        schema = None

        if devmode.is_in_dev_mode():
            schema = buildmeta.read_data_cache(std_dirs_hash,
                                               'transient-stdschema.pickle')

        if schema is None:
            schema = s_schema.FlatSchema()
            for modname in s_schema.STD_LIB + ('stdgraphql', ):
                schema = s_std.load_std_module(schema, modname)

        if devmode.is_in_dev_mode():
            buildmeta.write_data_cache(schema, std_dirs_hash,
                                       'transient-stdschema.pickle')

        _std_schema = schema

    return _std_schema
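
One subtle difference between the two loaders: here the write_data_cache call sits outside the "if schema is None" branch, so in dev mode the pickle is rewritten on every call, even after a cache hit, whereas _load_reflection_schema only writes after an actual rebuild.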
Code Example #5
async def _init_stdlib(cluster, conn, testmode, global_ids):
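    # Bootstrap the standard library into a fresh cluster, preferring the
    # pickled stdlib and template-database dump caches when their source
    # hash matches the current tree.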
    in_dev_mode = devmode.is_in_dev_mode()

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if specified_cache_dir:
        cache_dir = pathlib.Path(specified_cache_dir)
    else:
        cache_dir = None

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'
    src_hash = buildmeta.hash_dirs(
        buildmeta.get_cache_src_dirs(),
        extra_files=[__file__],
    )

    stdlib = buildmeta.read_data_cache(src_hash,
                                       stdlib_cache,
                                       source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(src_hash,
                                          tpldbdump_cache,
                                          source_dir=cache_dir,
                                          pickled=False)

    if stdlib is None:
        logger.info('Compiling the standard library...')
        stdlib = await _make_stdlib(in_dev_mode or testmode, global_ids)

    logger.info('Creating the necessary PostgreSQL extensions...')
    await metaschema.create_pg_extensions(conn)

    if tpldbdump is None:
        logger.info('Populating internal SQL structures...')
        await metaschema.bootstrap(conn)
        logger.info('Executing the standard library...')
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpldbdump = cluster.dump_database(
                edbdef.EDGEDB_TEMPLATE_DB,
                exclude_schemas=['edgedbinstdata', 'edgedbext'],
            )

            # Excluding the "edgedbext" schema above apparently
            # doesn't apply to extensions created in that schema,
            # so we have to resort to commenting out extension
            # statements in the dump.
            tpldbdump = re.sub(
                rb'^(CREATE|COMMENT ON) EXTENSION.*$',
                rb'-- \g<0>',
                tpldbdump,
                flags=re.MULTILINE,
            )

            global_metadata = await conn.fetchval(f'''\
                SELECT edgedb.shobj_metadata(
                    (SELECT oid FROM pg_database
                     WHERE datname = {ql(edbdef.EDGEDB_TEMPLATE_DB)}),
                    'pg_database'
                )''')

            pl_block = dbops.PLTopBlock()

            dbops.SetMetadata(
                dbops.Database(name=edbdef.EDGEDB_TEMPLATE_DB),
                json.loads(global_metadata),
            ).generate(pl_block)

            tpldbdump += b'\n' + pl_block.to_string().encode('utf-8')

            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )

            buildmeta.write_data_cache(
                stdlib,
                src_hash,
                stdlib_cache,
                target_dir=cache_dir,
            )
    else:
        logger.info('Initializing the standard library...')
        await metaschema._execute_sql_script(conn, tpldbdump.decode('utf-8'))

        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.

        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::ScalarType
            FILTER .builtin AND NOT (.abstract ?? False)
            SET {
                backend_id := sys::_get_pg_type_for_scalar_type(.id)
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            s_std.get_std_module_text('_testmode'),
            stdlib,
        )
        await conn.execute(testmode_sql)
        await metaschema.generate_support_views(
            conn,
            stdlib.reflschema,
        )

    # Make sure that schema backend_id properties are in sync with
    # the database.

    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT (.abstract ?? False);
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    await _store_static_bin_cache(
        cluster,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'global_schema',
        pickle.dumps(stdlib.global_schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_text_cache(
        cluster,
        'local_intro_query',
        stdlib.local_intro_query,
    )

    await _store_static_text_cache(
        cluster,
        'global_intro_query',
        stdlib.global_intro_query,
    )

    await metaschema.generate_support_views(conn, stdlib.reflschema)
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )

    await metaschema.generate_more_support_functions(conn, compiler,
                                                     stdlib.reflschema,
                                                     testmode)

    return stdlib, compiler
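
After bootstrap, the final schemas, class layout and introspection queries are stored inside the cluster itself via _store_static_bin_cache / _store_static_text_cache, evidently so that later server startups can read them back without recompiling the standard library.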
Code Example #6
async def _init_stdlib(cluster, conn, testmode, global_ids):
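    # Earlier revision of the same bootstrap routine; see the comparison
    # notes after this example.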
    in_dev_mode = devmode.is_in_dev_mode()

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if specified_cache_dir:
        cache_dir = pathlib.Path(specified_cache_dir)
    else:
        cache_dir = None

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'
    src_hash = buildmeta.hash_dirs(CACHE_SRC_DIRS)
    stdlib = buildmeta.read_data_cache(src_hash,
                                       stdlib_cache,
                                       source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(src_hash,
                                          tpldbdump_cache,
                                          source_dir=cache_dir,
                                          pickled=False)

    if stdlib is None:
        stdlib = await _make_stdlib(in_dev_mode or testmode, global_ids)
        cache_hit = False
    else:
        cache_hit = True

    if tpldbdump is None:
        await _ensure_meta_schema(conn)
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpldbdump = cluster.dump_database(edbdef.EDGEDB_TEMPLATE_DB,
                                              exclude_schema='edgedbinstdata')
            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )
    else:
        cluster.restore_database(edbdef.EDGEDB_TEMPLATE_DB, tpldbdump)

        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.

        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
            bootstrap_mode=True,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::ScalarType
            FILTER .builtin AND NOT .is_abstract
            SET {
                backend_id := sys::_get_pg_type_for_scalar_type(.id)
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            s_std.get_std_module_text('_testmode'),
            stdlib,
        )
        await conn.execute(testmode_sql)
        await metaschema.generate_support_views(
            cluster,
            conn,
            stdlib.reflschema,
        )

    # Make sure that schema backend_id properties are in sync with
    # the database.

    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
        bootstrap_mode=True,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT .is_abstract;
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    if not cache_hit and (in_dev_mode or specified_cache_dir):
        buildmeta.write_data_cache(
            stdlib,
            src_hash,
            stdlib_cache,
            target_dir=cache_dir,
        )

    await _store_static_bin_cache(
        cluster,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_json_cache(
        cluster,
        'introquery',
        json.dumps(stdlib.introquery),
    )

    await metaschema.generate_support_views(cluster, conn, stdlib.reflschema)
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
        bootstrap_mode=True,
    )

    return schema, stdlib.reflschema, compiler
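
Examples #5 and #6 appear to be two revisions of the same function. The older one (#6) restores the template-database dump with cluster.restore_database and stores a single JSON introquery; the newer one (#5) replays the dump as a SQL script, comments out the CREATE EXTENSION statements that the schema exclusion fails to catch, appends the template database's metadata to the dump before caching it, and stores separate local and global introspection queries plus a pickled global schema. The EdgeQL filter also changed from NOT .is_abstract to NOT (.abstract ?? False), tracking a schema field rename.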