Code example #1
def _load_reflection_schema():
    global _refl_schema
    global _schema_class_layout

    if _refl_schema is None:
        std_dirs_hash = buildmeta.hash_dirs(s_std.CACHE_SRC_DIRS)

        cache = None
        if devmode.is_in_dev_mode():
            cache = buildmeta.read_data_cache(std_dirs_hash,
                                              'transient-reflschema.pickle')

        if cache is not None:
            reflschema, classlayout = cache
        else:
            std_schema = _load_std_schema()
            reflection = s_refl.generate_structure(std_schema)
            classlayout = reflection.class_layout
            context = sd.CommandContext()
            context.stdmode = True
            reflschema = reflection.intro_schema_delta.apply(
                std_schema, context)

            if devmode.is_in_dev_mode():
                buildmeta.write_data_cache(
                    (reflschema, classlayout),
                    std_dirs_hash,
                    'transient-reflschema.pickle',
                )

        _refl_schema = reflschema
        _schema_class_layout = classlayout

    return _refl_schema, _schema_class_layout
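
Example #1 combines two ideas: memoizing the result in module-level globals, and gating a pickle cache on dev mode. The memoization half in isolation looks roughly like the sketch below (illustrative names only, not the actual edb helpers).

_cached_schema = None

def get_schema(build):
    # Build the schema on first use, then hand back the same object
    # on every later call.
    global _cached_schema
    if _cached_schema is None:
        _cached_schema = build()
    return _cached_schema
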
Code example #2
def server_main(*, insecure=False, **kwargs):

    logsetup.setup_logging(kwargs['log_level'], kwargs['log_to'])
    exceptions.install_excepthook()

    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

    if kwargs['devmode'] is not None:
        devmode.enable_dev_mode(kwargs['devmode'])

    if not kwargs['data_dir']:
        if devmode.is_in_dev_mode():
            kwargs['data_dir'] = os.path.expanduser('~/.edgedb')
        else:
            abort('Please specify the instance data directory '
                  'using the -D argument')

    kwargs['insecure'] = insecure

    if kwargs['background']:
        daemon_opts = {'detach_process': True}
        pidfile = os.path.join(kwargs['pidfile'],
                               '.s.EDGEDB.{}.lock'.format(kwargs['port']))
        daemon_opts['pidfile'] = pidfile
        if kwargs['daemon_user']:
            daemon_opts['uid'] = kwargs['daemon_user']
        if kwargs['daemon_group']:
            daemon_opts['gid'] = kwargs['daemon_group']
        with daemon.DaemonContext(**daemon_opts):
            setproctitle.setproctitle('edgedb-server-{}'.format(
                kwargs['port']))
            run_server(kwargs)
    else:
        with devmode.CoverageConfig.enable_coverage_if_requested():
            run_server(kwargs)
Code example #3
File: cluster.py  Project: fantix/edgedb
    def __init__(self,
                 postgres_dsn,
                 *,
                 data_dir_suffix=None,
                 data_dir_prefix=None,
                 data_dir_parent=None,
                 env=None,
                 testmode=False,
                 log_level=None):
        self._pg_dsn = postgres_dsn
        self._edgedb_cmd = [
            sys.executable, '-m', 'edb.server.main', '--postgres-dsn',
            postgres_dsn, '--max-backend-connections', '20'
        ]

        if log_level:
            self._edgedb_cmd.extend(['--log-level', log_level])

        if devmode.is_in_dev_mode():
            self._edgedb_cmd.append('--devmode')

        if testmode:
            self._edgedb_cmd.append('--testmode')

        self._runstate_dir = tempfile.mkdtemp(suffix=data_dir_suffix,
                                              prefix=data_dir_prefix,
                                              dir=data_dir_parent)
        self._edgedb_cmd.extend(['--runstate-dir', self._runstate_dir])

        self._pg_cluster = pgcluster.get_remote_pg_cluster(postgres_dsn)
        self._daemon_process = None
        self._port = edgedb_defines.EDGEDB_PORT
        self._effective_port = None
        self._env = env
Code example #4
        async def test(pgdata_path):
            async with tb.start_edgedb_server(
                    backend_dsn=
                    f'postgres:///?user=postgres&host={pgdata_path}',
                    reset_auth=True,
                    runstate_dir=None
                    if devmode.is_in_dev_mode() else pgdata_path,
            ) as sd:
                con = await sd.connect()
                try:
                    val = await con.query_single('SELECT 123')
                    self.assertEqual(int(val), 123)

                    # stop the postgres
                    await cluster.stop()
                    with self.assertRaisesRegex(
                            errors.BackendUnavailableError,
                            'Postgres is not available',
                    ):
                        await con.query_single('SELECT 123+456')

                    # bring postgres back
                    await cluster.start()

                    # give the EdgeDB server some time to recover
                    deadline = time.monotonic() + 5
                    while time.monotonic() < deadline:
                        try:
                            val = await con.query_single('SELECT 123+456')
                            break
                        except errors.BackendUnavailableError:
                            pass
                    self.assertEqual(int(val), 579)
                finally:
                    await con.aclose()
Code example #5
    def __init__(
            self, data_dir, *,
            pg_superuser='******', port=edgedb_defines.EDGEDB_PORT,
            runstate_dir=None, env=None, testmode=False):
        self._pg_dsn = None
        self._data_dir = data_dir
        self._location = data_dir
        self._edgedb_cmd = [sys.executable, '-m', 'edb.server.main',
                            '-D', self._data_dir]

        if devmode.is_in_dev_mode():
            self._edgedb_cmd.append('--devmode')

        if testmode:
            self._edgedb_cmd.append('--testmode')

        if runstate_dir is None:
            runstate_dir = buildmeta.get_runstate_path(self._data_dir)

        self._runstate_dir = runstate_dir
        self._edgedb_cmd.extend(['--runstate-dir', runstate_dir])
        self._pg_cluster = get_pg_cluster(self._data_dir)
        self._pg_superuser = pg_superuser
        self._daemon_process = None
        self._port = port
        self._effective_port = None
        self._env = env
Code example #6
File: buildmeta.py  Project: sthagen/edgedb
def get_pg_config_path() -> pathlib.Path:
    if devmode.is_in_dev_mode():
        edb_path: os.PathLike = edb.server.__path__[0]  # type: ignore
        root = pathlib.Path(edb_path).parent.parent
        pg_config = (root / 'build' / 'postgres' / 'install' / 'bin' /
                     'pg_config').resolve()
        if not pg_config.is_file():
            try:
                pg_config = pathlib.Path(
                    get_build_metadata_value('PG_CONFIG_PATH'))
            except MetadataError:
                pass

        if not pg_config.is_file():
            raise MetadataError('DEV mode: Could not find PostgreSQL build, '
                                'run `pip install -e .`')

    else:
        pg_config = pathlib.Path(get_build_metadata_value('PG_CONFIG_PATH'))

        if not pg_config.is_file():
            raise MetadataError(
                f'invalid pg_config path: {pg_config!r}: file does not exist '
                f'or is not a regular file')

    return pg_config
Code example #7
    def __init__(self,
                 runstate_dir,
                 *,
                 port=edgedb_defines.EDGEDB_PORT,
                 env=None,
                 testmode=False,
                 log_level=None):
        self._edgedb_cmd = [sys.executable, '-m', 'edb.server.main']

        if log_level:
            self._edgedb_cmd.extend(['--log-level', log_level])

        if devmode.is_in_dev_mode():
            self._edgedb_cmd.append('--devmode')

        if testmode:
            self._edgedb_cmd.append('--testmode')

        self._runstate_dir = runstate_dir
        self._edgedb_cmd.extend(['--runstate-dir', runstate_dir])
        self._pg_cluster = self._get_pg_cluster()
        self._pg_connect_args = {}
        self._daemon_process = None
        self._port = port
        self._effective_port = None
        self._env = env
Code example #8
def get_runstate_path(data_dir: pathlib.Path) -> pathlib.Path:
    if devmode.is_in_dev_mode():
        return data_dir
    else:
        runstate_dir = get_build_metadata_value('RUNSTATE_DIR')
        if runstate_dir is not None:
            return pathlib.Path(runstate_dir)
        else:
            return data_dir
Code example #9
def load_std_schema() -> s_schema.Schema:
    std_dirs_hash = devmode.hash_dirs(CACHE_SRC_DIRS)
    schema = None

    if devmode.is_in_dev_mode():
        schema = devmode.read_dev_mode_cache(
            std_dirs_hash, 'transient-stdschema.pickle')

    if schema is None:
        schema = s_schema.Schema()
        for modname in s_schema.STD_LIB:
            schema = load_std_module(schema, modname)

    if devmode.is_in_dev_mode():
        devmode.write_dev_mode_cache(
            schema, std_dirs_hash, 'transient-stdschema.pickle')

    return schema
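
The schema loaders in examples #1, #9, #16, and #18 all follow the same shape: hash the schema source directories, try a pickle cache when dev mode is active, rebuild on a miss, and write the cache back only in dev mode. The following is a condensed, self-contained sketch of that pattern; the cache location and the hash_dirs/read_cache/write_cache helpers are illustrative stand-ins, not the real devmode or buildmeta APIs.

import hashlib
import os
import pathlib
import pickle

CACHE_ROOT = pathlib.Path('/tmp/schema-cache')  # illustrative location only


def hash_dirs(dirs):
    # Fold file paths and mtimes into one key so that source edits
    # invalidate the cache.
    h = hashlib.sha1()
    for d in dirs:
        for root, _subdirs, files in os.walk(d):
            for name in sorted(files):
                p = pathlib.Path(root, name)
                h.update(str(p).encode())
                h.update(str(p.stat().st_mtime_ns).encode())
    return h.hexdigest()


def read_cache(key, name):
    path = CACHE_ROOT / key / name
    if path.is_file():
        with open(path, 'rb') as f:
            return pickle.load(f)
    return None


def write_cache(obj, key, name):
    path = CACHE_ROOT / key / name
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)


def load_schema(src_dirs, build_schema, in_dev_mode):
    # build_schema is the expensive step; it only runs on a cache miss.
    key = hash_dirs(src_dirs)
    schema = read_cache(key, 'schema.pickle') if in_dev_mode else None
    if schema is None:
        schema = build_schema()
        if in_dev_mode:
            write_cache(schema, key, 'schema.pickle')
    return schema
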
Code example #10
File: main.py  Project: stjordanis/edgedb
def _init_parsers():
    # Initialize all parsers, rebuilding grammars if necessary.
    # Do it sooner rather than later so that we don't end up in
    # a situation where all our compiler processes are building
    # parsers in parallel.

    from edb.edgeql import parser as ql_parser

    ql_parser.preload(allow_rebuild=devmode.is_in_dev_mode(), paralellize=True)
Code example #11
    async def _test_server_ops_ignore_other_tenants(self, td, user):
        async with tb.start_edgedb_server(
                backend_dsn=f'postgres:///?user={user}&host={td}',
                runstate_dir=None if devmode.is_in_dev_mode() else td,
                reset_auth=True,
        ) as sd:
            con = await sd.connect()
            await con.aclose()

        async with tb.start_edgedb_server(
                backend_dsn=f'postgres:///?user=postgres&host={td}',
                runstate_dir=None if devmode.is_in_dev_mode() else td,
                reset_auth=True,
                ignore_other_tenants=True,
                env={'EDGEDB_TEST_CATALOG_VERSION': '3022_01_07_00_00'},
        ) as sd:
            con = await sd.connect()
            await con.aclose()
Code example #12
def server_main(*, insecure=False, **kwargs):
    logsetup.setup_logging(kwargs['log_level'], kwargs['log_to'])
    exceptions.install_excepthook()

    bump_rlimit_nofile()

    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

    if kwargs['devmode'] is not None:
        devmode.enable_dev_mode(kwargs['devmode'])

    if kwargs['temp_dir']:
        if kwargs['bootstrap']:
            abort('--temp-data-dir is incompatible with --bootstrap')
        if kwargs['data_dir']:
            abort('--temp-data-dir is incompatible with --data-dir/-D')
        if kwargs['runstate_dir']:
            abort('--temp-data-dir is incompatible with --runstate-dir')
        if kwargs['postgres_dsn']:
            abort('--temp-data-dir is incompatible with --postgres-dsn')
        kwargs['data_dir'] = kwargs['runstate_dir'] = pathlib.Path(
            tempfile.mkdtemp())
    else:
        if not kwargs['data_dir']:
            if kwargs['postgres_dsn']:
                pass
            elif devmode.is_in_dev_mode():
                kwargs['data_dir'] = os.path.expanduser('~/.edgedb')
            else:
                abort('Please specify the instance data directory '
                      'using the -D argument or the address of a remote '
                      'PostgreSQL cluster using the --postgres-dsn argument')
        elif kwargs['postgres_dsn']:
            abort('The -D and --postgres-dsn options are mutually exclusive.')

    kwargs['insecure'] = insecure

    if kwargs['background']:
        daemon_opts = {'detach_process': True}
        pidfile = kwargs['pidfile_dir'] / f".s.EDGEDB.{kwargs['port']}.lock"
        daemon_opts['pidfile'] = pidfile
        if kwargs['daemon_user']:
            daemon_opts['uid'] = kwargs['daemon_user']
        if kwargs['daemon_group']:
            daemon_opts['gid'] = kwargs['daemon_group']
        with daemon.DaemonContext(**daemon_opts):
            # TODO: setproctitle should probably be moved to where
            # management port is initialized, as that's where we know
            # the actual network port we listen on.  At this point
            # "port" can be "None".
            setproctitle.setproctitle(f"edgedb-server-{kwargs['port']}")

            run_server(ServerConfig(**kwargs))
    else:
        with devmode.CoverageConfig.enable_coverage_if_requested():
            run_server(ServerConfig(**kwargs))
Code example #13
def get_version() -> verutils.Version:
    if devmode.is_in_dev_mode():
        root = pathlib.Path(__file__).parent.parent.resolve()
        version = verutils.parse_version(get_version_from_scm(root))
    else:
        vertuple: List[Any] = list(get_build_metadata_value('VERSION'))
        vertuple[2] = verutils.VersionStage(vertuple[2])
        version = verutils.Version(*vertuple)

    return version
Code example #14
    def __init__(self,
                 runstate_dir: pathlib.Path,
                 *,
                 port: int = edgedb_defines.EDGEDB_PORT,
                 env: Optional[Mapping[str, str]] = None,
                 testmode: bool = False,
                 log_level: Optional[str] = None,
                 security: Optional[edgedb_args.ServerSecurityMode] = None,
                 http_endpoint_security: Optional[
                     edgedb_args.ServerEndpointSecurityMode] = None):
        self._edgedb_cmd = [sys.executable, '-m', 'edb.server.main']

        self._edgedb_cmd.append('--tls-cert-mode=generate_self_signed')

        if log_level:
            self._edgedb_cmd.extend(['--log-level', log_level])

        compiler_addr = os.getenv('EDGEDB_TEST_REMOTE_COMPILER')
        if compiler_addr:
            self._edgedb_cmd.extend([
                '--compiler-pool-mode',
                'remote',
                '--compiler-pool-addr',
                compiler_addr,
            ])

        if devmode.is_in_dev_mode():
            self._edgedb_cmd.append('--devmode')

        if testmode:
            self._edgedb_cmd.append('--testmode')

        if security:
            self._edgedb_cmd.extend((
                '--security',
                str(security),
            ))

        if http_endpoint_security:
            self._edgedb_cmd.extend((
                '--http-endpoint-security',
                str(http_endpoint_security),
            ))

        self._log_level = log_level
        self._runstate_dir = runstate_dir
        self._edgedb_cmd.extend(['--runstate-dir', str(runstate_dir)])
        self._pg_cluster: Optional[pgcluster.BaseCluster] = None
        self._pg_connect_args: Dict[str, Any] = {}
        self._daemon_process: Optional[subprocess.Popen[str]] = None
        self._port = port
        self._effective_port = None
        self._tls_cert_file = None
        self._env = env
Code example #15
File: bootstrap.py  Project: LeonardAukea/edgedb
async def _init_stdlib(cluster, conn, testmode):
    in_dev_mode = devmode.is_in_dev_mode()

    cache_hit = False
    sql_text = None

    if in_dev_mode:
        schema_cache = 'backend-stdschema.pickle'
        script_cache = 'backend-stdinitsql.pickle'
        testmode_flag = 'backend-stdtestmode.pickle'

        src_hash = devmode.hash_dirs(CACHE_SRC_DIRS)

        cached_testmode = devmode.read_dev_mode_cache(src_hash, testmode_flag)

        if cached_testmode is not None and cached_testmode == testmode:
            sql_text = devmode.read_dev_mode_cache(src_hash, script_cache)

        if sql_text is not None:
            schema = devmode.read_dev_mode_cache(src_hash, schema_cache)

    if sql_text is None or schema is None:
        schema, sql_text, new_types = await _make_stdlib(testmode)
    else:
        cache_hit = True

    await _execute_ddl(conn, sql_text)

    if not cache_hit:
        typemap = await conn.fetch(
            '''
            SELECT id, backend_id FROM edgedb.type WHERE id = any($1::uuid[])
        ''', new_types)
        for tid, backend_tid in typemap:
            t = schema.get_by_id(tid)
            schema = t.set_field_value(schema, 'backend_id', backend_tid)

    if not cache_hit and in_dev_mode:
        devmode.write_dev_mode_cache(schema, src_hash, schema_cache)
        devmode.write_dev_mode_cache(sql_text, src_hash, script_cache)
        devmode.write_dev_mode_cache(testmode, src_hash, testmode_flag)

    await _store_static_bin_cache(
        cluster,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await metaschema.generate_views(conn, schema)
    await metaschema.generate_support_views(conn, schema)

    return schema
Code example #16
File: lang.py  Project: syyunn/edgedb
def _load_std_schema():
    global _std_schema
    if _std_schema is None:
        std_dirs_hash = devmode.hash_dirs(s_std.CACHE_SRC_DIRS)
        schema = None

        if devmode.is_in_dev_mode():
            schema = devmode.read_dev_mode_cache(
                std_dirs_hash, 'transient-stdschema.pickle')

        if schema is None:
            schema = s_schema.Schema()
            for modname in s_schema.STD_LIB + ('stdgraphql',):
                schema = s_std.load_std_module(schema, modname)

        if devmode.is_in_dev_mode():
            devmode.write_dev_mode_cache(
                schema, std_dirs_hash, 'transient-stdschema.pickle')

        _std_schema = schema

    return _std_schema
Code example #17
    def __init__(self, *, server, loop, pg_addr, runstate_dir,
                 internal_runstate_dir, dbindex):

        self._server = server
        self._loop = loop
        self._pg_addr = pg_addr
        self._dbindex = dbindex
        self._runstate_dir = runstate_dir
        self._internal_runstate_dir = internal_runstate_dir

        self._devmode = devmode.is_in_dev_mode()

        self._compiler_manager = None
        self._serving = False
Code example #18
def _load_std_schema():
    global _std_schema
    if _std_schema is None:
        std_dirs_hash = buildmeta.hash_dirs(s_std.CACHE_SRC_DIRS)
        schema = None

        if devmode.is_in_dev_mode():
            schema = buildmeta.read_data_cache(
                std_dirs_hash, 'transient-stdschema.pickle')

        if schema is None:
            schema = s_schema.FlatSchema()
            for modname in s_schema.STD_SOURCES:
                schema = s_std.load_std_module(schema, modname)
            schema, _ = s_std.make_schema_version(schema)
            schema, _ = s_std.make_global_schema_version(schema)

        if devmode.is_in_dev_mode():
            buildmeta.write_data_cache(
                schema, std_dirs_hash, 'transient-stdschema.pickle')

        _std_schema = schema

    return _std_schema
Code example #19
def get_pg_config_path() -> pathlib.Path:
    if devmode.is_in_dev_mode():
        pg_config = _get_devmode_pg_config_path()
    else:
        try:
            pg_config = pathlib.Path(
                get_build_metadata_value('PG_CONFIG_PATH'))
        except MetadataError:
            pg_config = _get_devmode_pg_config_path()
        else:
            if not pg_config.is_file():
                raise MetadataError(
                    f'invalid pg_config path: {pg_config!r}: file does not '
                    f'exist or is not a regular file')

    return pg_config
Code example #20
File: buildmeta.py  Project: ambv/edgedb
def get_version() -> verutils.Version:
    if devmode.is_in_dev_mode():
        if setuptools_scm is None:
            raise MetadataError(
                'cannot determine build version: no setuptools_scm module')
        version = setuptools_scm.get_version(
            root='../..',
            relative_to=__file__,
            version_scheme=scm_version_scheme,
        )
        version = verutils.parse_version(version)
    else:
        vertuple: List[Any] = list(get_build_metadata_value('VERSION'))
        vertuple[2] = verutils.VersionStage(vertuple[2])
        version = verutils.Version(*vertuple)

    return version
Code example #21
File: buildmeta.py  Project: sauravshah31/edgedb
def get_version() -> verutils.Version:
    if devmode.is_in_dev_mode():
        root = pathlib.Path(__file__).parent.parent.parent.resolve()
        if setuptools_scm is None:
            raise MetadataError(
                'cannot determine build version: no setuptools_scm module')
        version = setuptools_scm.get_version(
            root=str(root),
            version_scheme=functools.partial(scm_version_scheme, root),
        )
        version = verutils.parse_version(version)
    else:
        vertuple: List[Any] = list(get_build_metadata_value('VERSION'))
        vertuple[2] = verutils.VersionStage(vertuple[2])
        version = verutils.Version(*vertuple)

    return version
Code example #22
def get_version() -> Version:
    if devmode.is_in_dev_mode():
        if pkg_resources is None:
            raise MetadataError(
                'cannot determine build version: no pkg_resources module')
        if setuptools_scm is None:
            raise MetadataError(
                'cannot determine build version: no setuptools_scm module')
        version = setuptools_scm.get_version(
            root='../..', relative_to=__file__)
        pv = pkg_resources.parse_version(version)
        version = parse_version(pv)
    else:
        vertuple = list(get_build_metadata_value('VERSION'))
        vertuple[2] = VersionStage(vertuple[2])
        version = Version(*vertuple)

    return version
Code example #23
async def test(pgdata_path):
    async with tb.start_edgedb_server(
            auto_shutdown=True,
            max_allowed_connections=None,
            postgres_dsn=
            f'postgres:///?user=postgres&host={pgdata_path}',
            reset_auth=True,
            runstate_dir=None
            if devmode.is_in_dev_mode() else pgdata_path,
    ) as sd:
        con = await sd.connect()
        try:
            max_connections = await con.query_one(
                'SELECT cfg::SystemConfig.__pg_max_connections LIMIT 1'
            )  # TODO: remove LIMIT 1 after #2402
            self.assertEqual(int(max_connections), actual)
        finally:
            await con.aclose()
Code example #24
async def _init_stdlib(cluster, conn, testmode):
    data_dir = pathlib.Path(cluster.get_data_dir())
    in_dev_mode = devmode.is_in_dev_mode()

    cache_hit = False
    sql_text = None

    cluster_schema_cache = data_dir / 'stdschema.pickle'

    if in_dev_mode:
        schema_cache = 'backend-stdschema.pickle'
        script_cache = 'backend-stdinitsql.pickle'
        testmode_flag = 'backend-stdtestmode.pickle'

        src_hash = devmode.hash_dirs(CACHE_SRC_DIRS)

        cached_testmode = devmode.read_dev_mode_cache(src_hash, testmode_flag)

        if cached_testmode is not None and cached_testmode == testmode:
            sql_text = devmode.read_dev_mode_cache(src_hash, script_cache)

        if sql_text is not None:
            schema = devmode.read_dev_mode_cache(src_hash, schema_cache)

    if sql_text is None or schema is None:
        schema, sql_text = await _make_stdlib(testmode)
    else:
        cache_hit = True

    await _execute_ddl(conn, sql_text)

    if not cache_hit and in_dev_mode:
        devmode.write_dev_mode_cache(schema, src_hash, schema_cache)
        devmode.write_dev_mode_cache(sql_text, src_hash, script_cache)
        devmode.write_dev_mode_cache(testmode, src_hash, testmode_flag)

    with open(cluster_schema_cache, 'wb') as f:
        pickle.dump(schema, file=f, protocol=pickle.HIGHEST_PROTOCOL)

    await metaschema.generate_views(conn, schema)
    await metaschema.generate_support_views(conn, schema)

    return schema
Code example #25
async def test(pgdata_path, tenant):
    async with tb.start_edgedb_server(
            tenant_id=tenant,
            reset_auth=True,
            backend_dsn=
            f'postgres:///?user=postgres&host={pgdata_path}',
            runstate_dir=None
            if devmode.is_in_dev_mode() else pgdata_path,
    ) as sd:
        con = await sd.connect()
        try:
            await con.execute(f'CREATE DATABASE {tenant}')
            await con.execute(f'CREATE SUPERUSER ROLE {tenant}')
            databases = await con.query('SELECT sys::Database.name')
            self.assertEqual(set(databases), {'edgedb', tenant})
            roles = await con.query('SELECT sys::Role.name')
            self.assertEqual(set(roles), {'edgedb', tenant})
        finally:
            await con.aclose()
Code example #26
async def _init_stdlib(cluster, conn, testmode, global_ids):
    in_dev_mode = devmode.is_in_dev_mode()

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if specified_cache_dir:
        cache_dir = pathlib.Path(specified_cache_dir)
    else:
        cache_dir = None

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'
    src_hash = buildmeta.hash_dirs(
        buildmeta.get_cache_src_dirs(),
        extra_files=[__file__],
    )

    stdlib = buildmeta.read_data_cache(src_hash,
                                       stdlib_cache,
                                       source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(src_hash,
                                          tpldbdump_cache,
                                          source_dir=cache_dir,
                                          pickled=False)

    if stdlib is None:
        logger.info('Compiling the standard library...')
        stdlib = await _make_stdlib(in_dev_mode or testmode, global_ids)

    logger.info('Creating the necessary PostgreSQL extensions...')
    await metaschema.create_pg_extensions(conn)

    if tpldbdump is None:
        logger.info('Populating internal SQL structures...')
        await metaschema.bootstrap(conn)
        logger.info('Executing the standard library...')
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpldbdump = cluster.dump_database(
                edbdef.EDGEDB_TEMPLATE_DB,
                exclude_schemas=['edgedbinstdata', 'edgedbext'],
            )

            # Excluding the "edgedbext" schema above apparently
            # doesn't apply to extensions created in that schema,
            # so we have to resort to commenting out extension
            # statements in the dump.
            tpldbdump = re.sub(
                rb'^(CREATE|COMMENT ON) EXTENSION.*$',
                rb'-- \g<0>',
                tpldbdump,
                flags=re.MULTILINE,
            )

            global_metadata = await conn.fetchval(f'''\
                SELECT edgedb.shobj_metadata(
                    (SELECT oid FROM pg_database
                     WHERE datname = {ql(edbdef.EDGEDB_TEMPLATE_DB)}),
                    'pg_database'
                )''')

            pl_block = dbops.PLTopBlock()

            dbops.SetMetadata(
                dbops.Database(name=edbdef.EDGEDB_TEMPLATE_DB),
                json.loads(global_metadata),
            ).generate(pl_block)

            tpldbdump += b'\n' + pl_block.to_string().encode('utf-8')

            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )

            buildmeta.write_data_cache(
                stdlib,
                src_hash,
                stdlib_cache,
                target_dir=cache_dir,
            )
    else:
        logger.info('Initializing the standard library...')
        await metaschema._execute_sql_script(conn, tpldbdump.decode('utf-8'))

        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.

        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::ScalarType
            FILTER .builtin AND NOT (.abstract ?? False)
            SET {
                backend_id := sys::_get_pg_type_for_scalar_type(.id)
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            s_std.get_std_module_text('_testmode'),
            stdlib,
        )
        await conn.execute(testmode_sql)
        await metaschema.generate_support_views(
            conn,
            stdlib.reflschema,
        )

    # Make sure that schema backend_id properties are in sync with
    # the database.

    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT (.abstract ?? False);
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    await _store_static_bin_cache(
        cluster,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'global_schema',
        pickle.dumps(stdlib.global_schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_text_cache(
        cluster,
        'local_intro_query',
        stdlib.local_intro_query,
    )

    await _store_static_text_cache(
        cluster,
        'global_intro_query',
        stdlib.global_intro_query,
    )

    await metaschema.generate_support_views(conn, stdlib.reflschema)
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )

    await metaschema.generate_more_support_functions(conn, compiler,
                                                     stdlib.reflschema,
                                                     testmode)

    return stdlib, compiler
Code example #27
def run_server(args: ServerConfig):
    ver = buildmeta.get_version()

    if devmode.is_in_dev_mode():
        logger.info(f'EdgeDB server ({ver}) starting in DEV mode.')
    else:
        logger.info(f'EdgeDB server ({ver}) starting.')

    _init_parsers()

    pg_cluster_init_by_us = False
    pg_cluster_started_by_us = False

    if args.data_dir:
        cluster = pgcluster.get_local_pg_cluster(args.data_dir)
        default_runstate_dir = cluster.get_data_dir()
        cluster.set_connection_params(
            pgconnparams.ConnectionParameters(
                user='******',
                database='template1',
            ), )
    elif args.postgres_dsn:
        cluster = pgcluster.get_remote_pg_cluster(args.postgres_dsn)
        default_runstate_dir = None
    else:
        # This should have been checked by main() already,
        # but be extra careful.
        abort('Neither the data directory nor the remote Postgres DSN '
              'are specified')

    try:
        pg_cluster_init_by_us = cluster.ensure_initialized()

        cluster_status = cluster.get_status()

        specified_runstate_dir: Optional[pathlib.Path]
        if args.runstate_dir:
            specified_runstate_dir = args.runstate_dir
        elif args.bootstrap:
            # When bootstrapping a new EdgeDB instance it is often necessary
            # to avoid using the main runstate dir due to lack of permissions,
            # possibility of conflict with another running instance, etc.
            # The --bootstrap mode also often runs unattended, i.e.
            # as a post-install hook during package installation.
            specified_runstate_dir = default_runstate_dir
        else:
            specified_runstate_dir = None

        runstate_dir_mgr = _ensure_runstate_dir(
            default_runstate_dir,
            specified_runstate_dir,
        )

        with runstate_dir_mgr as runstate_dir, \
                _internal_state_dir(runstate_dir) as internal_runstate_dir:

            if cluster_status == 'stopped':
                cluster.start(port=edgedb_cluster.find_available_port())
                pg_cluster_started_by_us = True

            elif cluster_status != 'running':
                abort('Could not start database cluster in %s', args.data_dir)

            need_cluster_restart = _init_cluster(cluster, args)

            if need_cluster_restart and pg_cluster_started_by_us:
                logger.info('Restarting server to reload configuration...')
                cluster_port = cluster.get_connection_spec()['port']
                cluster.stop()
                cluster.start(port=cluster_port)

            if not args.bootstrap:
                if args.data_dir:
                    cluster.set_connection_params(
                        pgconnparams.ConnectionParameters(
                            user=defines.EDGEDB_SUPERUSER,
                            database=defines.EDGEDB_SUPERUSER_DB,
                        ), )

                _run_server(cluster, args, runstate_dir, internal_runstate_dir)

    except BaseException:
        if pg_cluster_init_by_us and not _server_initialized:
            logger.warning('server bootstrap did not complete successfully, '
                           'removing the data directory')
            if cluster.get_status() == 'running':
                cluster.stop()
            cluster.destroy()
        raise

    finally:
        if args.temp_dir:
            if cluster.get_status() == 'running':
                cluster.stop()
            cluster.destroy()

        elif pg_cluster_started_by_us:
            cluster.stop()
Code example #28
def parse_args(**kwargs: Any):
    kwargs['bind_addresses'] = kwargs.pop('bind_address')

    if kwargs['echo_runtime_info']:
        warnings.warn(
            "The `--echo-runtime-info` option is deprecated, use "
            "`--emit-server-status` instead.",
            DeprecationWarning,
        )

    if kwargs['bootstrap']:
        warnings.warn(
            "Option `--bootstrap` is deprecated, use `--bootstrap-only`",
            DeprecationWarning,
        )
        kwargs['bootstrap_only'] = True

    kwargs.pop('bootstrap', False)

    if kwargs['default_database_user']:
        if kwargs['default_database_user'] == 'edgedb':
            warnings.warn(
                "Option `--default-database-user` is deprecated."
                " Role `edgedb` is always created and"
                " no role named after unix user is created any more.",
                DeprecationWarning,
            )
        else:
            warnings.warn(
                "Option `--default-database-user` is deprecated."
                " Please create the role explicitly.",
                DeprecationWarning,
            )

    if kwargs['default_database']:
        if kwargs['default_database'] == 'edgedb':
            warnings.warn(
                "Option `--default-database` is deprecated."
                " Database `edgedb` is always created and"
                " no database named after unix user is created any more.",
                DeprecationWarning,
            )
        else:
            warnings.warn(
                "Option `--default-database` is deprecated."
                " Please create the database explicitly.",
                DeprecationWarning,
            )

    if kwargs['auto_shutdown']:
        warnings.warn(
            "The `--auto-shutdown` option is deprecated, use "
            "`--auto-shutdown-after` instead.",
            DeprecationWarning,
        )
        if kwargs['auto_shutdown_after'] < 0:
            kwargs['auto_shutdown_after'] = 0

    del kwargs['auto_shutdown']

    if kwargs['postgres_dsn']:
        warnings.warn(
            "The `--postgres-dsn` option is deprecated, use "
            "`--backend-dsn` instead.",
            DeprecationWarning,
        )
        if not kwargs['backend_dsn']:
            kwargs['backend_dsn'] = kwargs['postgres_dsn']

    del kwargs['postgres_dsn']

    if kwargs['generate_self_signed_cert']:
        warnings.warn(
            "The `--generate-self-signed-cert` option is deprecated, use "
            "`--tls-cert-mode=generate_self_signed` instead.",
            DeprecationWarning,
        )
        if kwargs['tls_cert_mode'] == 'default':
            kwargs['tls_cert_mode'] = 'generate_self_signed'

    del kwargs['generate_self_signed_cert']

    if os.environ.get('EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS') == "1":
        if kwargs['binary_endpoint_security'] == "tls":
            abort("The value of deprecated "
                  "EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS environment "
                  "variable disagrees with --binary-endpoint-security")
        else:
            if kwargs['binary_endpoint_security'] == "default":
                warnings.warn(
                    "EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS is "
                    "deprecated. Use EDGEDB_SERVER_BINARY_ENDPOINT_SECURITY "
                    "instead.",
                    DeprecationWarning,
                )
            kwargs['binary_endpoint_security'] = 'optional'

    if os.environ.get('EDGEDB_SERVER_ALLOW_INSECURE_HTTP_CLIENTS') == "1":
        if kwargs['http_endpoint_security'] == "tls":
            abort("The value of deprecated "
                  "EDGEDB_SERVER_ALLOW_INSECURE_HTTP_CLIENTS environment "
                  "variable disagrees with --http-endpoint-security")
        else:
            if kwargs['http_endpoint_security'] == "default":
                warnings.warn(
                    "EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS is "
                    "deprecated. Use EDGEDB_SERVER_BINARY_ENDPOINT_SECURITY "
                    "instead.",
                    DeprecationWarning,
                )
            kwargs['http_endpoint_security'] = 'optional'

    if kwargs['security'] == 'default':
        if devmode.is_in_dev_mode():
            kwargs['security'] = 'insecure_dev_mode'
        else:
            kwargs['security'] = 'strict'

    if kwargs['security'] == 'insecure_dev_mode':
        if kwargs['http_endpoint_security'] == 'default':
            kwargs['http_endpoint_security'] = 'optional'
        if not kwargs['default_auth_method']:
            kwargs['default_auth_method'] = 'Trust'
        if kwargs['tls_cert_mode'] == 'default':
            kwargs['tls_cert_mode'] = 'generate_self_signed'
    elif not kwargs['default_auth_method']:
        kwargs['default_auth_method'] = 'SCRAM'

    if kwargs['binary_endpoint_security'] == 'default':
        kwargs['binary_endpoint_security'] = 'tls'

    if kwargs['http_endpoint_security'] == 'default':
        kwargs['http_endpoint_security'] = 'tls'

    if kwargs['tls_cert_mode'] == 'default':
        kwargs['tls_cert_mode'] = 'require_file'

    kwargs['security'] = ServerSecurityMode(kwargs['security'])
    kwargs['binary_endpoint_security'] = ServerEndpointSecurityMode(
        kwargs['binary_endpoint_security'])
    kwargs['http_endpoint_security'] = ServerEndpointSecurityMode(
        kwargs['http_endpoint_security'])
    kwargs['tls_cert_mode'] = ServerTlsCertMode(kwargs['tls_cert_mode'])
    kwargs['default_auth_method'] = ServerAuthMethod(
        kwargs['default_auth_method'])
    kwargs['compiler_pool_mode'] = CompilerPoolMode(
        kwargs['compiler_pool_mode'])
    if kwargs['compiler_pool_size'] is None:
        if kwargs['compiler_pool_mode'] == CompilerPoolMode.Remote:
            # This maps to a local semaphore that controls concurrency;
            # 2 means this is a small EdgeDB instance that can only issue
            # at most 2 concurrent compile requests at a time.
            kwargs['compiler_pool_size'] = 2
        else:
            kwargs['compiler_pool_size'] = compute_default_compiler_pool_size()
    if kwargs['compiler_pool_mode'] == CompilerPoolMode.Remote:
        if kwargs['compiler_pool_addr'] is None:
            kwargs['compiler_pool_addr'] = (
                "localhost", defines.EDGEDB_REMOTE_COMPILER_PORT)
    elif kwargs['compiler_pool_addr'] is not None:
        abort('--compiler-pool-addr is only meaningful '
              'under --compiler-pool-mode=remote')

    if kwargs['temp_dir']:
        if kwargs['data_dir']:
            abort('--temp-dir is incompatible with --data-dir/-D')
        if kwargs['runstate_dir']:
            abort('--temp-dir is incompatible with --runstate-dir')
        if kwargs['backend_dsn']:
            abort('--temp-dir is incompatible with --backend-dsn')
        kwargs['data_dir'] = kwargs['runstate_dir'] = pathlib.Path(
            tempfile.mkdtemp())
    else:
        if not kwargs['data_dir']:
            if kwargs['backend_dsn']:
                pass
            elif devmode.is_in_dev_mode():
                data_dir = devmode.get_dev_mode_data_dir()
                if not data_dir.parent.exists():
                    data_dir.parent.mkdir(exist_ok=True, parents=True)

                kwargs["data_dir"] = data_dir
            else:
                abort('Please specify the instance data directory '
                      'using the -D argument or the address of a remote '
                      'backend cluster using the --backend-dsn argument')
        elif kwargs['backend_dsn']:
            abort('The -D and --backend-dsn options are mutually exclusive.')

    if kwargs['tls_key_file'] and not kwargs['tls_cert_file']:
        abort('When --tls-key-file is set, --tls-cert-file must also be set.')

    if kwargs['tls_cert_file'] and not kwargs['tls_key_file']:
        abort('When --tls-cert-file is set, --tls-key-file must also be set.')

    self_signing = kwargs['tls_cert_mode'] is ServerTlsCertMode.SelfSigned

    if not kwargs['tls_cert_file']:
        if kwargs['data_dir']:
            tls_cert_file = kwargs['data_dir'] / TLS_CERT_FILE_NAME
            tls_key_file = kwargs['data_dir'] / TLS_KEY_FILE_NAME
        elif self_signing:
            tls_cert_file = pathlib.Path('<runstate>') / TLS_CERT_FILE_NAME
            tls_key_file = pathlib.Path('<runstate>') / TLS_KEY_FILE_NAME
        else:
            abort(
                "no TLS certificate specified and certificate auto-generation"
                " has not been requested; see help for --tls-cert-mode",
                exit_code=10,
            )
        kwargs['tls_cert_file'] = tls_cert_file
        kwargs['tls_key_file'] = tls_key_file

    if not kwargs['bootstrap_only'] and not self_signing:
        if not kwargs['tls_cert_file'].exists():
            abort(
                f"TLS certificate file \"{kwargs['tls_cert_file']}\""
                " does not exist and certificate auto-generation has not been"
                " requested; see help for --tls-cert-mode",
                exit_code=10,
            )

    if (kwargs['tls_cert_file'] and kwargs['tls_cert_file'].exists()
            and not kwargs['tls_cert_file'].is_file()):
        abort(f"TLS certificate file \"{kwargs['tls_cert_file']}\""
              " is not a regular file")

    if (kwargs['tls_key_file'] and kwargs['tls_key_file'].exists()
            and not kwargs['tls_key_file'].is_file()):
        abort(f"TLS private key file \"{kwargs['tls_key_file']}\""
              " is not a regular file")

    if kwargs['log_level']:
        kwargs['log_level'] = kwargs['log_level'].lower()[0]

    bootstrap_script_text: Optional[str]
    if kwargs['bootstrap_script']:
        with open(kwargs['bootstrap_script']) as f:
            bootstrap_script_text = f.read()
    elif kwargs['bootstrap_command']:
        bootstrap_script_text = kwargs['bootstrap_command']
    else:
        bootstrap_script_text = None

    if bootstrap_script_text is None:
        startup_script = None
    else:
        startup_script = StartupScript(
            text=bootstrap_script_text,
            database=(kwargs['default_database']
                      or defines.EDGEDB_SUPERUSER_DB),
            user=(kwargs['default_database_user'] or defines.EDGEDB_SUPERUSER),
        )

    status_sinks = []

    if status_sink_addrs := kwargs['emit_server_status']:
        for status_sink_addr in status_sink_addrs:
            if status_sink_addr.startswith('file://'):
                status_sink = _status_sink_file(
                    status_sink_addr[len('file://'):])
            elif status_sink_addr.startswith('fd://'):
                fileno_str = status_sink_addr[len('fd://'):]
                try:
                    fileno = int(fileno_str)
                except ValueError:
                    abort(f'invalid file descriptor number in '
                          f'--emit-server-status: {fileno_str!r}')

                status_sink = _status_sink_fd(fileno)
            elif m := re.match(r'^(\w+)://', status_sink_addr):
                abort(
                    f'unsupported destination scheme in --emit-server-status: '
                    f'{m.group(1)}')
            else:
                # Assume it's a file.
                status_sink = _status_sink_file(status_sink_addr)

            status_sinks.append(status_sink)
Code example #29
def get_runstate_path(data_dir: os.PathLike) -> os.PathLike:
    if devmode.is_in_dev_mode():
        return data_dir
    else:
        return pathlib.Path(get_build_metadata_value('RUNSTATE_DIR'))
Code example #30
File: buildmeta.py  Project: sthagen/edgedb
def get_shared_data_dir_path() -> pathlib.Path:
    if devmode.is_in_dev_mode():
        return devmode.get_dev_mode_cache_dir()  # type: ignore[return-value]
    else:
        return pathlib.Path(get_build_metadata_value('SHARED_DATA_DIR'))
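
In the cluster constructors (examples #3, #5, #7, and #14), the only job of is_in_dev_mode() is to decide whether --devmode gets appended to the edb.server.main command line, next to the --testmode and --log-level flags. A condensed sketch of that flag handling is shown below; build_server_cmd is a hypothetical helper, and a caller would pass devmode.is_in_dev_mode() as dev_mode.

import sys


def build_server_cmd(*, runstate_dir, dev_mode, testmode=False, log_level=None):
    # Mirror the flag handling used by the cluster __init__ examples above.
    cmd = [sys.executable, '-m', 'edb.server.main']
    if log_level:
        cmd.extend(['--log-level', log_level])
    if dev_mode:
        cmd.append('--devmode')
    if testmode:
        cmd.append('--testmode')
    cmd.extend(['--runstate-dir', str(runstate_dir)])
    return cmd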