Example 1
def _interpret_wrong_object_type(code, schema, err_details, hint):
    if err_details.message == 'covariance error':
        ptr = schema.get_by_id(uuidgen.UUID(err_details.column_name))
        wrong_obj = schema.get_by_id(uuidgen.UUID(err_details.table_name))

        vn = ptr.get_verbosename(schema, with_parent=True)
        return errors.InvalidLinkTargetError(
            f"invalid target for {vn}: '{wrong_obj.get_name(schema)}'"
            f" (expecting '{ptr.get_target(schema).get_name(schema)}')")

    return errors.InternalServerError(err_details.message)
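
In the 'covariance error' branch above, the backend reports the violated pointer's id in column_name and the offending object type's id in table_name; both are resolved against the schema to build the message. A self-contained sketch of the resulting message, with stand-in values in place of the real schema lookups:

# Stand-in values; the real code obtains these via schema.get_by_id()
# plus get_verbosename()/get_name().
vn = "link 'friends' of object type 'default::User'"
wrong_obj_name = 'default::SystemUser'
expected_name = 'default::User'

print(
    f"invalid target for {vn}: '{wrong_obj_name}'"
    f" (expecting '{expected_name}')"
)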
Example 2
def get_object_from_backend_name(schema, metaclass, name, *, aspect=None):

    if issubclass(metaclass, s_objtypes.ObjectType):
        table_name = name[1]
        obj_id = uuidgen.UUID(table_name)
        return schema.get_by_id(obj_id)

    elif issubclass(metaclass, s_pointers.Pointer):
        obj_id = uuidgen.UUID(name)
        return schema.get_by_id(obj_id)

    else:
        raise ValueError(
            f'cannot determine object from backend name for {metaclass!r}')
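
The two backend-name shapes this helper accepts differ by metaclass: object types arrive as a (schema_name, table_name) pair whose table name is the type's UUID, while pointers arrive as a bare UUID string. A runnable illustration with made-up ids (the stdlib uuid module stands in for the internal uuidgen wrapper):

import uuid

# Object type: the second element of the name pair is the type id.
objtype_backend_name = ('edgedbpub', '6a34c3c9-7a49-4b7e-8bbd-bf2e0fd54b18')
obj_id = uuid.UUID(objtype_backend_name[1])

# Pointer: the backend name is the id itself.
pointer_backend_name = '3f4a8cf2-9c91-4f6a-a1d2-1f2b3c4d5e6f'
ptr_id = uuid.UUID(pointer_backend_name)

print(obj_id, ptr_id)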
Example 3
def replace(r):
    type_id = uuidgen.UUID(r.group('id'))
    stype = schema.get_by_id(type_id, None)
    if stype:
        return f'{r.group("p")} {stype.get_displayname(schema)!r}'
    else:
        return f'{r.group("p")} {r.group("v")}'
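
replace is a substitution callback for re.sub over a backend error message. A self-contained sketch with a made-up pattern and a stub lookup table in place of the real schema (only the group names 'p', 'id' and 'v' are taken from the original):

import re
import uuid

# Hypothetical pattern: a prefix word, a type id, and the raw value.
pattern = re.compile(r'(?P<p>type) (?P<id>[0-9a-f-]{36}) (?P<v>\w+)')

display_names = {}  # stub for schema lookups; empty, so the fallback fires

def replace(r):
    type_id = uuid.UUID(r.group('id'))
    stype = display_names.get(type_id)
    if stype:
        return f'{r.group("p")} {stype!r}'
    else:
        return f'{r.group("p")} {r.group("v")}'

msg = 'type 6a34c3c9-7a49-4b7e-8bbd-bf2e0fd54b18 foo'
print(pattern.sub(replace, msg))  # -> type foo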
Example 4
def _parse_expression(val: Dict[str, Any]) -> s_expr.Expression:
    refids = frozenset(uuidgen.UUID(r) for r in val['refs'])
    return s_expr.Expression(text=val['text'],
                             refs=s_obj.ObjectSet(
                                 refids,
                                 _private_init=True,
                             ))
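
For reference, a stand-in for the payload shape _parse_expression expects from the introspection query (values made up): the expression source under 'text' and the ids of schema objects it references under 'refs'.

import uuid

val = {
    'text': 'SELECT .name',
    'refs': ['6a34c3c9-7a49-4b7e-8bbd-bf2e0fd54b18'],
}
# _parse_expression turns the refs into a frozenset of UUIDs
# before wrapping them in an ObjectSet:
refids = frozenset(uuid.UUID(r) for r in val['refs'])
print(refids)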
Example 5
async def _init_stdlib(cluster, conn, testmode, global_ids):
    in_dev_mode = devmode.is_in_dev_mode()

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if specified_cache_dir:
        cache_dir = pathlib.Path(specified_cache_dir)
    else:
        cache_dir = None

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'
    src_hash = buildmeta.hash_dirs(
        buildmeta.get_cache_src_dirs(),
        extra_files=[__file__],
    )

    stdlib = buildmeta.read_data_cache(src_hash,
                                       stdlib_cache,
                                       source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(src_hash,
                                          tpldbdump_cache,
                                          source_dir=cache_dir,
                                          pickled=False)

    if stdlib is None:
        logger.info('Compiling the standard library...')
        stdlib = await _make_stdlib(in_dev_mode or testmode, global_ids)

    logger.info('Creating the necessary PostgreSQL extensions...')
    await metaschema.create_pg_extensions(conn)

    if tpldbdump is None:
        logger.info('Populating internal SQL structures...')
        await metaschema.bootstrap(conn)
        logger.info('Executing the standard library...')
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpldbdump = cluster.dump_database(
                edbdef.EDGEDB_TEMPLATE_DB,
                exclude_schemas=['edgedbinstdata', 'edgedbext'],
            )

            # Excluding the "edgedbext" schema above apparently
            # doesn't apply to extensions created in that schema,
            # so we have to resort to commenting out extension
            # statements in the dump.
            tpldbdump = re.sub(
                rb'^(CREATE|COMMENT ON) EXTENSION.*$',
                rb'-- \g<0>',
                tpldbdump,
                flags=re.MULTILINE,
            )

            global_metadata = await conn.fetchval(f'''\
                SELECT edgedb.shobj_metadata(
                    (SELECT oid FROM pg_database
                     WHERE datname = {ql(edbdef.EDGEDB_TEMPLATE_DB)}),
                    'pg_database'
                )''')

            pl_block = dbops.PLTopBlock()

            dbops.SetMetadata(
                dbops.Database(name=edbdef.EDGEDB_TEMPLATE_DB),
                json.loads(global_metadata),
            ).generate(pl_block)

            tpldbdump += b'\n' + pl_block.to_string().encode('utf-8')

            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )

            buildmeta.write_data_cache(
                stdlib,
                src_hash,
                stdlib_cache,
                target_dir=cache_dir,
            )
    else:
        logger.info('Initializing the standard library...')
        await metaschema._execute_sql_script(conn, tpldbdump.decode('utf-8'))

        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.

        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::ScalarType
            FILTER .builtin AND NOT (.abstract ?? False)
            SET {
                backend_id := sys::_get_pg_type_for_scalar_type(.id)
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            s_std.get_std_module_text('_testmode'),
            stdlib,
        )
        await conn.execute(testmode_sql)
        await metaschema.generate_support_views(
            conn,
            stdlib.reflschema,
        )

    # Make sure that schema backend_id properties are in sync with
    # the database.

    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT (.abstract ?? False);
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    await _store_static_bin_cache(
        cluster,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'global_schema',
        pickle.dumps(stdlib.global_schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_text_cache(
        cluster,
        'local_intro_query',
        stdlib.local_intro_query,
    )

    await _store_static_text_cache(
        cluster,
        'global_intro_query',
        stdlib.global_intro_query,
    )

    await metaschema.generate_support_views(conn, stdlib.reflschema)
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )

    await metaschema.generate_more_support_functions(conn, compiler,
                                                     stdlib.reflschema,
                                                     testmode)

    return stdlib, compiler
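
The stdlib pickle and the template-database dump are cached under a hash of their source inputs, so any source change invalidates both. A minimal sketch of that read/write pattern, assuming a simple hash-prefixed file layout (buildmeta's actual on-disk format may differ):

import hashlib
import pathlib
import pickle

def read_data_cache(src_hash, name, source_dir):
    path = pathlib.Path(source_dir) / f'{src_hash}-{name}'
    if path.exists():
        return pickle.loads(path.read_bytes())
    return None  # miss: the caller recompiles and re-dumps

def write_data_cache(obj, src_hash, name, target_dir):
    path = pathlib.Path(target_dir) / f'{src_hash}-{name}'
    path.write_bytes(pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL))

src_hash = hashlib.sha1(b'contents of the stdlib source dirs').hexdigest()
write_data_cache({'sqltext': '...'}, src_hash, 'stdlib.pickle', '/tmp')
print(read_data_cache(src_hash, 'stdlib.pickle', '/tmp'))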
Example 6
def parse_into(
    schema: s_schema.Schema,
    data: Sequence[str],
    schema_class_layout: Dict[Type[s_obj.Object], sr_struct.SchemaTypeLayout],
) -> s_schema.Schema:
    """Parse JSON-encoded schema objects and populate the schema with them.

    Args:
        schema:
            A schema instance to use as a starting point.
        data:
            A sequence of JSON-encoded schema object data as returned
            by an introspection query.
        schema_class_layout:
            A mapping describing schema class layout in the reflection,
            as returned from
            :func:`schema.reflection.structure.generate_structure`.

    Returns:
        A schema instance including objects encoded in the provided
        JSON sequence.
    """

    id_to_type = {}
    id_to_data = {}
    name_to_id = {}
    shortname_to_id = collections.defaultdict(set)
    globalname_to_id = {}
    dict_of_dicts: Callable[
        [],
        Dict[Tuple[Type[s_obj.Object], str], Dict[uuid.UUID, None]],
    ] = functools.partial(collections.defaultdict, dict)  # type: ignore
    refs_to: Dict[
        uuid.UUID,
        Dict[Tuple[Type[s_obj.Object], str], Dict[uuid.UUID, None]]
    ] = collections.defaultdict(dict_of_dicts)

    objects: Dict[uuid.UUID, Tuple[s_obj.Object, Dict[str, Any]]] = {}

    for entry_json in data:
        entry = json.loads(entry_json)
        _, _, clsname = entry['_tname'].rpartition('::')
        mcls = s_obj.ObjectMeta.get_schema_metaclass(clsname)
        if mcls is None:
            raise ValueError(
                f'unexpected type in schema reflection: {clsname}')
        objid = uuidgen.UUID(entry['id'])
        objects[objid] = (mcls._create_from_id(objid), entry)

    refdict_updates = {}

    for objid, (obj, entry) in objects.items():
        mcls = type(obj)
        name = entry['name__internal']
        layout = schema_class_layout[mcls]

        if isinstance(obj, s_obj.QualifiedObject):
            name = s_name.Name(name)
            name_to_id[name] = objid
        else:
            globalname_to_id[mcls, name] = objid

        if isinstance(obj, (s_func.Function, s_oper.Operator)):
            shortname = mcls.get_shortname_static(name)
            shortname_to_id[mcls, shortname].add(objid)

        id_to_type[objid] = obj

        objdata: Dict[str, Any] = {}
        val: Any

        for k, v in entry.items():
            desc = layout.get(k)
            if desc is None:
                continue

            fn = desc.fieldname

            if desc.storage is not None:
                if v is None:
                    pass
                elif desc.storage.ptrkind == 'link':
                    refid = uuidgen.UUID(v['id'])
                    newobj = objects.get(refid)
                    if newobj is not None:
                        val = newobj[0]
                    else:
                        val = schema.get_by_id(refid)
                    objdata[fn] = val
                    refs_to[val.id][mcls, fn][objid] = None

                elif desc.storage.ptrkind == 'multi link':
                    ftype = mcls.get_field(fn).type
                    if issubclass(ftype, s_obj.ObjectDict):
                        refids = ftype._container(
                            uuidgen.UUID(e['value']) for e in v)
                        val = ftype(refids, _private_init=True)
                        val._keys = tuple(e['name'] for e in v)
                    else:
                        refids = ftype._container(
                            uuidgen.UUID(e['id']) for e in v)
                        val = ftype(refids, _private_init=True)
                    objdata[fn] = val
                    for refid in refids:
                        refs_to[refid][mcls, fn][objid] = None

                elif desc.storage.shadow_ptrkind:
                    val = entry[f'{k}__internal']
                    ftype = mcls.get_field(fn).type
                    if val is not None and type(val) is not ftype:
                        if issubclass(ftype, s_expr.Expression):
                            val = _parse_expression(val)
                            for refid in val.refs.ids(schema):
                                refs_to[refid][mcls, fn][objid] = None
                        elif issubclass(ftype, s_expr.ExpressionList):
                            exprs = []
                            for e_dict in val:
                                e = _parse_expression(e_dict)
                                assert e.refs is not None
                                for refid in e.refs.ids(schema):
                                    refs_to[refid][mcls, fn][objid] = None
                                exprs.append(e)
                            val = ftype(exprs)
                        else:
                            val = ftype(val)
                    objdata[fn] = val

                else:
                    ftype = mcls.get_field(fn).type
                    if type(v) is not ftype:
                        objdata[fn] = ftype(v)
                    else:
                        objdata[fn] = v

            elif desc.is_refdict:
                ftype = mcls.get_field(fn).type
                refids = ftype._container(uuidgen.UUID(e['id']) for e in v)
                for refid in refids:
                    refs_to[refid][mcls, fn][objid] = None

                val = ftype(refids, _private_init=True)
                objdata[fn] = val
                if desc.properties:
                    for e_dict in v:
                        refdict_updates[uuidgen.UUID(e_dict['id'])] = {
                            p: pv for p in desc.properties
                            if (pv := e_dict[f'@{p}']) is not None
                        }

        id_to_data[objid] = immutables.Map(objdata)

    for objid, updates in refdict_updates.items():
        if updates:
            id_to_data[objid] = id_to_data[objid].update(updates)

    with schema._refs_to.mutate() as mm:
        for referred_id, refdata in refs_to.items():
            try:
                refs = mm[referred_id]
            except KeyError:
                refs = immutables.Map((
                    (k, immutables.Map(r)) for k, r in refdata.items()
                ))
            else:
                refs_update = {}
                for k, referrers in refdata.items():
                    try:
                        rt = refs[k]
                    except KeyError:
                        rt = immutables.Map(referrers)
                    else:
                        rt = rt.update(referrers)
                    refs_update[k] = rt

                refs = refs.update(refs_update)

            mm[referred_id] = refs

    schema = schema._replace(
        id_to_type=schema._id_to_type.update(id_to_type),
        id_to_data=schema._id_to_data.update(id_to_data),
        name_to_id=schema._name_to_id.update(name_to_id),
        shortname_to_id=schema._shortname_to_id.update(
            (k, frozenset(v)) for k, v in shortname_to_id.items()
        ),
        globalname_to_id=schema._globalname_to_id.update(globalname_to_id),
        refs_to=mm.finish(),
    )

    return schema
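
refs_to above is a reverse index: for every referenced object id it maps a (referrer class, field name) key to the set of referrer ids, stored as a dict with None values so it doubles as an ordered set. A minimal standalone version of the same structure:

import collections
import functools
import uuid

dict_of_dicts = functools.partial(collections.defaultdict, dict)
refs_to = collections.defaultdict(dict_of_dicts)

referrer = uuid.uuid4()   # an object holding a reference
referred = uuid.uuid4()   # the object it points at

# Record: referrer references referred through SomeClass.target.
refs_to[referred]['SomeClass', 'target'][referrer] = None

# Reverse lookup: who points at `referred`, and through which field?
for (cls, field), referrers in refs_to[referred].items():
    print(cls, field, list(referrers))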
Example 7
async def _init_stdlib(cluster, conn, testmode, global_ids):
    in_dev_mode = devmode.is_in_dev_mode()

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if specified_cache_dir:
        cache_dir = pathlib.Path(specified_cache_dir)
    else:
        cache_dir = None

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'
    src_hash = buildmeta.hash_dirs(CACHE_SRC_DIRS)
    stdlib = buildmeta.read_data_cache(src_hash,
                                       stdlib_cache,
                                       source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(src_hash,
                                          tpldbdump_cache,
                                          source_dir=cache_dir,
                                          pickled=False)

    if stdlib is None:
        stdlib = await _make_stdlib(in_dev_mode or testmode, global_ids)
        cache_hit = False
    else:
        cache_hit = True

    if tpldbdump is None:
        await _ensure_meta_schema(conn)
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpldbdump = cluster.dump_database(edbdef.EDGEDB_TEMPLATE_DB,
                                              exclude_schema='edgedbinstdata')
            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )
    else:
        cluster.restore_database(edbdef.EDGEDB_TEMPLATE_DB, tpldbdump)

        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.

        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
            bootstrap_mode=True,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::ScalarType
            FILTER .builtin AND NOT .is_abstract
            SET {
                backend_id := sys::_get_pg_type_for_scalar_type(.id)
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            s_std.get_std_module_text('_testmode'),
            stdlib,
        )
        await conn.execute(testmode_sql)
        await metaschema.generate_support_views(
            cluster,
            conn,
            stdlib.reflschema,
        )

    # Make sure that schema backend_id properties are in sync with
    # the database.

    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
        bootstrap_mode=True,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT .is_abstract;
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    if not cache_hit and (in_dev_mode or specified_cache_dir):
        buildmeta.write_data_cache(
            stdlib,
            src_hash,
            stdlib_cache,
            target_dir=cache_dir,
        )

    await _store_static_bin_cache(
        cluster,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        cluster,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_json_cache(
        cluster,
        'introquery',
        json.dumps(stdlib.introquery),
    )

    await metaschema.generate_support_views(cluster, conn, stdlib.reflschema)
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
        bootstrap_mode=True,
    )

    return schema, stdlib.reflschema, compiler
Example 8
from edb.schema import types as s_types

from . import enums

_int32_packer = struct.Struct('!l').pack
_uint32_packer = struct.Struct('!L').pack
_uint16_packer = struct.Struct('!H').pack
_uint8_packer = struct.Struct('!B').pack

EMPTY_TUPLE_ID = s_obj.get_known_type_id('empty-tuple')
EMPTY_TUPLE_DESC = b'\x04' + EMPTY_TUPLE_ID.bytes + b'\x00\x00'

UUID_TYPE_ID = s_obj.get_known_type_id('std::uuid')
STR_TYPE_ID = s_obj.get_known_type_id('std::str')

NULL_TYPE_ID = uuidgen.UUID(b'\x00' * 16)
NULL_TYPE_DESC = b''

CTYPE_SET = b'\x00'
CTYPE_SHAPE = b'\x01'
CTYPE_BASE_SCALAR = b'\x02'
CTYPE_SCALAR = b'\x03'
CTYPE_TUPLE = b'\x04'
CTYPE_NAMEDTUPLE = b'\x05'
CTYPE_ARRAY = b'\x06'
CTYPE_ENUM = b'\x07'

CTYPE_ANNO_TYPENAME = b'\xff'

EMPTY_BYTEARRAY = bytearray()
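
Each type descriptor is framed as a tag byte, the 16-byte type id, and a type-specific payload; EMPTY_TUPLE_DESC above is CTYPE_TUPLE followed by a zero uint16 element count. A sketch assembling a two-element tuple descriptor the same way (the per-element layout, uint16 indexes of previously emitted descriptors, is an assumption about the protocol, and the id is made up):

import struct
import uuid

_uint16_packer = struct.Struct('!H').pack

CTYPE_TUPLE = b'\x04'
tuple_id = uuid.uuid4()

desc = (
    CTYPE_TUPLE                 # tag byte
    + tuple_id.bytes            # 16-byte type id
    + _uint16_packer(2)         # element count
    + _uint16_packer(0)         # descriptor index of element 0
    + _uint16_packer(1)         # descriptor index of element 1
)
print(len(desc))  # 1 + 16 + 2 + 2 + 2 = 23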
Example 9
def interpret_backend_error(schema, fields):
    err_details = get_error_details(fields)
    hint = None
    details = None
    if err_details.detail_json:
        hint = err_details.detail_json.get('hint')

    # all generic errors are static and have been handled by this point

    if err_details.code == PGErrorCode.NotNullViolationError:
        colname = err_details.column_name
        if colname:
            if colname.startswith('??'):
                ptr_id, *_ = colname[2:].partition('_')
            else:
                ptr_id = colname
            pointer = common.get_object_from_backend_name(
                schema, s_pointers.Pointer, ptr_id)
            pname = pointer.get_verbosename(schema, with_parent=True)
        else:
            pname = None

        if pname is not None:
            if err_details.detail_json:
                object_id = err_details.detail_json.get('object_id')
                if object_id is not None:
                    details = f'Failing object id is {str(object_id)!r}.'

            return errors.MissingRequiredError(
                f'missing value for required {pname}',
                details=details,
                hint=hint,
            )
        else:
            return errors.InternalServerError(err_details.message)

    elif err_details.code in constraint_errors:
        error_type = None
        match = None

        for errtype, ere in constraint_res.items():
            m = ere.match(err_details.message)
            if m:
                error_type = errtype
                match = m
                break
        # no need for else clause since it would have been handled by
        # the static version

        if error_type == 'constraint':
            # similarly, if we're here it's because we have a constraint_id
            constraint_id, _, _ = err_details.constraint_name.rpartition(';')
            constraint_id = uuidgen.UUID(constraint_id)

            constraint = schema.get_by_id(constraint_id)

            return errors.ConstraintViolationError(
                constraint.format_error_message(schema))
        elif error_type == 'newconstraint':
            # If we're here, it means that we already validated that
            # schema_name, table_name and column_name all exist.
            tabname = (err_details.schema_name, err_details.table_name)
            source = common.get_object_from_backend_name(
                schema, s_objtypes.ObjectType, tabname)
            source_name = source.get_displayname(schema)
            pointer = common.get_object_from_backend_name(
                schema, s_pointers.Pointer, err_details.column_name)
            pointer_name = pointer.get_shortname(schema).name

            return errors.ConstraintViolationError(
                f'Existing {source_name}.{pointer_name} '
                f'values violate the new constraint')
        elif error_type == 'scalar':
            domain_name = match.group(1)
            stype_name = types.base_type_name_map_r.get(domain_name)
            if stype_name:
                msg = f'invalid value for scalar type {str(stype_name)!r}'
            else:
                msg = translate_pgtype(schema, err_details.message)
            return errors.InvalidValueError(msg)

    elif err_details.code == PGErrorCode.InvalidTextRepresentation:
        return errors.InvalidValueError(
            translate_pgtype(schema, err_details.message))

    elif err_details.code == PGErrorCode.NumericValueOutOfRange:
        return errors.NumericOutOfRangeError(
            translate_pgtype(schema, err_details.message))

    elif err_details.code in {
            PGErrorCode.InvalidDatetimeFormatError, PGErrorCode.DatetimeError
    }:
        return errors.InvalidValueError(translate_pgtype(
            schema, err_details.message),
                                        hint=hint)

    return errors.InternalServerError(err_details.message)
Example 10
def static_interpret_backend_error(fields):
    err_details = get_error_details(fields)
    # handle some generic errors if possible
    err = get_generic_exception_from_err_details(err_details)
    if err is not None:
        return err

    if err_details.code == PGErrorCode.NotNullViolationError:
        if err_details.table_name or err_details.column_name:
            return SchemaRequired

        else:
            return errors.InternalServerError(err_details.message)

    elif err_details.code in constraint_errors:
        source = pointer = None

        for errtype, ere in constraint_res.items():
            m = ere.match(err_details.message)
            if m:
                error_type = errtype
                break
        else:
            return errors.InternalServerError(err_details.message)

        if error_type == 'cardinality':
            return errors.CardinalityViolationError('cardinality violation',
                                                    source=source,
                                                    pointer=pointer)

        elif error_type == 'link_target':
            if err_details.detail_json:
                srcname = err_details.detail_json.get('source')
                ptrname = err_details.detail_json.get('pointer')
                target = err_details.detail_json.get('target')
                expected = err_details.detail_json.get('expected')

                if srcname and ptrname:
                    srcname = sn.QualName.from_string(srcname)
                    ptrname = sn.QualName.from_string(ptrname)
                    lname = '{}.{}'.format(srcname, ptrname.name)
                else:
                    lname = ''

                msg = (f'invalid target for link {lname!r}: {target!r} '
                       f'(expecting {expected!r})')

            else:
                msg = 'invalid target for link'

            return errors.UnknownLinkError(msg)

        elif error_type == 'link_target_del':
            return errors.ConstraintViolationError(err_details.message,
                                                   details=err_details.detail)

        elif error_type == 'constraint':
            if err_details.constraint_name is None:
                return errors.InternalServerError(err_details.message)

            constraint_id, _, _ = err_details.constraint_name.rpartition(';')

            try:
                constraint_id = uuidgen.UUID(constraint_id)
            except ValueError:
                return errors.InternalServerError(err_details.message)

            return SchemaRequired

        elif error_type == 'newconstraint':
            # We can reconstruct what went wrong from the schema_name,
            # table_name, and column_name. But we don't expect
            # constraint_name to be present (because the constraint is
            # not yet present in the schema?).
            if (err_details.schema_name and err_details.table_name
                    and err_details.column_name):
                return SchemaRequired

            else:
                return errors.InternalServerError(err_details.message)

        elif error_type == 'scalar':
            return SchemaRequired

        elif error_type == 'id':
            return errors.ConstraintViolationError(
                'unique link constraint violation')

    elif err_details.code in SCHEMA_CODES:
        if err_details.code == PGErrorCode.InvalidDatetimeFormatError:
            hint = None
            if err_details.detail_json:
                hint = err_details.detail_json.get('hint')

            if err_details.message.startswith('missing required time zone'):
                return errors.InvalidValueError(err_details.message, hint=hint)
            elif err_details.message.startswith('unexpected time zone'):
                return errors.InvalidValueError(err_details.message, hint=hint)

        return SchemaRequired

    elif err_details.code == PGErrorCode.InvalidParameterValue:
        return errors.InvalidValueError(
            err_details.message,
            details=err_details.detail if err_details.detail else None)

    elif err_details.code == PGErrorCode.WrongObjectType:
        return errors.InvalidValueError(
            err_details.message,
            details=err_details.detail if err_details.detail else None)

    elif err_details.code == PGErrorCode.DivisionByZeroError:
        return errors.DivisionByZeroError(err_details.message)

    elif err_details.code == PGErrorCode.ReadOnlySQLTransactionError:
        return errors.TransactionError(
            'cannot execute query in a read-only transaction')

    elif err_details.code == PGErrorCode.TransactionSerializationFailure:
        return errors.TransactionSerializationError(err_details.message)

    elif err_details.code == PGErrorCode.TransactionDeadlockDetected:
        return errors.TransactionDeadlockError(err_details.message)

    elif err_details.code == PGErrorCode.InvalidCatalogNameError:
        return errors.AuthenticationError(err_details.message)

    elif err_details.code == PGErrorCode.ObjectInUse:
        return errors.ExecutionError(err_details.message)

    return errors.InternalServerError(err_details.message)
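
static_interpret_backend_error is the cheap first phase of a two-phase scheme: anything classifiable from the wire fields alone is translated immediately, and anything needing schema introspection returns the SchemaRequired sentinel so the caller can retry with interpret_backend_error once a schema is available. A sketch of that dispatch (the wrapper and accessor names here are stand-ins):

SchemaRequired = object()  # stands in for the module-level sentinel

def translate_error(get_schema, fields):
    err = static_interpret_backend_error(fields)       # phase 1: no schema
    if err is SchemaRequired:
        schema = get_schema()                          # introspect lazily
        err = interpret_backend_error(schema, fields)  # phase 2
    return err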
Example 11
def interpret_backend_error(schema, fields):
    err_details = get_error_details(fields)
    hint = None
    details = None
    if err_details.detail_json:
        hint = err_details.detail_json.get('hint')

    # all generic errors are static and have been handled by this point

    if err_details.code == pgerrors.ERROR_NOT_NULL_VIOLATION:
        colname = err_details.column_name
        if colname:
            if colname.startswith('??'):
                ptr_id, *_ = colname[2:].partition('_')
            else:
                ptr_id = colname
            pointer = common.get_object_from_backend_name(
                schema, s_pointers.Pointer, ptr_id)
            pname = pointer.get_verbosename(schema, with_parent=True)
        else:
            pname = None

        if pname is not None:
            if err_details.detail_json:
                object_id = err_details.detail_json.get('object_id')
                if object_id is not None:
                    details = f'Failing object id is {str(object_id)!r}.'

            return errors.MissingRequiredError(
                f'missing value for required {pname}',
                details=details,
                hint=hint,
            )
        else:
            return errors.InternalServerError(err_details.message)

    elif err_details.code in constraint_errors:
        error_type = None
        match = None

        for errtype, ere in constraint_res.items():
            m = ere.match(err_details.message)
            if m:
                error_type = errtype
                match = m
                break
        # no need for else clause since it would have been handled by
        # the static version

        if error_type == 'constraint':
            # similarly, if we're here it's because we have a constraint_id
            constraint_id, _, _ = err_details.constraint_name.rpartition(';')
            constraint_id = uuidgen.UUID(constraint_id)

            constraint = schema.get_by_id(constraint_id)

            return errors.ConstraintViolationError(
                constraint.format_error_message(schema))
        elif error_type == 'newconstraint':
            # If we're here, it means that we already validated that
            # schema_name, table_name and column_name all exist.
            tabname = (err_details.schema_name, err_details.table_name)
            source = common.get_object_from_backend_name(
                schema, s_objtypes.ObjectType, tabname)
            source_name = source.get_displayname(schema)
            pointer = common.get_object_from_backend_name(
                schema, s_pointers.Pointer, err_details.column_name)
            pointer_name = pointer.get_shortname(schema).name

            return errors.ConstraintViolationError(
                f'Existing {source_name}.{pointer_name} '
                f'values violate the new constraint')
        elif error_type == 'scalar':
            domain_name = match.group(1)
            stype_name = types.base_type_name_map_r.get(domain_name)
            if stype_name:
                if match.group(2) in range_constraints:
                    msg = f'{str(stype_name)!r} value out of range'
                else:
                    msg = f'invalid value for scalar type {str(stype_name)!r}'
            else:
                msg = translate_pgtype(schema, err_details.message)
            return errors.InvalidValueError(msg)

    elif err_details.code == pgerrors.ERROR_INVALID_TEXT_REPRESENTATION:
        return errors.InvalidValueError(
            translate_pgtype(schema, err_details.message))

    elif err_details.code == pgerrors.ERROR_NUMERIC_VALUE_OUT_OF_RANGE:
        return errors.NumericOutOfRangeError(
            translate_pgtype(schema, err_details.message))

    elif err_details.code in {pgerrors.ERROR_INVALID_DATETIME_FORMAT,
                              pgerrors.ERROR_DATETIME_FIELD_OVERFLOW}:
        return errors.InvalidValueError(
            translate_pgtype(schema, err_details.message),
            hint=hint)

    elif (
        err_details.code == pgerrors.ERROR_WRONG_OBJECT_TYPE
        and err_details.message == 'covariance error'
    ):
        ptr = schema.get_by_id(uuidgen.UUID(err_details.column_name))
        wrong_obj = schema.get_by_id(uuidgen.UUID(err_details.table_name))

        vn = ptr.get_verbosename(schema, with_parent=True)
        return errors.InvalidLinkTargetError(
            f"invalid target for {vn}: '{wrong_obj.get_name(schema)}'"
            f" (expecting '{ptr.get_target(schema).get_name(schema)}')"
        )

    return errors.InternalServerError(err_details.message)
Example 12
def static_interpret_backend_error(fields):
    err_details = get_error_details(fields)
    # handle some generic errors if possible
    err = get_generic_exception_from_err_details(err_details)
    if err is not None:
        return err

    if err_details.code == pgerrors.ERROR_NOT_NULL_VIOLATION:
        if err_details.table_name or err_details.column_name:
            return SchemaRequired

        else:
            return errors.InternalServerError(err_details.message)

    elif err_details.code in constraint_errors:
        source = pointer = None

        for errtype, ere in constraint_res.items():
            m = ere.match(err_details.message)
            if m:
                error_type = errtype
                break
        else:
            return errors.InternalServerError(err_details.message)

        if error_type == 'cardinality':
            return errors.CardinalityViolationError(
                'cardinality violation',
                source=source, pointer=pointer)

        elif error_type == 'link_target':
            if err_details.detail_json:
                srcname = err_details.detail_json.get('source')
                ptrname = err_details.detail_json.get('pointer')
                target = err_details.detail_json.get('target')
                expected = err_details.detail_json.get('expected')

                if srcname and ptrname:
                    srcname = sn.QualName.from_string(srcname)
                    ptrname = sn.QualName.from_string(ptrname)
                    lname = '{}.{}'.format(srcname, ptrname.name)
                else:
                    lname = ''

                msg = (
                    f'invalid target for link {lname!r}: {target!r} '
                    f'(expecting {expected!r})'
                )

            else:
                msg = 'invalid target for link'

            return errors.UnknownLinkError(msg)

        elif error_type == 'link_target_del':
            return errors.ConstraintViolationError(
                err_details.message, details=err_details.detail)

        elif error_type == 'constraint':
            if err_details.constraint_name is None:
                return errors.InternalServerError(err_details.message)

            constraint_id, _, _ = err_details.constraint_name.rpartition(';')

            try:
                constraint_id = uuidgen.UUID(constraint_id)
            except ValueError:
                return errors.InternalServerError(err_details.message)

            return SchemaRequired

        elif error_type == 'newconstraint':
            # We can reconstruct what went wrong from the schema_name,
            # table_name, and column_name. But we don't expect
            # constraint_name to be present (because the constraint is
            # not yet present in the schema?).
            if (err_details.schema_name and err_details.table_name and
                    err_details.column_name):
                return SchemaRequired

            else:
                return errors.InternalServerError(err_details.message)

        elif error_type == 'scalar':
            return SchemaRequired

        elif error_type == 'id':
            return errors.ConstraintViolationError(
                'unique link constraint violation')

    elif err_details.code in SCHEMA_CODES:
        if err_details.code == pgerrors.ERROR_INVALID_DATETIME_FORMAT:
            hint = None
            if err_details.detail_json:
                hint = err_details.detail_json.get('hint')

            if err_details.message.startswith('missing required time zone'):
                return errors.InvalidValueError(err_details.message, hint=hint)
            elif err_details.message.startswith('unexpected time zone'):
                return errors.InvalidValueError(err_details.message, hint=hint)

        return SchemaRequired

    elif err_details.code == pgerrors.ERROR_INVALID_PARAMETER_VALUE:
        return errors.InvalidValueError(
            err_details.message,
            details=err_details.detail if err_details.detail else None
        )

    elif err_details.code == pgerrors.ERROR_WRONG_OBJECT_TYPE:
        if err_details.column_name:
            return SchemaRequired

        return errors.InvalidValueError(
            err_details.message,
            details=err_details.detail if err_details.detail else None
        )

    elif err_details.code == pgerrors.ERROR_DIVISION_BY_ZERO:
        return errors.DivisionByZeroError(err_details.message)

    elif err_details.code == pgerrors.ERROR_INTERVAL_FIELD_OVERFLOW:
        return errors.NumericOutOfRangeError(err_details.message)

    elif err_details.code == pgerrors.ERROR_READ_ONLY_SQL_TRANSACTION:
        return errors.TransactionError(
            'cannot execute query in a read-only transaction')

    elif err_details.code == pgerrors.ERROR_SERIALIZATION_FAILURE:
        return errors.TransactionSerializationError(err_details.message)

    elif err_details.code == pgerrors.ERROR_DEADLOCK_DETECTED:
        return errors.TransactionDeadlockError(err_details.message)

    elif err_details.code == pgerrors.ERROR_INVALID_CATALOG_NAME:
        return errors.UnknownDatabaseError(err_details.message)

    elif err_details.code == pgerrors.ERROR_OBJECT_IN_USE:
        return errors.ExecutionError(err_details.message)

    elif err_details.code == pgerrors.ERROR_DUPLICATE_DATABASE:
        return errors.DuplicateDatabaseDefinitionError(err_details.message)

    elif (
        err_details.code == pgerrors.ERROR_CARDINALITY_VIOLATION
        and err_details.constraint_name == 'std::assert_single'
    ):
        return errors.CardinalityViolationError(err_details.message)

    return errors.InternalServerError(err_details.message)
Example 13
def _interpret_constraint_errors(code, schema, err_details, hint):
    details = None
    if code == pgerrors.ERROR_NOT_NULL_VIOLATION:
        colname = err_details.column_name
        if colname:
            if colname.startswith('??'):
                ptr_id, *_ = colname[2:].partition('_')
            else:
                ptr_id = colname
            pointer = common.get_object_from_backend_name(
                schema, s_pointers.Pointer, ptr_id)
            pname = pointer.get_verbosename(schema, with_parent=True)
        else:
            pname = None

        if pname is not None:
            if err_details.detail_json:
                object_id = err_details.detail_json.get('object_id')
                if object_id is not None:
                    details = f'Failing object id is {str(object_id)!r}.'

            return errors.MissingRequiredError(
                f'missing value for required {pname}',
                details=details,
                hint=hint,
            )
        else:
            return errors.InternalServerError(err_details.message)

    error_type = None
    match = None

    for errtype, ere in constraint_res.items():
        m = ere.match(err_details.message)
        if m:
            error_type = errtype
            match = m
            break
    # no need for else clause since it would have been handled by
    # the static version

    if error_type == 'constraint':
        # similarly, if we're here it's because we have a constraint_id
        constraint_id, _, _ = err_details.constraint_name.rpartition(';')
        constraint_id = uuidgen.UUID(constraint_id)

        constraint = schema.get_by_id(constraint_id)

        msg = constraint.format_error(schema)
        subject = constraint.get_subject(schema)
        vname = subject.get_verbosename(schema, with_parent=True)
        subjtitle = f"value of {vname}"
        details = constraint.format_error_message(schema, subjtitle)
        return errors.ConstraintViolationError(msg, details=details)
    elif error_type == 'newconstraint':
        # If we're here, it means that we already validated that
        # schema_name, table_name and column_name all exist.
        tabname = (err_details.schema_name, err_details.table_name)
        source = common.get_object_from_backend_name(schema,
                                                     s_objtypes.ObjectType,
                                                     tabname)
        source_name = source.get_displayname(schema)
        pointer = common.get_object_from_backend_name(schema,
                                                      s_pointers.Pointer,
                                                      err_details.column_name)
        pointer_name = pointer.get_shortname(schema).name

        return errors.ConstraintViolationError(
            f'Existing {source_name}.{pointer_name} '
            f'values violate the new constraint')
    elif error_type == 'scalar':
        domain_name = match.group(1)
        stype_name = types.base_type_name_map_r.get(domain_name)
        if stype_name:
            if match.group(2) in range_constraints:
                msg = f'{str(stype_name)!r} value out of range'
            else:
                msg = f'invalid value for scalar type {str(stype_name)!r}'
        else:
            msg = translate_pgtype(schema, err_details.message)
        return errors.InvalidValueError(msg)

    return errors.InternalServerError(err_details.message)
Example 14
def _static_interpret_constraint_errors(code, err_details):
    if code == pgerrors.ERROR_NOT_NULL_VIOLATION:
        if err_details.table_name or err_details.column_name:
            return SchemaRequired
        else:
            return errors.InternalServerError(err_details.message)

    source = pointer = None

    for errtype, ere in constraint_res.items():
        m = ere.match(err_details.message)
        if m:
            error_type = errtype
            break
    else:
        return errors.InternalServerError(err_details.message)

    if error_type == 'cardinality':
        return errors.CardinalityViolationError('cardinality violation',
                                                source=source,
                                                pointer=pointer)

    elif error_type == 'link_target':
        if err_details.detail_json:
            srcname = err_details.detail_json.get('source')
            ptrname = err_details.detail_json.get('pointer')
            target = err_details.detail_json.get('target')
            expected = err_details.detail_json.get('expected')

            if srcname and ptrname:
                srcname = sn.QualName.from_string(srcname)
                ptrname = sn.QualName.from_string(ptrname)
                lname = '{}.{}'.format(srcname, ptrname.name)
            else:
                lname = ''

            msg = (f'invalid target for link {lname!r}: {target!r} '
                   f'(expecting {expected!r})')

        else:
            msg = 'invalid target for link'

        return errors.UnknownLinkError(msg)

    elif error_type == 'link_target_del':
        return errors.ConstraintViolationError(err_details.message,
                                               details=err_details.detail)

    elif error_type == 'constraint':
        if err_details.constraint_name is None:
            return errors.InternalServerError(err_details.message)

        constraint_id, _, _ = err_details.constraint_name.rpartition(';')

        try:
            constraint_id = uuidgen.UUID(constraint_id)
        except ValueError:
            return errors.InternalServerError(err_details.message)

        return SchemaRequired

    elif error_type == 'newconstraint':
        # We can reconstruct what went wrong from the schema_name,
        # table_name, and column_name. But we don't expect
        # constraint_name to be present (because the constraint is
        # not yet present in the schema?).
        if (err_details.schema_name and err_details.table_name
                and err_details.column_name):
            return SchemaRequired

        else:
            return errors.InternalServerError(err_details.message)

    elif error_type == 'scalar':
        return SchemaRequired

    elif error_type == 'id':
        return errors.ConstraintViolationError(
            'unique link constraint violation')
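
Both the static and the schema-aware interpreters rely on the backend constraint name embedding the schema constraint's UUID before the final ';'. A runnable illustration of that parse (the suffix text is made up; only the 'uuid;...' shape is taken from the code above):

import uuid

constraint_name = '6a34c3c9-7a49-4b7e-8bbd-bf2e0fd54b18;schemaconstr'
constraint_id, _, _ = constraint_name.rpartition(';')

try:
    constraint_id = uuid.UUID(constraint_id)
except ValueError:
    constraint_id = None  # not one of ours: fall back to a generic error

print(constraint_id)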
Example 15
async def _init_stdlib(
    ctx: BootstrapContext,
    testmode: bool,
    global_ids: Mapping[str, uuid.UUID],
) -> Tuple[StdlibBits, config.Spec, edbcompiler.Compiler]:
    in_dev_mode = devmode.is_in_dev_mode()
    conn = ctx.conn
    cluster = ctx.cluster

    specified_cache_dir = os.environ.get('_EDGEDB_WRITE_DATA_CACHE_TO')
    if not specified_cache_dir:
        cache_dir = None
    else:
        cache_dir = pathlib.Path(specified_cache_dir)

    stdlib_cache = 'backend-stdlib.pickle'
    tpldbdump_cache = 'backend-tpldbdump.sql'

    src_hash = buildmeta.hash_dirs(
        buildmeta.get_cache_src_dirs(), extra_files=[__file__],
    )

    stdlib = buildmeta.read_data_cache(
        src_hash, stdlib_cache, source_dir=cache_dir)
    tpldbdump = buildmeta.read_data_cache(
        src_hash, tpldbdump_cache, source_dir=cache_dir, pickled=False)

    if stdlib is None:
        logger.info('Compiling the standard library...')
        stdlib = await _make_stdlib(ctx, in_dev_mode or testmode, global_ids)

    logger.info('Creating the necessary PostgreSQL extensions...')
    await metaschema.create_pg_extensions(conn)

    config_spec = config.load_spec_from_schema(stdlib.stdschema)
    config.set_settings(config_spec)

    if tpldbdump is None:
        logger.info('Populating internal SQL structures...')
        await metaschema.bootstrap(conn, config_spec)
        logger.info('Executing the standard library...')
        await _execute_ddl(conn, stdlib.sqltext)

        if in_dev_mode or specified_cache_dir:
            tpl_db_name = edbdef.EDGEDB_TEMPLATE_DB
            tpl_pg_db_name = cluster.get_db_name(tpl_db_name)
            tpl_pg_db_name_dyn = (
                f"edgedb.get_database_backend_name({ql(tpl_db_name)})")
            tpldbdump = await cluster.dump_database(
                tpl_pg_db_name,
                exclude_schemas=['edgedbinstdata', 'edgedbext'],
                dump_object_owners=False,
            )

            # Excluding the "edgedbext" schema above apparently
            # doesn't apply to extensions created in that schema,
            # so we have to resort to commenting out extension
            # statements in the dump.
            tpldbdump = re.sub(
                rb'^(CREATE|COMMENT ON) EXTENSION.*$',
                rb'-- \g<0>',
                tpldbdump,
                flags=re.MULTILINE,
            )

            global_metadata = await conn.fetchval(
                f'SELECT edgedb.get_database_metadata({ql(tpl_db_name)})',
            )
            global_metadata = json.loads(global_metadata)

            pl_block = dbops.PLTopBlock()

            set_metadata_text = dbops.SetMetadata(
                dbops.Database(name='__dummy_placeholder_database__'),
                global_metadata,
            ).code(pl_block)
            set_metadata_text = set_metadata_text.replace(
                '__dummy_placeholder_database__',
                f"' || quote_ident({tpl_pg_db_name_dyn}) || '",
            )

            set_single_db_metadata_text = dbops.SetSingleDBMetadata(
                edbdef.EDGEDB_TEMPLATE_DB, global_metadata
            ).code(pl_block)

            pl_block.add_command(textwrap.dedent(f"""\
                IF (edgedb.get_backend_capabilities()
                    & {int(params.BackendCapabilities.CREATE_DATABASE)}) != 0
                THEN
                {textwrap.indent(set_metadata_text, '    ')}
                ELSE
                {textwrap.indent(set_single_db_metadata_text, '    ')}
                END IF
                """))

            text = pl_block.to_string()

            tpldbdump += b'\n' + text.encode('utf-8')

            buildmeta.write_data_cache(
                tpldbdump,
                src_hash,
                tpldbdump_cache,
                pickled=False,
                target_dir=cache_dir,
            )

            buildmeta.write_data_cache(
                stdlib,
                src_hash,
                stdlib_cache,
                target_dir=cache_dir,
            )
    else:
        logger.info('Initializing the standard library...')
        await metaschema._execute_sql_script(conn, tpldbdump.decode('utf-8'))
        # Restore the search_path as the dump might have altered it.
        await conn.execute(
            "SELECT pg_catalog.set_config('search_path', 'edgedb', false)")

    if not in_dev_mode and testmode:
        # Running tests on a production build.
        stdlib, testmode_sql = await _amend_stdlib(
            ctx,
            s_std.get_std_module_text(sn.UnqualName('_testmode')),
            stdlib,
        )
        await conn.execute(testmode_sql)
        # _testmode includes extra config settings, so make sure
        # those are picked up.
        config_spec = config.load_spec_from_schema(stdlib.stdschema)
        config.set_settings(config_spec)

    # Make sure that schema backend_id properties are in sync with
    # the database.

    compiler = edbcompiler.new_compiler(
        std_schema=stdlib.stdschema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )
    _, sql = compile_bootstrap_script(
        compiler,
        stdlib.reflschema,
        '''
        SELECT schema::ScalarType {
            id,
            backend_id,
        } FILTER .builtin AND NOT (.abstract ?? False);
        ''',
        expected_cardinality_one=False,
        single_statement=True,
    )
    schema = stdlib.stdschema
    typemap = await conn.fetchval(sql)
    for entry in json.loads(typemap):
        t = schema.get_by_id(uuidgen.UUID(entry['id']))
        schema = t.set_field_value(
            schema, 'backend_id', entry['backend_id'])

    stdlib = stdlib._replace(stdschema=schema)

    await _store_static_bin_cache(
        ctx,
        'stdschema',
        pickle.dumps(schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'reflschema',
        pickle.dumps(stdlib.reflschema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'global_schema',
        pickle.dumps(stdlib.global_schema, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_bin_cache(
        ctx,
        'classlayout',
        pickle.dumps(stdlib.classlayout, protocol=pickle.HIGHEST_PROTOCOL),
    )

    await _store_static_text_cache(
        ctx,
        'local_intro_query',
        stdlib.local_intro_query,
    )

    await _store_static_text_cache(
        ctx,
        'global_intro_query',
        stdlib.global_intro_query,
    )

    await metaschema.generate_support_views(
        conn, stdlib.reflschema, cluster.get_runtime_params()
    )
    await metaschema.generate_support_functions(conn, stdlib.reflschema)

    compiler = edbcompiler.new_compiler(
        std_schema=schema,
        reflection_schema=stdlib.reflschema,
        schema_class_layout=stdlib.classlayout,
    )

    await metaschema.generate_more_support_functions(
        conn, compiler, stdlib.reflschema, testmode)

    if tpldbdump is not None:
        # When we restore a database from a dump, OIDs for non-system
        # Postgres types might get skewed as they are not part of the dump.
        # A good example of that is `std::bigint` which is implemented as
        # a custom domain type. The OIDs are stored under
        # `schema::Object.backend_id` property and are injected into
        # array query arguments.
        #
        # The code below re-syncs backend_id properties of EdgeDB builtin
        # types with the actual OIDs in the DB.

        compiler = edbcompiler.new_compiler(
            std_schema=stdlib.stdschema,
            reflection_schema=stdlib.reflschema,
            schema_class_layout=stdlib.classlayout,
        )
        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::Type
            FILTER
                .builtin
                AND NOT (.abstract ?? False)
                AND schema::Type IS schema::ScalarType | schema::Tuple
            SET {
                backend_id := sys::_get_pg_type_for_edgedb_type(
                    .id,
                    <uuid>{}
                )
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

        _, sql = compile_bootstrap_script(
            compiler,
            stdlib.reflschema,
            '''
            UPDATE schema::Array
            FILTER
                .builtin
                AND NOT (.abstract ?? False)
            SET {
                backend_id := sys::_get_pg_type_for_edgedb_type(
                    .id,
                    .element_type.id,
                )
            }
            ''',
            expected_cardinality_one=False,
            single_statement=True,
        )
        await conn.execute(sql)

    await _store_static_json_cache(
        ctx,
        'configspec',
        config.spec_to_json(config_spec),
    )

    return stdlib, config_spec, compiler
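
The CREATE EXTENSION comment-out step in the dump post-processing above is plain byte-level regex surgery; a self-contained check of exactly what it rewrites:

import re

tpldbdump = (
    b"CREATE EXTENSION IF NOT EXISTS pg_trgm;\n"
    b"SELECT 1;\n"
    b"COMMENT ON EXTENSION pg_trgm IS 'trigram matching';\n"
)
tpldbdump = re.sub(
    rb'^(CREATE|COMMENT ON) EXTENSION.*$',
    rb'-- \g<0>',
    tpldbdump,
    flags=re.MULTILINE,
)
print(tpldbdump.decode())
# -- CREATE EXTENSION IF NOT EXISTS pg_trgm;
# SELECT 1;
# -- COMMENT ON EXTENSION pg_trgm IS 'trigram matching';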