Example #1
    def _validate_enum_change(
        self,
        stype: s_types.Type,
        cur_labels: Sequence[str],
        new_labels: Sequence[str],
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> s_schema.Schema:
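        # Labels must be unique.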
        new_set = set(new_labels)
        if len(new_set) != len(new_labels):
            raise errors.SchemaError(f'enums cannot contain duplicate values')

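        # Existing labels cannot be removed.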
        cur_set = set(cur_labels)
        if cur_set - new_set:
            raise errors.SchemaError(
                f'cannot remove labels from an enumeration type')

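        # Existing labels must keep their original positions; new labels
        # may only be appended at the end.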
        for cur_label, new_label in zip(cur_labels, new_labels):
            if cur_label != new_label:
                raise errors.SchemaError(
                    f'cannot change the existing labels in an enumeration '
                    f'type, only appending new labels is allowed')

        self.set_attribute_value('enum_values', new_labels)
        schema = stype.set_field_value(schema, 'enum_values', new_labels)
        return schema
Example #2
    async def read_indexes(self, schema, only_modules, exclude_modules):
        pg_index_data = await introspection.tables.fetch_indexes(
            self.connection, schema_pattern='edgedb%', index_pattern='%_index')

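        # Build the set of index identifiers that actually exist in PostgreSQL.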
        pg_indexes = set()
        for row in pg_index_data:
            table_name = tuple(row['table_name'])
            for pg_index in self.interpret_indexes(table_name, row['indexes']):
                pg_indexes.add(
                    (table_name, pg_index.get_metadata('schemaname')))

        ds = datasources.schema.indexes
        indexes = await ds.fetch(self.connection,
                                 modules=only_modules,
                                 exclude_modules=exclude_modules)

        basemap = {}

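        # Recreate each schema-declared index and, for locally defined ones,
        # check that a matching PostgreSQL index was introspected above.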
        for index_data in indexes:
            subj = schema.get(index_data['subject_name'])
            subj_table_name = common.get_backend_name(schema,
                                                      subj,
                                                      catenate=False)
            index_name = sn.Name(index_data['name'])

            if index_data['is_local']:
                try:
                    pg_indexes.remove((subj_table_name, index_name))
                except KeyError:
                    raise errors.SchemaError(
                        'internal metadata inconsistency',
                        details=(
                            f'Index {index_name} is defined in schema, but '
                            f'the corresponding PostgreSQL index is missing.'
                        )) from None

            schema, index = s_indexes.Index.create_in_schema(
                schema,
                id=index_data['id'],
                name=index_name,
                subject=subj,
                is_local=index_data['is_local'],
                inherited_fields=self._unpack_inherited_fields(
                    index_data['inherited_fields']),
                expr=self.unpack_expr(index_data['expr'], schema))

            schema = subj.add_index(schema, index)

            basemap[index] = (index_data['bases'], index_data['ancestors'])

        for scls, (basenames, ancestors) in basemap.items():
            schema = self._set_reflist(schema, scls, 'bases', basenames)
            schema = self._set_reflist(schema, scls, 'ancestors', ancestors)

        if pg_indexes and not only_modules and not exclude_modules:
            details = f'Extraneous PostgreSQL indexes found: {pg_indexes!r}'
            raise errors.SchemaError('internal metadata inconsistency',
                                     details=details)

        return schema
Example #3
    def _validate_enum_change(
        self,
        stype: s_types.Type,
        cur_labels: Sequence[str],
        new_labels: Sequence[str],
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> s_schema.Schema:
        if len(set(new_labels)) != len(new_labels):
            raise errors.SchemaError(
                f'enum labels are not unique')

        cur_set = set(cur_labels)

        if cur_set - set(new_labels):
            raise errors.SchemaError(
                f'cannot remove labels from an enumeration type')

        existing = [label for label in new_labels if label in cur_set]
        if existing != cur_labels:
            raise errors.SchemaError(
                f'cannot change the relative order of existing labels '
                f'in an enumeration type')

        self.set_attribute_value('enum_values', new_labels)
        schema = stype.set_field_value(schema, 'enum_values', new_labels)
        return schema
Example #4
    def apply(self, schema, context):
        scls = self.get_object(schema, context)
        self.scls = scls

        enum_values = scls.get_enum_values(schema)
        if enum_values:
            raise errors.UnsupportedFeatureError(
                f'altering enum composition is not supported')

            if self.removed_bases and not self.added_bases:
                raise errors.SchemaError(f'cannot DROP EXTENDING enum')

            all_bases = []

            for bases, pos in self.added_bases:
                if pos:
                    raise errors.SchemaError(
                        f'cannot add another enum as supertype, '
                        f'use EXTENDING without position qualification')

                all_bases.extend(bases)

            if len(all_bases) > 1:
                raise errors.SchemaError(
                    f'cannot set more than one enum as supertype')

            new_base = all_bases[0]
            new_values = new_base.elements

            schema = self._validate_enum_change(scls, enum_values, new_values,
                                                schema, context)

            return schema, scls
        else:
            return super().apply(schema, context)
Example #5
    def apply(
        self,
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> s_schema.Schema:
        scls = self.get_object(schema, context)
        self.scls = scls
        assert isinstance(scls, ScalarType)

        cur_labels = scls.get_enum_values(schema)

        if cur_labels:

            if self.removed_bases and not self.added_bases:
                raise errors.SchemaError(
                    f'cannot DROP EXTENDING enum')

            all_bases = []

            for bases, pos in self.added_bases:
                # Check that there aren't any non-enum bases.
                for base in bases:
                    if isinstance(base, AnonymousEnumTypeShell):
                        is_enum_base = True
                    elif isinstance(base, s_types.TypeShell):
                        is_enum_base = base.resolve(schema).is_enum(schema)
                    else:
                        is_enum_base = base.is_enum(schema)

                    if not is_enum_base:
                        raise errors.SchemaError(
                            f'cannot add another type as supertype, '
                            f'enumeration must be the only supertype specified'
                        )

                # Since all bases are enums at this point, the error
                # messages below only mention enums.
                if pos:
                    raise errors.SchemaError(
                        f'cannot add another enum as supertype, '
                        f'use EXTENDING without position qualification')

                all_bases.extend(bases)

            if len(all_bases) > 1:
                raise errors.SchemaError(
                    f'cannot set more than one enum as supertype')

            new_base = all_bases[0]
            new_labels = new_base.elements

            schema = self._validate_enum_change(
                scls, cur_labels, new_labels, schema, context)

        else:
            schema = super().apply(schema, context)

        self.validate_scalar_bases(schema, context)
        return schema
Example #6
    def _normalize_ptr_default(self, expr, source, ptr, ptrdecl):
        module_aliases = {None: source.get_name(self._schema).module}

        ir, _, expr_text = qlutils.normalize_tree(
            expr,
            self._schema,
            modaliases=module_aliases,
            anchors={qlast.Source: source},
            singletons=[source])

        expr_type = ir.stype

        self._schema = ptr.set_field_value(self._schema, 'default', expr_text)

        if ptr.is_pure_computable(self._schema):
            # Pure computable without explicit target.
            # Fixup pointer target and target property.
            self._schema = ptr.set_field_value(self._schema, 'target',
                                               expr_type)

            if isinstance(ptr, s_links.Link):
                if not isinstance(expr_type, s_objtypes.ObjectType):
                    raise errors.InvalidLinkTargetError(
                        f'invalid link target, expected object type, got '
                        f'{expr_type.__class__.__name__}',
                        context=ptrdecl.expr.context)
            else:
                if not isinstance(expr_type,
                                  (s_scalars.ScalarType, s_types.Collection)):
                    raise errors.InvalidPropertyTargetError(
                        f'invalid property target, expected primitive type, '
                        f'got {expr_type.__class__.__name__}',
                        context=ptrdecl.expr.context)

            if isinstance(ptr, s_links.Link):
                tgt_prop = ptr.getptr(self._schema, 'target')
                self._schema = tgt_prop.set_field_value(
                    self._schema, 'target', expr_type)

            self._schema = ptr.set_field_value(self._schema, 'cardinality',
                                               ir.cardinality)

            if ptrdecl.cardinality is not ptr.get_cardinality(self._schema):
                if ptrdecl.cardinality is qlast.Cardinality.ONE:
                    raise errors.SchemaError(
                        f'computable expression possibly returns more than '
                        f'one value, but the {ptr.schema_class_displayname!r} '
                        f'is declared as "single"',
                        context=expr.context)

        if (not isinstance(expr_type, s_abc.Type)
                or (ptr.get_target(self._schema) is not None
                    and not expr_type.issubclass(
                        self._schema, ptr.get_target(self._schema)))):
            raise errors.SchemaError(
                'default value query must yield a single result of '
                'type {!r}'.format(
                    ptr.get_target(self._schema).get_name(self._schema)),
                context=expr.context)
Example #7
File: schema.py Project: yew1eb/edgedb
    def _update_obj_name(
        self,
        obj_id: uuid.UUID,
        scls: so.Object,
        old_name: Optional[str],
        new_name: Optional[str],
    ) -> Tuple[
        immu.Map[str, uuid.UUID],
        immu.Map[Tuple[Type[so.Object], str], FrozenSet[uuid.UUID]],
        immu.Map[Tuple[Type[so.Object], str], uuid.UUID],
    ]:
        name_to_id = self._name_to_id
        shortname_to_id = self._shortname_to_id
        globalname_to_id = self._globalname_to_id
        stype = type(scls)
        is_global = not issubclass(stype, so.QualifiedObject)

        has_sn_cache = issubclass(stype, (s_func.Function, s_oper.Operator))

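        # Drop the old name from the affected lookup maps.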
        if old_name is not None:
            if is_global:
                globalname_to_id = globalname_to_id.delete((stype, old_name))
            else:
                name_to_id = name_to_id.delete(old_name)
            if has_sn_cache:
                old_shortname = sn.shortname_from_fullname(old_name)
                sn_key = (stype, old_shortname)

                new_ids = shortname_to_id[sn_key] - {obj_id}
                if new_ids:
                    shortname_to_id = shortname_to_id.set(sn_key, new_ids)
                else:
                    shortname_to_id = shortname_to_id.delete(sn_key)

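        # Register the new name, checking for collisions.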
        if new_name is not None:
            if is_global:
                key = (stype, new_name)
                if key in globalname_to_id:
                    raise errors.SchemaError(
                        f'{stype.__name__} {new_name!r} '
                        f'is already present in the schema')
                globalname_to_id = globalname_to_id.set(key, obj_id)
            else:
                if new_name in name_to_id:
                    raise errors.SchemaError(
                        f'name {new_name!r} is already in the schema')
                name_to_id = name_to_id.set(new_name, obj_id)

            if has_sn_cache:
                new_shortname = sn.shortname_from_fullname(new_name)
                sn_key = (stype, new_shortname)

                try:
                    ids = shortname_to_id[sn_key]
                except KeyError:
                    ids = frozenset()

                shortname_to_id = shortname_to_id.set(sn_key, ids | {obj_id})

        return name_to_id, shortname_to_id, globalname_to_id
Example #8
    def merge_targets(cls, schema, ptr, t1, t2, *, allow_contravariant=False):
        if t1 is t2:
            return schema, t1

        # When two pointers are merged, check target compatibility
        # and return a target that satisfies both specified targets.

        if (isinstance(t1, s_abc.ScalarType) !=
                isinstance(t2, s_abc.ScalarType)):
            # Mixing a property with a link.
            vnp = ptr.get_verbosename(schema, with_parent=True)
            vn = ptr.get_verbosename(schema)
            t1_vn = t1.get_verbosename(schema)
            t2_vn = t2.get_verbosename(schema)
            raise errors.SchemaError(
                f'cannot redefine {vnp} as {t2_vn}',
                details=f'{vn} is defined as a link to {t1_vn} in a '
                        f'parent type'
            )

        elif isinstance(t1, s_abc.ScalarType):
            # Targets are both scalars
            if t1 != t2:
                vnp = ptr.get_verbosename(schema, with_parent=True)
                vn = ptr.get_verbosename(schema)
                t1_vn = t1.get_verbosename(schema)
                t2_vn = t2.get_verbosename(schema)
                raise errors.SchemaError(
                    f'cannot redefine {vnp} as {t2_vn}',
                    details=f'{vn} is defined as {t1_vn} in a parent type, '
                            f'which is incompatible with {t2_vn} ',
                )

            return schema, t1

        else:
            if t2.issubclass(schema, t1):
                # The new target is a subclass of the current target, so
                # it is a more specific requirement.
                current_target = t2
            elif allow_contravariant and t1.issubclass(schema, t2):
                current_target = t1
            else:
                # The new target is not a subclass of the previously seen
                # targets, which creates an unresolvable target requirement
                # conflict.
                vnp = ptr.get_verbosename(schema, with_parent=True)
                vn = ptr.get_verbosename(schema)
                t2_vn = t2.get_verbosename(schema)
                raise errors.SchemaError(
                    f'cannot redefine {vnp} as {t2_vn}',
                    details=(
                        f'{vn} targets {t2_vn} that is not related '
                        f'to a type found in this link in the parent type: '
                        f'{t1.get_displayname(schema)!r}.'))

            return schema, current_target
Example #9
    def _get_bases(self, obj, decl):
        """Resolve object bases from the "extends" declaration."""
        bases = []
        enum_values = None

        if decl.extends:
            # Explicit inheritance
            has_enums = any(br.maintype.name == 'enum' and br.subtypes
                            for br in decl.extends)

            if has_enums:
                if not obj.is_scalar():
                    raise errors.SchemaError(
                        f'{obj.get_displayname(self._schema)} '
                        f'cannot be an enumerated type',
                        context=decl.context,
                    )

                if len(decl.extends) > 1:
                    raise errors.SchemaError(
                        f'invalid scalar type definition, enumeration must '
                        f'be the only supertype specified',
                        context=decl.extends[0].context,
                    )

                enum_values = [st.val.value for st in decl.extends[0].subtypes]

                bases = [self._schema.get('std::anyenum')]

            else:
                for base_ref in decl.extends:
                    base_name = self._get_ref_name(base_ref.maintype)

                    base = self._schema.get(base_name,
                                            type=obj.__class__,
                                            module_aliases=self._mod_aliases)
                    if base.get_is_final(self._schema):
                        msg = '{!r} is final and cannot be inherited ' \
                            'from'.format(base.get_name(self._schema))
                        raise errors.SchemaError(msg, context=decl)

                    bases.append(base)

        elif obj.get_name(self._schema) not in type(obj).get_root_classes():
            # Implicit inheritance from the default base class
            default_base_name = type(obj).get_default_base_name()
            if default_base_name is not None:
                default_base = self._schema.get(
                    default_base_name, module_aliases=self._mod_aliases)
                bases.append(default_base)

        return s_obj.ObjectList.create(self._schema, bases), enum_values
Example #10
    async def read_indexes(self, schema, only_modules, exclude_modules):
        pg_index_data = await introspection.tables.fetch_indexes(
            self.connection,
            schema_pattern='edgedb%', index_pattern='%_index')

        pg_indexes = set()
        for row in pg_index_data:
            table_name = tuple(row['table_name'])
            for pg_index in self.interpret_indexes(table_name, row['indexes']):
                pg_indexes.add(
                    (table_name, pg_index.get_metadata('schemaname'))
                )

        ds = datasources.schema.indexes
        indexes = await ds.fetch(
            self.connection, modules=only_modules,
            exclude_modules=exclude_modules)

        for index_data in indexes:
            subj = schema.get(index_data['subject_name'])
            subj_table_name = common.get_backend_name(
                schema, subj, catenate=False)
            index_name = sn.Name(index_data['name'])

            try:
                pg_indexes.remove((subj_table_name, index_name))
            except KeyError:
                raise errors.SchemaError(
                    'internal metadata inconsistency',
                    details=f'Index {index_name} is defined in schema, but '
                            f'the corresponding PostgreSQL index is missing.'
                ) from None

            schema, index = s_indexes.Index.create_in_schema(
                schema,
                id=index_data['id'],
                name=index_name,
                subject=subj,
                expr=s_expr.Expression(**index_data['expr']))

            schema = subj.add_index(schema, index)

        if pg_indexes and not only_modules and not exclude_modules:
            details = f'Extraneous PostgreSQL indexes found: {pg_indexes!r}'
            raise errors.SchemaError(
                'internal metadata inconsistency',
                details=details)

        return schema
Example #11
def _get_local_obj(
    refname: s_name.QualName,
    tracer_type: Type[qltracer.NamedObject],
    sourcectx: Optional[parsing.ParserContext],
    *,
    ctx: LayoutTraceContext,
) -> Optional[qltracer.NamedObject]:

    obj = ctx.objects.get(refname)

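    # Generic pseudo-types cannot be referenced in user-defined schemas.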
    if isinstance(obj, s_pseudo.PseudoType):
        raise errors.SchemaError(
            f'invalid type: {obj.get_verbosename(ctx.schema)} is a generic '
            f'type and they are not supported in user-defined schema',
            context=sourcectx,
        )

    elif obj is not None and not isinstance(obj, tracer_type):
        obj_type = TRACER_TO_REAL_TYPE_MAP[type(obj)]
        real_type = TRACER_TO_REAL_TYPE_MAP[tracer_type]
        raise errors.InvalidReferenceError(
            f'{str(refname)!r} exists, but is '
            f'{english.add_a(obj_type.get_schema_class_displayname())}, '
            f'not {english.add_a(real_type.get_schema_class_displayname())}',
            context=sourcectx,
        )

    return obj
Example #12
    def validate_scalar_ancestors(
        self,
        ancestors: Sequence[so.SubclassableObject],
        schema: s_schema.Schema,
        context: sd.CommandContext,
    ) -> None:
        concrete_ancestors = {
            ancestor
            for ancestor in ancestors if not ancestor.get_abstract(schema)
        }
        # Filter out anything that has a subclass relation with
        # every other concrete ancestor. This lets us allow chains
        # of concrete scalar types while prohibiting diamonds (for
        # example if X <: A, B <: int64 where A, B are concrete).
        # (If we wanted to allow diamonds, we could instead filter out
        # anything that has concrete bases.)
        concrete_ancestors = {
            c1
            for c1 in concrete_ancestors
            if not all(c1 == c2 or c1.issubclass(schema, c2)
                       or c2.issubclass(schema, c1)
                       for c2 in concrete_ancestors)
        }

        if len(concrete_ancestors) > 1:
            raise errors.SchemaError(
                f'scalar type may not have more than '
                f'one concrete base type',
                context=self.source_context,
            )
Example #13
def merge_cardinality(target: so.Object, sources: typing.List[so.Object],
                      field_name: str, *, schema) -> object:
    current = None
    current_from = None

    target_source = target.get_source(schema)

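    # All explicitly declared cardinality values must agree.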
    for source in [target] + list(sources):
        nextval = source.get_explicit_field_value(schema, field_name, None)
        if nextval is not None:
            if current is None:
                current = nextval
                current_from = source
            else:
                if current is not nextval:
                    current_from_source = current_from.get_source(schema)
                    source_source = source.get_source(schema)

                    tgt_repr = (f'{target_source.get_displayname(schema)}.'
                                f'{target.get_displayname(schema)}')
                    cf_repr = (
                        f'{current_from_source.get_displayname(schema)}.'
                        f'{current_from.get_displayname(schema)}')
                    other_repr = (f'{source_source.get_displayname(schema)}.'
                                  f'{source.get_displayname(schema)}')

                    raise errors.SchemaError(
                        f'cannot redefine the target cardinality of '
                        f'{tgt_repr!r}: it is defined '
                        f'as {current.as_ptr_qual()!r} in {cf_repr!r} and '
                        f'as {nextval.as_ptr_qual()!r} in {other_repr!r}.')

    return current
Example #14
    def from_subtypes(cls, schema, subtypes, typemods=None, *, name=None):
        if len(subtypes) != 1:
            raise errors.SchemaError(
                f'unexpected number of subtypes, expecting 1: {subtypes!r}')
        stype = subtypes[0]

        if isinstance(stype, Array):
            raise errors.UnsupportedFeatureError(
                f'nested arrays are not supported')

        if typemods:
            dimensions = typemods[0]
        else:
            dimensions = []

        if isinstance(stype, cls):
            # There is no array of arrays, only multi-dimensional arrays.
            element_type = stype.element_type
            if not dimensions:
                dimensions.append(-1)
            dimensions += stype.dimensions
        else:
            element_type = stype
            dimensions = []

        return cls.create(schema,
                          element_type=element_type,
                          dimensions=dimensions,
                          name=name)
Example #15
def merge_cardinality(target: Pointer, sources: List[so.Object],
                      field_name: str, *, schema: s_schema.Schema) -> object:
    current = None
    current_from = None

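    # The pointer and all of its sources must agree on the explicitly
    # declared cardinality; any mismatch is an error.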
    for source in [target] + list(sources):
        nextval = source.get_explicit_field_value(schema, field_name, None)
        if nextval is not None:
            if current is None:
                current = nextval
                current_from = source
            elif current is not nextval:
                tgt_repr = target.get_verbosename(
                    schema, with_parent=True)
                cf_repr = current_from.get_verbosename(
                    schema, with_parent=True)
                other_repr = source.get_verbosename(
                    schema, with_parent=True)

                raise errors.SchemaError(
                    f'cannot redefine the target cardinality of '
                    f'{tgt_repr}: it is defined '
                    f'as {current.as_ptr_qual()!r} in {cf_repr} and '
                    f'as {nextval.as_ptr_qual()!r} in {other_repr}.'
                )

    return current
Example #16
    def _classbases_from_ast(cls, schema, astnode, context):
        classname = cls._classname_from_ast(schema, astnode, context)

        modaliases = context.modaliases

        bases = so.ObjectList.create(
            schema,
            [utils.ast_to_typeref(b, modaliases=modaliases, schema=schema)
             for b in getattr(astnode, 'bases', None) or []]
        )

        for base in bases.objects(schema):
            if base.is_type() and base.contains_any():
                base_type_name = base.get_displayname(schema)
                raise errors.SchemaError(
                    f"{base_type_name!r} cannot be a parent type")

        mcls = cls.get_schema_metaclass()
        if not bases and classname not in mcls.get_root_classes():
            default_base = mcls.get_default_base_name()

            if default_base is not None and classname != default_base:
                default_base = schema.get(default_base)
                bases = so.ObjectList.create(
                    schema,
                    [utils.reduce_to_typeref(schema, default_base)])

        return bases
Example #17
    def _delete_finalize(self, schema, context, scls):
        ref_strs = []

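        # Refuse the drop if other objects that are not also being
        # deleted still depend on this one.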
        if not context.canonical:
            refs = schema.get_referrers(self.scls)
            if refs:
                for ref in refs:
                    if (not context.is_deleting(ref)
                            and ref.is_blocking_ref(schema, scls)):
                        ref_strs.append(
                            ref.get_verbosename(schema, with_parent=True))

            if ref_strs:
                vn = self.scls.get_verbosename(schema, with_parent=True)
                dn = self.scls.get_displayname(schema)
                detail = '; '.join(f'{ref_str} depends on {dn}'
                                   for ref_str in ref_strs)
                raise errors.SchemaError(
                    f'cannot drop {vn} because '
                    f'other objects in the schema depend on it',
                    details=detail,
                )

        schema = schema.delete(scls)
        return schema
Example #18
    def _set_obj_field(self, obj_id, field, value):
        try:
            data = self._id_to_data[obj_id]
        except KeyError:
            err = (f'cannot set {field!r} value: item {str(obj_id)!r} '
                   f'is not present in the schema {self!r}')
            raise errors.SchemaError(err) from None

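        # Renames also require updating the name lookup maps.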
        name_to_id = None
        shortname_to_id = None
        globalname_to_id = None
        if field == 'name':
            old_name = data.get('name')
            name_to_id, shortname_to_id, globalname_to_id = (
                self._update_obj_name(obj_id, self._id_to_type[obj_id],
                                      old_name, value))

        new_data = data.set(field, value)
        id_to_data = self._id_to_data.set(obj_id, new_data)
        scls = self._id_to_type[obj_id]

        if field in data:
            orig_field_data = {field: data[field]}
        else:
            orig_field_data = {}

        refs_to = self._update_refs_to(scls, orig_field_data, {field: value})

        return self._replace(name_to_id=name_to_id,
                             shortname_to_id=shortname_to_id,
                             globalname_to_id=globalname_to_id,
                             id_to_data=id_to_data,
                             refs_to=refs_to)
Example #19
    def _parse_field_setters(
            self, scls, field_decls: typing.List[qlast.Field]):
        fields = type(scls).get_fields()
        updates = {}

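        # Only fields that allow assignment via DDL may be set here.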
        for field_decl in field_decls:
            fieldname = field_decl.name.name

            attrfield = fields.get(fieldname)
            if attrfield is None or not attrfield.allow_ddl_set:
                raise errors.SchemaError(
                    f'unexpected field {fieldname}',
                    context=field_decl.context)

            if issubclass(attrfield.type, s_expr.Expression):
                updates[fieldname] = s_expr.Expression.from_ast(
                    field_decl.value,
                    self._schema,
                    self._mod_aliases,
                )

            else:
                updates[fieldname] = qlcompiler.evaluate_ast_to_python_val(
                    field_decl.value, self._schema,
                    modaliases=self._mod_aliases)

        if updates:
            self._schema = scls.update(self._schema, updates)
Example #20
    def _get_bases(self, obj, decl):
        """Resolve object bases from the "extends" declaration."""
        bases = []

        if decl.extends:
            # Explicit inheritance
            for base_ref in decl.extends:
                base_name = self._get_ref_name(base_ref.maintype)

                base = self._schema.get(base_name,
                                        type=obj.__class__,
                                        module_aliases=self._mod_aliases)
                if base.get_is_final(self._schema):
                    msg = '{!r} is final and cannot be inherited ' \
                          'from'.format(base.get_name(self._schema))
                    raise errors.SchemaError(msg, context=decl)

                bases.append(base)

        elif obj.get_name(self._schema) not in type(obj).get_root_classes():
            # Implicit inheritance from the default base class
            default_base_name = type(obj).get_default_base_name()
            if default_base_name is not None:
                default_base = self._schema.get(
                    default_base_name, module_aliases=self._mod_aliases)
                bases.append(default_base)

        return s_obj.ObjectList.create(self._schema, bases)
Example #21
def _merge_lineage(schema, obj, lineage):
    result = []

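    # C3-style merge: repeatedly pick a head that does not appear in the
    # tail of any remaining line; failure means no consistent order exists.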
    while True:
        nonempty = [line for line in lineage if line]
        if not nonempty:
            return result

        for line in nonempty:
            candidate = line[0]
            tails = [
                m for m in nonempty if id(candidate) in {id(c)
                                                         for c in m[1:]}
            ]
            if not tails:
                break
        else:
            name = obj.get_verbosename(schema)
            raise errors.SchemaError(
                f"Could not find consistent ancestor order for {name}")

        result.append(candidate)

        for line in nonempty:
            if line[0] is candidate:
                del line[0]

    return result
Example #22
def _get_bases(decl, *, ctx):
    """Resolve object bases from the "extends" declaration."""
    bases = []

    if decl.bases:
        # Explicit inheritance
        has_enums = any(br.maintype.name == "enum" and br.subtypes
                        for br in decl.bases)

        if has_enums:
            if len(decl.bases) > 1:
                raise errors.SchemaError(
                    f"invalid scalar type definition, enumeration must "
                    f"be the only supertype specified",
                    context=decl.bases[0].context,
                )

            bases = [s_name.Name("std::anyenum")]

        else:
            for base_ref in decl.bases:
                base_name = ctx.get_ref_name(base_ref.maintype)
                bases.append(base_name)

    return bases
Example #23
    def _add(self, id, scls, data) -> 'Schema':
        name = data['name']

        if name in self._name_to_id:
            raise errors.SchemaError(
                f'{type(scls).__name__} {name!r} is already present '
                f'in the schema {self!r}')

        data = immu.Map(data)

        name_to_id, shortname_to_id = self._update_obj_name(
            id, scls, None, name)

        updates = dict(
            id_to_data=self._id_to_data.set(id, data),
            id_to_type=self._id_to_type.set(id, scls),
            name_to_id=name_to_id,
            shortname_to_id=shortname_to_id,
            refs_to=self._update_refs_to(scls, None, data),
        )

        if isinstance(scls, s_modules.Module):
            updates['modules'] = self._modules.set(name, id)
        elif name.module not in self._modules:
            raise errors.UnknownModuleError(
                f'module {name.module!r} is not in this schema')

        return self._replace(**updates)
Example #24
    def _validate_base_refs(
        cls,
        schema: s_schema.Schema,
        base_refs: List[so.Object],
        astnode: qlast.ObjectDDL,
        context: sd.CommandContext,
    ) -> so.ObjectList[so.InheritingObject]:
        classname = cls._classname_from_ast(schema, astnode, context)

        bases = so.ObjectList[so.InheritingObject].create(schema, base_refs)

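        # Base types that contain generic (any) types cannot be inherited from.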
        for base in bases.objects(schema):
            if base.is_type() and base.contains_any(schema):
                base_type_name = base.get_displayname(schema)
                raise errors.SchemaError(
                    f"{base_type_name!r} cannot be a parent type")

        mcls = cls.get_schema_metaclass()
        if not bases and classname not in mcls.get_root_classes():
            default_base = mcls.get_default_base_name()

            if default_base is not None and classname != default_base:
                default_base = schema.get(default_base)
                bases = so.ObjectList[so.InheritingObject].create(
                    schema,
                    [utils.reduce_to_typeref(schema, default_base)],
                )

        return bases
Example #25
File: schema.py Project: yew1eb/edgedb
    def _add(
        self,
        id: uuid.UUID,
        scls: so.Object,
        data: Mapping[str, Any],
    ) -> Schema:
        name = data['name']

        if name in self._name_to_id:
            raise errors.SchemaError(
                f'{type(scls).__name__} {name!r} is already present '
                f'in the schema {self!r}')

        data = immu.Map(data)

        name_to_id, shortname_to_id, globalname_to_id = self._update_obj_name(
            id, scls, None, name)

        updates = dict(
            id_to_data=self._id_to_data.set(id, data),
            id_to_type=self._id_to_type.set(id, scls),
            name_to_id=name_to_id,
            shortname_to_id=shortname_to_id,
            globalname_to_id=globalname_to_id,
            refs_to=self._update_refs_to(scls, None, data),
        )

        if (isinstance(scls, so.QualifiedObject)
                and not self.has_module(name.module)):
            raise errors.UnknownModuleError(
                f'module {name.module!r} is not in this schema')

        return self._replace(**updates)  # type: ignore
Example #26
def _merge_mro(schema, obj, mros):
    result = []

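    # C3-style merge: pick a head that is not in the tail of any other MRO.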
    while True:
        nonempty = [mro for mro in mros if mro]
        if not nonempty:
            return result

        for mro in nonempty:
            candidate = mro[0]
            tails = [
                m for m in nonempty if id(candidate) in {id(c)
                                                         for c in m[1:]}
            ]
            if not tails:
                break
        else:
            raise errors.SchemaError(
                f"Could not find consistent ancestor order for "
                f"{obj.get_verbosename(schema)}")

        result.append(candidate)

        for mro in nonempty:
            if mro[0] is candidate:
                del mro[0]

    return result
Example #27
File: types.py Project: willingc/edgedb
    def derive_subtype(
        self: TypeT,
        schema,
        *,
        name: str,
        mark_derived=False,
        attrs=None,
        inheritance_merge=True,
        preserve_path_id=None,
        refdict_whitelist=None,
        **kwargs,
    ) -> typing.Tuple[s_schema.Schema, TypeT]:

        if self.get_name(schema) == name:
            raise errors.SchemaError(
                f'cannot derive {self!r}({name}) from itself')

        derived_attrs: typing.Dict[str, object] = {}

        if attrs is not None:
            derived_attrs.update(attrs)

        derived_attrs['name'] = name
        derived_attrs['bases'] = so.ObjectList.create(schema, [self])

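        # Derive the subtype by building and applying a CreateObject command.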
        cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
            sd.CreateObject, type(self))

        cmd = cmdcls(classname=name)

        for k, v in derived_attrs.items():
            cmd.set_attribute_value(k, v)

        context = sd.CommandContext(
            modaliases={},
            schema=schema,
        )

        delta = sd.DeltaRoot()

        with context(sd.DeltaRootContext(schema=schema, op=delta)):
            if not inheritance_merge:
                context.current().inheritance_merge = False

            if refdict_whitelist is not None:
                context.current().inheritance_refdicts = refdict_whitelist

            if mark_derived:
                context.current().mark_derived = True

            if preserve_path_id:
                context.current().preserve_path_id = True

            delta.add(cmd)
            schema, _ = delta.apply(schema, context)

        derived = schema.get(name)

        return schema, derived
Example #28
    async def read_modules(self, schema, only_modules, exclude_modules):
        schemas = await introspection.schemas.fetch(self.connection,
                                                    schema_pattern='edgedb_%')
        schemas = {
            s['name']
            for s in schemas if not s['name'].startswith('edgedb_aux_')
        }

        modules = await datasources.schema.modules.fetch(
            self.connection,
            modules=only_modules,
            exclude_modules=exclude_modules)

        modules = [{
            'id': m['id'],
            'name': m['name'],
            'builtin': m['builtin']
        } for m in modules]

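        # Create a Module schema object for every introspected module.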
        recorded_schemas = set()
        for module in modules:
            schema, mod = s_mod.Module.create_in_schema(
                schema,
                id=module['id'],
                name=module['name'],
                builtin=module['builtin'])

            recorded_schemas.add(common.get_backend_name(schema, mod))

        # Sanity checks
        extra_schemas = schemas - recorded_schemas - {'edgedb', 'edgedbss'}
        missing_schemas = recorded_schemas - schemas

        if extra_schemas and not only_modules and not exclude_modules:
            msg = 'internal metadata inconsistency'
            details = 'Extraneous data schemas exist: {}'.format(', '.join(
                '"%s"' % s for s in extra_schemas))
            raise errors.SchemaError(msg, details=details)

        if missing_schemas:
            msg = 'internal metadata inconsistency'
            details = 'Missing schemas for modules: {}'.format(', '.join(
                '{!r}'.format(s) for s in missing_schemas))
            raise errors.SchemaError(msg, details=details)

        return schema
Example #29
    def get_topmost_concrete_base(self, schema):
        # Get the topmost non-abstract base.
        for ancestor in reversed(self.compute_mro(schema)):
            if not ancestor.get_is_abstract(schema):
                return ancestor

        raise errors.SchemaError(
            f'{self.get_verbosename(schema)} has no non-abstract ancestors')
Example #30
    def get_class(cls, schema_name):
        if schema_name == 'array':
            return Array
        elif schema_name == 'tuple':
            return Tuple
        else:
            raise errors.SchemaError(
                'unknown collection type: {!r}'.format(schema_name))