Code example #1
File: intromech.py  Project: mcaramma/edgedb
    async def read_modules(self, schema):
        schemas = await introspection.schemas.fetch(self.connection,
                                                    schema_pattern='edgedb_%')
        schemas = {
            s['name']
            for s in schemas if not s['name'].startswith('edgedb_aux_')
        }

        modules = await datasources.schema.modules.fetch(self.connection)
        modules = {
            common.edgedb_module_name_to_schema_name(m['name']): {
                'name': m['name'],
                'imports': m['imports']
            }
            for m in modules
        }

        recorded_schemas = set(modules.keys())

        # Sanity checks
        extra_schemas = schemas - recorded_schemas - {'edgedb', 'edgedbss'}
        missing_schemas = recorded_schemas - schemas

        if extra_schemas:
            msg = 'internal metadata inconsistency'
            details = 'Extraneous data schemas exist: {}'.format(', '.join(
                '"%s"' % s for s in extra_schemas))
            raise s_err.SchemaError(msg, details=details)

        if missing_schemas:
            msg = 'internal metadata inconsistency'
            details = 'Missing schemas for modules: {}'.format(', '.join(
                '{!r}'.format(s) for s in missing_schemas))
            raise s_err.SchemaError(msg, details=details)

        mods = []

        for module in modules.values():
            # Carry the recorded imports over so they can be resolved below.
            mod = s_mod.Module(name=module['name'],
                               imports=module['imports'])
            schema.add_module(mod)
            mods.append(mod)

        for mod in mods:
            for imp_name in mod.imports:
                if not schema.has_module(imp_name):
                    # Must be a foreign module, import it directly
                    try:
                        impmod = importlib.import_module(imp_name)
                    except ImportError:
                        # Module has moved, create a dummy
                        impmod = so.DummyModule(imp_name)

                    schema.add_module(impmod)
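The sanity check above boils down to comparing two sets of schema names. Below is a minimal, self-contained sketch of that pattern; the schema names are invented for illustration and are not taken from the project.

schemas = {'edgedb_std', 'edgedb_default', 'edgedb_orphan'}
recorded_schemas = {'edgedb_std', 'edgedb_default', 'edgedb_missing'}

# Schemas present in the database but not backed by a recorded module
extra_schemas = schemas - recorded_schemas - {'edgedb', 'edgedbss'}
# Modules recorded in the metadata whose backing schema is gone
missing_schemas = recorded_schemas - schemas

assert extra_schemas == {'edgedb_orphan'}
assert missing_schemas == {'edgedb_missing'}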
Code example #2
    async def read_indexes(self, schema):
        pg_index_data = await introspection.tables.fetch_indexes(
            self.connection,
            schema_pattern='edgedb%', index_pattern='%_reg_idx')

        pg_indexes = set()
        for row in pg_index_data:
            table_name = tuple(row['table_name'])
            for pg_index in self.interpret_indexes(table_name, row['indexes']):
                pg_indexes.add(
                    (table_name, pg_index.get_metadata('schemaname'))
                )

        ds = datasources.schema.indexes

        for index_data in await ds.fetch(self.connection):
            subj = schema.get(index_data['subject_name'])
            subj_table_name = common.get_table_name(subj, catenate=False)
            index_name = sn.Name(index_data['name'])

            try:
                pg_indexes.remove((subj_table_name, index_name))
            except KeyError:
                raise s_err.SchemaError(
                    'internal metadata inconsistency',
                    details=f'Index {index_name} is defined in schema, but '
                            f'the corresponding PostgreSQL index is missing.'
                ) from None

            index = s_indexes.SourceIndex(
                name=index_name,
                subject=subj,
                expr=index_data['expr'])

            subj.add_index(index)
            schema.add(index)

        if pg_indexes:
            details = f'Extraneous PostgreSQL indexes found: {pg_indexes!r}'
            raise s_err.SchemaError(
                'internal metadata inconsistency',
                details=details)
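read_indexes reconciles the two sides by removing each index declared in the schema from the set of indexes introspected from PostgreSQL and then reporting whatever is left over. A self-contained sketch of that reconcile-by-removal pattern, with invented table and index names:

pg_indexes = {(('edgedb', 'obj_data'), 'name_idx'),
              (('edgedb', 'obj_data'), 'email_idx')}
declared_in_schema = [(('edgedb', 'obj_data'), 'name_idx'),
                      (('edgedb', 'obj_data'), 'email_idx')]

for key in declared_in_schema:
    try:
        pg_indexes.remove(key)      # claim the backing PostgreSQL index
    except KeyError:
        raise RuntimeError(
            f'{key} is declared in the schema but missing in PostgreSQL')

if pg_indexes:                      # anything left unclaimed is extraneous
    raise RuntimeError(f'extraneous PostgreSQL indexes: {pg_indexes!r}')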
Code example #3
File: intromech.py  Project: mcaramma/edgedb
    def get_type_id(self, objtype):
        objtype_id = None

        type_cache = self.type_cache
        if type_cache:
            objtype_id = type_cache.get(objtype.name)

        if objtype_id is None:
            msg = 'could not determine backend id for type in this context'
            details = 'ObjectType: {}'.format(objtype.name)
            raise s_err.SchemaError(msg, details=details)

        return objtype_id
Code example #4
File: intromech.py  Project: mcaramma/edgedb
    async def read_pointer_target_column(self, schema, pointer,
                                         constraints_cache):
        ptr_stor_info = types.get_pointer_storage_info(pointer,
                                                       schema=schema,
                                                       resolve_type=False)
        cols = await self._type_mech.get_table_columns(
            ptr_stor_info.table_name, connection=self.connection)

        col = cols.get(ptr_stor_info.column_name)

        if not col:
            msg = 'internal metadata inconsistency'
            details = ('Record for {!r} hosted by {!r} exists, but ' +
                       'the corresponding table column is missing').format(
                           pointer.shortname, pointer.source.name)
            raise s_err.SchemaError(msg, details=details)

        return self._get_pointer_column_target(schema, pointer.source,
                                               pointer.shortname, col)
Code example #5
File: base.py  Project: versada/edgedb
def classname_from_type(typ):
    """Return canonical Object name for a given type.

    Arguments:

    - typ              -- Type to normalize

    Result:

    Canonical Object name.
    """
    is_composite = isinstance(typ, tuple)

    if is_composite:
        container_type = typ[0]
        item_type = typ[1]
    else:
        item_type = typ

    classname = None

    if isinstance(item_type, s_types.Type):
        classname = item_type.name

    elif isinstance(item_type, s_pointers.Pointer):
        classname = item_type.name

    elif isinstance(item_type, s_obj.ObjectMeta):
        classname = item_type

    else:
        classname = BaseTypeMeta.type_to_edgedb_builtin(item_type)

    if classname is None:
        raise s_err.SchemaError('could not find matching schema item for %r' %
                                typ)

    if is_composite:
        result = (container_type, classname)
    else:
        result = classname

    return result
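classname_from_type dispatches on the kind of item it is given and preserves the container of composite (container, item) tuples. Below is an illustrative sketch of that dispatch using a stand-in class; StandInType and the names shown are assumptions for the example and are not part of the real s_types/s_pointers/s_obj hierarchy.

class StandInType:
    # Stand-in for the schema item classes, which expose a `.name` attribute.
    def __init__(self, name):
        self.name = name

def classname_from_type_sketch(typ):
    is_composite = isinstance(typ, tuple)
    container_type, item_type = typ if is_composite else (None, typ)
    if not isinstance(item_type, StandInType):
        raise ValueError(f'could not find matching schema item for {typ!r}')
    classname = item_type.name
    return (container_type, classname) if is_composite else classname

# classname_from_type_sketch(StandInType('std::str'))          -> 'std::str'
# classname_from_type_sketch((list, StandInType('std::str')))  -> (list, 'std::str')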
Code example #6
File: base.py  Project: versada/edgedb
def normalize_type(type, schema):
    """Normalize provided type description into a canonical Object form.

    Arguments:

    - type             -- Type to normalize
    - schema           -- Schema to use for Object lookups

    Result:

    Normalized type.
    """

    classname = classname_from_type(type)
    if classname is None:
        raise s_err.SchemaError('could not find matching schema item for %r' %
                                type)

    is_composite = isinstance(classname, tuple)

    if is_composite:
        container_type = classname[0]
        item_class_name = classname[1]
    else:
        item_class_name = classname

    if isinstance(item_class_name, s_obj.ObjectMeta):
        item_class = item_class_name
    else:
        item_class = schema.get(item_class_name)

    if is_composite:
        result = (container_type, item_class)
    else:
        result = item_class

    return result
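normalize_type then resolves the name produced by classname_from_type into an actual schema item, keeping the container for composite types. A minimal sketch of that resolution step, where a plain dict stands in for the schema's name lookup and the item class is invented for the example (the ObjectMeta shortcut in the real code is omitted):

class StandInItem:
    def __init__(self, name):
        self.name = name

fake_schema = {'std::str': StandInItem('std::str')}  # stand-in for Schema lookups

def normalize_type_sketch(classname, schema):
    # `classname` is what classname_from_type() returns: a bare name or a
    # (container, name) tuple for composite types.
    is_composite = isinstance(classname, tuple)
    container, item_name = classname if is_composite else (None, classname)
    item = schema[item_name]        # schema.get(item_class_name) in the real code
    return (container, item) if is_composite else item

# normalize_type_sketch('std::str', fake_schema)          -> a StandInItem
# normalize_type_sketch((list, 'std::str'), fake_schema)  -> (list, StandInItem)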
Code example #7
File: intromech.py  Project: mcaramma/edgedb
    async def read_objtypes(self, schema):
        tables = await introspection.tables.fetch_tables(
            self.connection, schema_pattern='edgedb%', table_pattern='%_data')
        tables = {(t['schema'], t['name']): t for t in tables}

        objtype_list = await datasources.schema.objtypes.fetch(self.connection)
        objtype_list = collections.OrderedDict(
            (sn.Name(row['name']), row) for row in objtype_list)

        visited_tables = set()

        self.table_cache.update({
            common.objtype_name_to_table_name(n, catenate=False): c
            for n, c in objtype_list.items()
        })

        basemap = {}

        for name, row in objtype_list.items():
            objtype = {
                'name': name,
                'title': self.json_to_word_combination(row['title']),
                'description': row['description'],
                'is_abstract': row['is_abstract'],
                'is_final': row['is_final'],
                'view_type': (s_types.ViewType(row['view_type'])
                              if row['view_type'] else None),
                'expr': (s_expr.ExpressionText(row['expr'])
                         if row['expr'] else None),
            }

            table_name = common.objtype_name_to_table_name(name,
                                                           catenate=False)
            table = tables.get(table_name)

            if not table:
                msg = 'internal metadata inconsistency'
                details = 'Record for type {!r} exists but ' \
                          'the table is missing'.format(name)
                raise s_err.SchemaError(msg, details=details)

            visited_tables.add(table_name)

            bases = await self.pg_table_inheritance_to_bases(
                table['name'], table['schema'], self.table_cache)

            basemap[name] = bases

            objtype = s_objtypes.ObjectType(name=name,
                                            title=objtype['title'],
                                            description=objtype['description'],
                                            is_abstract=objtype['is_abstract'],
                                            is_final=objtype['is_final'],
                                            view_type=objtype['view_type'],
                                            expr=objtype['expr'])

            schema.add(objtype)

        for objtype in schema.get_objects(type='ObjectType'):
            try:
                bases = basemap[objtype.name]
            except KeyError:
                pass
            else:
                objtype.bases = [schema.get(b) for b in bases]

        derived = await datasources.schema.objtypes.fetch_derived(
            self.connection)

        for row in derived:
            attrs = dict(row)
            attrs['name'] = sn.SchemaName(attrs['name'])
            attrs['bases'] = [schema.get(b) for b in attrs['bases']]
            attrs['view_type'] = (s_types.ViewType(attrs['view_type'])
                                  if attrs['view_type'] else None)
            attrs['is_derived'] = True
            objtype = s_objtypes.ObjectType(**attrs)
            schema.add(objtype)

        tabdiff = set(tables.keys()) - visited_tables
        if tabdiff:
            msg = 'internal metadata inconsistency'
            details = 'Extraneous data tables exist: {}'.format(', '.join(
                '"%s.%s"' % t for t in tabdiff))
            raise s_err.SchemaError(msg, details=details)
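read_objtypes wires up inheritance in two passes: every ObjectType is created and added to the schema first, and only then are the recorded bases resolved into actual schema objects via basemap, so references between types created in the same pass cannot fail. A small self-contained sketch of that two-pass pattern, with invented type names:

rows = {'test::B': ['test::A'], 'test::A': []}    # name -> recorded base names

objects = {name: {'name': name, 'bases': []} for name in rows}   # pass 1: create
for name, base_names in rows.items():                            # pass 2: link
    objects[name]['bases'] = [objects[b] for b in base_names]

assert objects['test::B']['bases'][0] is objects['test::A']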
Code example #8
File: intromech.py  Project: mcaramma/edgedb
    async def read_scalars(self, schema):
        seqs = await introspection.sequences.fetch(
            self.connection,
            schema_pattern='edgedb%',
            sequence_pattern='%_sequence')
        seqs = {(s['schema'], s['name']): s for s in seqs}

        seen_seqs = set()

        scalar_list = await datasources.schema.scalars.fetch(self.connection)

        basemap = {}

        for row in scalar_list:
            name = sn.Name(row['name'])

            scalar_data = {
                'name': name,
                'title': self.json_to_word_combination(row['title']),
                'description': row['description'],
                'is_abstract': row['is_abstract'],
                'is_final': row['is_final'],
                'view_type': (s_types.ViewType(row['view_type'])
                              if row['view_type'] else None),
                'bases': row['bases'],
                'default': row['default'],
                'expr': (s_expr.ExpressionText(row['expr'])
                         if row['expr'] else None),
            }

            self.scalar_cache[name] = scalar_data
            scalar_data['default'] = self.unpack_default(row['default'])

            if scalar_data['bases']:
                basemap[name] = scalar_data['bases']

            scalar = s_scalars.ScalarType(
                name=name,
                default=scalar_data['default'],
                title=scalar_data['title'],
                description=scalar_data['description'],
                is_abstract=scalar_data['is_abstract'],
                is_final=scalar_data['is_final'],
                view_type=scalar_data['view_type'],
                expr=scalar_data['expr'])

            schema.add(scalar)

        for scalar in schema.get_objects(type='ScalarType'):
            try:
                basename = basemap[scalar.name]
            except KeyError:
                pass
            else:
                scalar.bases = [schema.get(sn.Name(basename[0]))]

        sequence = schema.get('std::sequence', None)
        for scalar in schema.get_objects(type='ScalarType'):
            if (sequence is not None and scalar.issubclass(sequence)
                    and not scalar.is_abstract):
                seq_name = common.scalar_name_to_sequence_name(scalar.name,
                                                               catenate=False)
                if seq_name not in seqs:
                    msg = 'internal metadata inconsistency'
                    details = (f'Missing sequence for sequence '
                               f'scalar {scalar.name}')
                    raise s_err.SchemaError(msg, details=details)
                seen_seqs.add(seq_name)

        extra_seqs = set(seqs) - seen_seqs
        if extra_seqs:
            msg = 'internal metadata inconsistency'
            details = 'Extraneous sequences exist: {}'.format(', '.join(
                common.qname(*t) for t in extra_seqs))
            raise s_err.SchemaError(msg, details=details)