def _compare_tables(conn_table_names, metadata_table_names,
                    object_filters, inspector, metadata, diffs,
                    autogen_context):

    for s, tname in metadata_table_names.difference(conn_table_names):
        name = '%s.%s' % (s, tname) if s else tname
        metadata_table = metadata.tables[sa_schema._get_table_key(tname, s)]
        if _run_filters(metadata_table, tname, "table", False,
                        None, object_filters):
            diffs.append(("add_table", metadata.tables[name]))
            log.info("Detected added table %r", name)
            _compare_indexes(s, tname, object_filters,
                             None,
                             metadata_table,
                             diffs, autogen_context, inspector,
                             set())

    removal_metadata = sa_schema.MetaData()
    for s, tname in conn_table_names.difference(metadata_table_names):
        name = sa_schema._get_table_key(tname, s)
        exists = name in removal_metadata.tables
        t = sa_schema.Table(tname, removal_metadata, schema=s)
        if not exists:
            inspector.reflecttable(t, None)
        if _run_filters(t, tname, "table", True, None, object_filters):
            diffs.append(("remove_table", t))
            log.info("Detected removed table %r", name)

    existing_tables = conn_table_names.intersection(metadata_table_names)

    existing_metadata = sa_schema.MetaData()
    conn_column_info = {}
    for s, tname in existing_tables:
        name = sa_schema._get_table_key(tname, s)
        exists = name in existing_metadata.tables
        t = sa_schema.Table(tname, existing_metadata, schema=s)
        if not exists:
            inspector.reflecttable(t, None)
        conn_column_info[(s, tname)] = t

    for s, tname in sorted(existing_tables):
        name = '%s.%s' % (s, tname) if s else tname
        metadata_table = metadata.tables[name]
        conn_table = existing_metadata.tables[name]
        if _run_filters(metadata_table, tname, "table", False,
                        conn_table, object_filters):
            _compare_columns(s, tname, object_filters,
                             conn_table,
                             metadata_table,
                             diffs, autogen_context, inspector)
            c_uniques = _compare_uniques(s, tname, object_filters,
                                         conn_table, metadata_table,
                                         diffs, autogen_context, inspector)
            _compare_indexes(s, tname, object_filters,
                             conn_table, metadata_table,
                             diffs, autogen_context, inspector,
                             c_uniques)
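# A minimal, self-contained sketch (not part of the original source) of the
# set arithmetic driving the comparison above: table names are
# (schema, tablename) tuples, and difference()/intersection() split them
# into added, removed, and shared tables.
conn_table_names = {(None, "user"), (None, "order")}
metadata_table_names = {(None, "user"), (None, "invoice")}

added = metadata_table_names.difference(conn_table_names)
removed = conn_table_names.difference(metadata_table_names)
shared = conn_table_names.intersection(metadata_table_names)

assert added == {(None, "invoice")}    # becomes an "add_table" diff
assert removed == {(None, "order")}    # becomes a "remove_table" diff
assert shared == {(None, "user")}      # compared column-by-column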
def __table_cls__(cls, *args, **kwargs):
    """This is called by SQLAlchemy during mapper setup. It determines the
    final table object that the model will use.

    If no primary key is found, that indicates single-table inheritance,
    so no table will be created and ``__tablename__`` will be unset.
    """
    # check if a table with this name already exists
    # allows reflected tables to be applied to model by name
    key = _get_table_key(args[0], kwargs.get('schema'))

    if key in cls.metadata.tables:
        return sa.Table(*args, **kwargs)

    # if a primary key or constraint is found, create a table for
    # joined-table inheritance
    for arg in args:
        if (
            (isinstance(arg, sa.Column) and arg.primary_key)
            or isinstance(arg, sa.PrimaryKeyConstraint)
        ):
            return sa.Table(*args, **kwargs)

    # if no base classes define a table, return one
    # ensures the correct error shows up when missing a primary key
    for base in cls.__mro__[1:-1]:
        if '__table__' in base.__dict__:
            break
    else:
        return sa.Table(*args, **kwargs)

    # single-table inheritance, use the parent tablename
    if '__tablename__' in cls.__dict__:
        del cls.__tablename__
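# A hedged illustration (plain SQLAlchemy declarative, with made-up
# Person/Engineer models) of the distinction __table_cls__ automates: a
# subclass declaring its own primary key would get its own table
# (joined-table inheritance), while one declaring no primary key shares the
# parent's table (single-table inheritance).
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Person(Base):
    __tablename__ = "person"
    id = sa.Column(sa.Integer, primary_key=True)
    type = sa.Column(sa.String(20))
    __mapper_args__ = {"polymorphic_identity": "person",
                       "polymorphic_on": type}

class Engineer(Person):
    # no __tablename__ and no new primary key: maps onto "person"
    __mapper_args__ = {"polymorphic_identity": "engineer"}

assert Engineer.__table__ is Person.__table__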
def _setup_referent(self, metadata, constraint):
    spec = constraint.elements[0]._get_colspec()
    parts = spec.split(".")
    tname = parts[-2]
    if len(parts) == 3:
        referent_schema = parts[0]
    else:
        referent_schema = None

    if tname != '_alembic_batch_temp':
        key = sql_schema._get_table_key(tname, referent_schema)
        if key in metadata.tables:
            t = metadata.tables[key]
            for elem in constraint.elements:
                colname = elem._get_colspec().split(".")[-1]
                if not t.c.contains_column(colname):
                    t.append_column(Column(colname, sqltypes.NULLTYPE))
        else:
            Table(
                tname, metadata,
                *[Column(n, sqltypes.NULLTYPE)
                  for n in [elem._get_colspec().split(".")[-1]
                            for elem in constraint.elements]],
                schema=referent_schema
            )
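# The key looked up in metadata.tables above follows the contract of
# SQLAlchemy's private _get_table_key() helper (private API, so subject to
# change); a quick sketch of the key format it produces:
from sqlalchemy.sql.schema import _get_table_key

assert _get_table_key("user", None) == "user"
assert _get_table_key("user", "reporting") == "reporting.user"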
def _setup_referent(
    self, metadata: "MetaData", constraint: "ForeignKeyConstraint"
) -> None:
    spec = constraint.elements[0]._get_colspec()  # type:ignore[attr-defined]
    parts = spec.split(".")
    tname = parts[-2]
    if len(parts) == 3:
        referent_schema = parts[0]
    else:
        referent_schema = None
    if tname != self.temp_table_name:
        key = sql_schema._get_table_key(tname, referent_schema)

        def colspec(elem: Any):
            return elem._get_colspec()

        if key in metadata.tables:
            t = metadata.tables[key]
            for elem in constraint.elements:
                colname = colspec(elem).split(".")[-1]
                if colname not in t.c:
                    t.append_column(Column(colname, sqltypes.NULLTYPE))
        else:
            Table(
                tname,
                metadata,
                *[
                    Column(n, sqltypes.NULLTYPE)
                    for n in [
                        colspec(elem).split(".")[-1]
                        for elem in constraint.elements
                    ]
                ],
                schema=referent_schema,
            )
def _setup_referent(self, metadata, constraint):
    spec = constraint.elements[0]._get_colspec()
    parts = spec.split(".")
    tname = parts[-2]
    if len(parts) == 3:
        referent_schema = parts[0]
    else:
        referent_schema = None

    if tname != self.temp_table_name:
        key = sql_schema._get_table_key(tname, referent_schema)
        if key in metadata.tables:
            t = metadata.tables[key]
            for elem in constraint.elements:
                colname = elem._get_colspec().split(".")[-1]
                if not t.c.contains_column(colname):
                    t.append_column(Column(colname, sqltypes.NULLTYPE))
        else:
            Table(
                tname, metadata,
                *[Column(n, sqltypes.NULLTYPE)
                  for n in [elem._get_colspec().split(".")[-1]
                            for elem in constraint.elements]],
                schema=referent_schema
            )
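# A minimal usage sketch (table and column names invented) of the
# placeholder-table behavior above: when a foreign key's referent is not in
# the MetaData yet, a stub Table with NULLTYPE columns is created so the
# constraint can resolve during the batch copy.
from sqlalchemy import Column, MetaData, Table
from sqlalchemy.sql import sqltypes

metadata = MetaData()
Table("user", metadata,
      *[Column(n, sqltypes.NULLTYPE) for n in ("id",)],
      schema=None)

t = metadata.tables["user"]
if "email" not in t.c:               # the membership guard used above
    t.append_column(Column("email", sqltypes.NULLTYPE))
assert "email" in t.c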
def reflecttable(self, connection, table, include_columns):
    denormalize = self.identifier_preparer._denormalize_name
    normalize = self.identifier_preparer._normalize_name

    st = ('SELECT COLUMNNAME, MODE, DATATYPE, CODETYPE, LEN, DEC, '
          '  NULLABLE, "DEFAULT", DEFAULTFUNCTION '
          'FROM COLUMNS '
          'WHERE TABLENAME=? AND SCHEMANAME=%s '
          'ORDER BY POS')

    fk = ('SELECT COLUMNNAME, FKEYNAME, '
          '  REFSCHEMANAME, REFTABLENAME, REFCOLUMNNAME, RULE, '
          '  (CASE WHEN REFSCHEMANAME = CURRENT_SCHEMA '
          '   THEN 1 ELSE 0 END) AS in_schema '
          'FROM FOREIGNKEYCOLUMNS '
          'WHERE TABLENAME=? AND SCHEMANAME=%s '
          'ORDER BY FKEYNAME ')

    params = [denormalize(table.name)]
    if not table.schema:
        st = st % 'CURRENT_SCHEMA'
        fk = fk % 'CURRENT_SCHEMA'
    else:
        st = st % '?'
        fk = fk % '?'
        params.append(denormalize(table.schema))

    rows = connection.execute(st, params).fetchall()
    if not rows:
        raise exc.NoSuchTableError(table.fullname)

    include_columns = set(include_columns or [])

    for row in rows:
        (name, mode, col_type, encoding, length, scale,
         nullable, constant_def, func_def) = row

        name = normalize(name)

        if include_columns and name not in include_columns:
            continue

        type_args, type_kw = [], {}
        if col_type == 'FIXED':
            type_args = length, scale
            # Convert FIXED(10) DEFAULT SERIAL to our Integer
            if (scale == 0 and
                    func_def is not None and func_def.startswith('SERIAL')):
                col_type = 'INTEGER'
                type_args = length,
        elif col_type in 'FLOAT':
            type_args = length,
        elif col_type in ('CHAR', 'VARCHAR'):
            type_args = length,
            type_kw['encoding'] = encoding
        elif col_type == 'LONG':
            type_kw['encoding'] = encoding

        try:
            type_cls = ischema_names[col_type.lower()]
            type_instance = type_cls(*type_args, **type_kw)
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (col_type, name))
            type_instance = sqltypes.NullType

        col_kw = {'autoincrement': False}
        col_kw['nullable'] = (nullable == 'YES')
        col_kw['primary_key'] = (mode == 'KEY')

        if func_def is not None:
            if func_def.startswith('SERIAL'):
                if col_kw['primary_key']:
                    # No special default- let the standard autoincrement
                    # support handle SERIAL pk columns.
                    col_kw['autoincrement'] = True
                else:
                    # strip current numbering
                    col_kw['server_default'] = schema.DefaultClause(
                        sql.text('SERIAL'))
                    col_kw['autoincrement'] = True
            else:
                col_kw['server_default'] = schema.DefaultClause(
                    sql.text(func_def))
        elif constant_def is not None:
            col_kw['server_default'] = schema.DefaultClause(sql.text(
                "'%s'" % constant_def.replace("'", "''")))

        table.append_column(schema.Column(name, type_instance, **col_kw))

    fk_sets = itertools.groupby(connection.execute(fk, params),
                                lambda row: row.FKEYNAME)
    for fkeyname, fkey in fk_sets:
        fkey = list(fkey)
        if include_columns:
            key_cols = set([r.COLUMNNAME for r in fkey])
            if key_cols != include_columns:
                continue

        columns, referants = [], []
        quote = self.identifier_preparer._maybe_quote_identifier

        for row in fkey:
            columns.append(normalize(row.COLUMNNAME))
            if table.schema or not row.in_schema:
                referants.append('.'.join(
                    [quote(normalize(row[c]))
                     for c in ('REFSCHEMANAME', 'REFTABLENAME',
                               'REFCOLUMNNAME')]))
            else:
                referants.append('.'.join(
                    [quote(normalize(row[c]))
                     for c in ('REFTABLENAME', 'REFCOLUMNNAME')]))

        constraint_kw = {'name': fkeyname.lower()}
        if fkey[0].RULE is not None:
            rule = fkey[0].RULE
            if rule.startswith('DELETE '):
                rule = rule[7:]
            constraint_kw['ondelete'] = rule

        table_kw = {}
        if table.schema or not row.in_schema:
            table_kw['schema'] = normalize(fkey[0].REFSCHEMANAME)

        ref_key = schema._get_table_key(normalize(fkey[0].REFTABLENAME),
                                        table_kw.get('schema'))
        if ref_key not in table.metadata.tables:
            schema.Table(normalize(fkey[0].REFTABLENAME),
                         table.metadata,
                         autoload=True, autoload_with=connection,
                         **table_kw)

        constraint = schema.ForeignKeyConstraint(
            columns, referants, link_to_name=True, **constraint_kw)
        table.append_constraint(constraint)
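# Self-contained sketch (fake rows) of why the FK query above ends with
# "ORDER BY FKEYNAME": itertools.groupby() only merges *consecutive* rows
# sharing a key, so reassembling multi-column foreign keys depends on the
# rows arriving pre-sorted by constraint name.
import itertools

rows = [("fk_parent", "col_a"), ("fk_parent", "col_b"), ("fk_other", "col_c")]
for fkeyname, group in itertools.groupby(rows, lambda row: row[0]):
    print(fkeyname, [colname for _, colname in group])
# fk_parent ['col_a', 'col_b']
# fk_other ['col_c']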
def _compare_tables(conn_table_names, metadata_table_names,
                    object_filters, inspector, metadata, diffs,
                    autogen_context):

    default_schema = inspector.bind.dialect.default_schema_name

    # tables coming from the connection will not have "schema"
    # set if it matches default_schema_name; so we need a list
    # of table names from local metadata that also have "None" if schema
    # == default_schema_name. Most setups will be like this anyway but
    # some are not (see #170)
    metadata_table_names_no_dflt_schema = OrderedSet([
        (schema if schema != default_schema else None, tname)
        for schema, tname in metadata_table_names
    ])

    # to adjust for the MetaData collection storing the tables either
    # as "schemaname.tablename" or just "tablename", create a new lookup
    # which will match the "non-default-schema" keys to the Table object.
    tname_to_table = dict(
        (
            no_dflt_schema,
            metadata.tables[sa_schema._get_table_key(tname, schema)]
        )
        for no_dflt_schema, (schema, tname) in zip(
            metadata_table_names_no_dflt_schema,
            metadata_table_names)
    )
    metadata_table_names = metadata_table_names_no_dflt_schema

    for s, tname in metadata_table_names.difference(conn_table_names):
        name = '%s.%s' % (s, tname) if s else tname
        metadata_table = tname_to_table[(s, tname)]
        if _run_filters(metadata_table, tname, "table", False,
                        None, object_filters):
            diffs.append(("add_table", metadata_table))
            log.info("Detected added table %r", name)
            _compare_indexes_and_uniques(s, tname, object_filters,
                                         None,
                                         metadata_table,
                                         diffs, autogen_context, inspector)

    removal_metadata = sa_schema.MetaData()
    for s, tname in conn_table_names.difference(metadata_table_names):
        name = sa_schema._get_table_key(tname, s)
        exists = name in removal_metadata.tables
        t = sa_schema.Table(tname, removal_metadata, schema=s)
        if not exists:
            inspector.reflecttable(t, None)
        if _run_filters(t, tname, "table", True, None, object_filters):
            diffs.append(("remove_table", t))
            log.info("Detected removed table %r", name)

    existing_tables = conn_table_names.intersection(metadata_table_names)

    existing_metadata = sa_schema.MetaData()
    conn_column_info = {}
    for s, tname in existing_tables:
        name = sa_schema._get_table_key(tname, s)
        exists = name in existing_metadata.tables
        t = sa_schema.Table(tname, existing_metadata, schema=s)
        if not exists:
            inspector.reflecttable(t, None)
        conn_column_info[(s, tname)] = t

    for s, tname in sorted(existing_tables):
        name = '%s.%s' % (s, tname) if s else tname
        metadata_table = tname_to_table[(s, tname)]
        conn_table = existing_metadata.tables[name]

        if _run_filters(metadata_table, tname, "table", False,
                        conn_table, object_filters):
            _compare_columns(s, tname, object_filters,
                             conn_table,
                             metadata_table,
                             diffs, autogen_context, inspector)
            _compare_indexes_and_uniques(s, tname, object_filters,
                                         conn_table,
                                         metadata_table,
                                         diffs, autogen_context, inspector)
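# Stand-alone sketch of the default-schema normalization above (see #170):
# reflected names use None for the default schema, so metadata names that
# spell the default schema explicitly must be folded to None before the set
# arithmetic can match them up. Names here are illustrative.
default_schema = "public"
metadata_table_names = {("public", "user"), ("reporting", "sales")}

normalized = {
    (schema if schema != default_schema else None, tname)
    for schema, tname in metadata_table_names
}
assert normalized == {(None, "user"), ("reporting", "sales")}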
def _compare_tables(
    conn_table_names,
    metadata_table_names,
    inspector,
    upgrade_ops,
    autogen_context,
):
    default_schema = inspector.bind.dialect.default_schema_name

    # tables coming from the connection will not have "schema"
    # set if it matches default_schema_name; so we need a list
    # of table names from local metadata that also have "None" if schema
    # == default_schema_name. Most setups will be like this anyway but
    # some are not (see #170)
    metadata_table_names_no_dflt_schema = OrderedSet([
        (schema if schema != default_schema else None, tname)
        for schema, tname in metadata_table_names
    ])

    # to adjust for the MetaData collection storing the tables either
    # as "schemaname.tablename" or just "tablename", create a new lookup
    # which will match the "non-default-schema" keys to the Table object.
    tname_to_table = dict(
        (
            no_dflt_schema,
            autogen_context.table_key_to_table[
                sa_schema._get_table_key(tname, schema)
            ],
        )
        for no_dflt_schema, (schema, tname) in zip(
            metadata_table_names_no_dflt_schema, metadata_table_names
        )
    )
    metadata_table_names = metadata_table_names_no_dflt_schema

    for s, tname in metadata_table_names.difference(conn_table_names):
        name = "%s.%s" % (s, tname) if s else tname
        metadata_table = tname_to_table[(s, tname)]
        if autogen_context.run_filters(
                metadata_table, tname, "table", False, None):
            upgrade_ops.ops.append(
                ops.CreateTableOp.from_table(metadata_table))
            log.info("Detected added table %r", name)
            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)

            comparators.dispatch("table")(
                autogen_context,
                modify_table_ops,
                s,
                tname,
                None,
                metadata_table,
            )
            if not modify_table_ops.is_empty():
                upgrade_ops.ops.append(modify_table_ops)

    removal_metadata = sa_schema.MetaData()
    for s, tname in conn_table_names.difference(metadata_table_names):
        name = sa_schema._get_table_key(tname, s)
        exists = name in removal_metadata.tables
        t = sa_schema.Table(tname, removal_metadata, schema=s)
        if not exists:
            event.listen(
                t,
                "column_reflect",
                # fmt: off
                autogen_context.migration_context.impl.
                _compat_autogen_column_reflect(inspector),
                # fmt: on
            )
            inspector.reflecttable(t, None)
        if autogen_context.run_filters(t, tname, "table", True, None):
            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)

            comparators.dispatch("table")(
                autogen_context, modify_table_ops, s, tname, t, None
            )
            if not modify_table_ops.is_empty():
                upgrade_ops.ops.append(modify_table_ops)

            upgrade_ops.ops.append(ops.DropTableOp.from_table(t))
            log.info("Detected removed table %r", name)

    existing_tables = conn_table_names.intersection(metadata_table_names)

    existing_metadata = sa_schema.MetaData()
    conn_column_info = {}
    for s, tname in existing_tables:
        name = sa_schema._get_table_key(tname, s)
        exists = name in existing_metadata.tables
        t = sa_schema.Table(tname, existing_metadata, schema=s)
        if not exists:
            event.listen(
                t,
                "column_reflect",
                # fmt: off
                autogen_context.migration_context.impl.
                _compat_autogen_column_reflect(inspector),
                # fmt: on
            )
            inspector.reflecttable(t, None)
        conn_column_info[(s, tname)] = t

    for s, tname in sorted(existing_tables, key=lambda x: (x[0] or "", x[1])):
        s = s or None
        name = "%s.%s" % (s, tname) if s else tname
        metadata_table = tname_to_table[(s, tname)]
        conn_table = existing_metadata.tables[name]

        if autogen_context.run_filters(
                metadata_table, tname, "table", False, conn_table):
            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
            with _compare_columns(
                s,
                tname,
                conn_table,
                metadata_table,
                modify_table_ops,
                autogen_context,
                inspector,
            ):
                comparators.dispatch("table")(
                    autogen_context,
                    modify_table_ops,
                    s,
                    tname,
                    conn_table,
                    metadata_table,
                )

            if not modify_table_ops.is_empty():
                upgrade_ops.ops.append(modify_table_ops)
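# Hedged sketch of the container pattern above, using Alembic's real "ops"
# module but an illustrative table name: per-table changes accumulate in a
# ModifyTableOps bucket, which is only appended to the upgrade stream when a
# comparator actually put an operation into it.
from alembic.operations import ops

modify_table_ops = ops.ModifyTableOps("user", [], schema=None)
assert modify_table_ops.is_empty()   # nothing dispatched yet, so it is skipped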
def _compare_tables(conn_table_names, metadata_table_names,
                    object_filters, inspector, metadata, diffs,
                    autogen_context):

    default_schema = inspector.bind.dialect.default_schema_name

    # tables coming from the connection will not have "schema"
    # set if it matches default_schema_name; so we need a list
    # of table names from local metadata that also have "None" if schema
    # == default_schema_name. Most setups will be like this anyway but
    # some are not (see #170)
    metadata_table_names_no_dflt_schema = OrderedSet([
        (schema if schema != default_schema else None, tname)
        for schema, tname in metadata_table_names
    ])

    # to adjust for the MetaData collection storing the tables either
    # as "schemaname.tablename" or just "tablename", create a new lookup
    # which will match the "non-default-schema" keys to the Table object.
    tname_to_table = dict(
        (
            no_dflt_schema,
            metadata.tables[sa_schema._get_table_key(tname, schema)]
        )
        for no_dflt_schema, (schema, tname) in zip(
            metadata_table_names_no_dflt_schema,
            metadata_table_names)
    )
    metadata_table_names = metadata_table_names_no_dflt_schema

    for s, tname in metadata_table_names.difference(conn_table_names):
        name = '%s.%s' % (s, tname) if s else tname
        metadata_table = tname_to_table[(s, tname)]
        if _run_filters(
                metadata_table, tname, "table", False, None, object_filters):
            diffs.append(("add_table", metadata_table))
            log.info("Detected added table %r", name)
            _compare_indexes_and_uniques(s, tname, object_filters,
                                         None,
                                         metadata_table,
                                         diffs, autogen_context, inspector)

    removal_metadata = sa_schema.MetaData()
    for s, tname in conn_table_names.difference(metadata_table_names):
        name = sa_schema._get_table_key(tname, s)
        exists = name in removal_metadata.tables
        t = sa_schema.Table(tname, removal_metadata, schema=s)
        if not exists:
            event.listen(
                t,
                "column_reflect",
                autogen_context['context'].impl.
                _compat_autogen_column_reflect(inspector))
            inspector.reflecttable(t, None)
        if _run_filters(t, tname, "table", True, None, object_filters):
            diffs.append(("remove_table", t))
            log.info("Detected removed table %r", name)

    existing_tables = conn_table_names.intersection(metadata_table_names)

    existing_metadata = sa_schema.MetaData()
    conn_column_info = {}
    for s, tname in existing_tables:
        name = sa_schema._get_table_key(tname, s)
        exists = name in existing_metadata.tables
        t = sa_schema.Table(tname, existing_metadata, schema=s)
        if not exists:
            event.listen(
                t,
                "column_reflect",
                autogen_context['context'].impl.
                _compat_autogen_column_reflect(inspector))
            inspector.reflecttable(t, None)
        conn_column_info[(s, tname)] = t

    for s, tname in sorted(existing_tables, key=lambda x: (x[0] or '', x[1])):
        s = s or None
        name = '%s.%s' % (s, tname) if s else tname
        metadata_table = tname_to_table[(s, tname)]
        conn_table = existing_metadata.tables[name]

        if _run_filters(
                metadata_table, tname, "table", False,
                conn_table, object_filters):
            with _compare_columns(
                    s, tname, object_filters,
                    conn_table,
                    metadata_table,
                    diffs, autogen_context, inspector):
                _compare_indexes_and_uniques(s, tname, object_filters,
                                             conn_table,
                                             metadata_table,
                                             diffs, autogen_context,
                                             inspector)
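# Small sketch of the sort key above: substituting '' for a None schema keeps
# sorted() from comparing None with str (a TypeError on Python 3) while still
# ordering default-schema tables first.
existing_tables = {("zoo", "a"), (None, "b"), ("app", "c")}
ordered = sorted(existing_tables, key=lambda x: (x[0] or '', x[1]))
assert ordered == [(None, "b"), ("app", "c"), ("zoo", "a")]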