def _fixed_lookup_fixture(self):
    return [
        (sqltypes.String(), sqltypes.VARCHAR()),
        (sqltypes.String(1), sqltypes.VARCHAR(1)),
        (sqltypes.String(3), sqltypes.VARCHAR(3)),
        (sqltypes.Text(), sqltypes.TEXT()),
        (sqltypes.Unicode(), sqltypes.VARCHAR()),
        (sqltypes.Unicode(1), sqltypes.VARCHAR(1)),
        (sqltypes.UnicodeText(), sqltypes.TEXT()),
        (sqltypes.CHAR(3), sqltypes.CHAR(3)),
        (sqltypes.NUMERIC, sqltypes.NUMERIC()),
        (sqltypes.NUMERIC(10, 2), sqltypes.NUMERIC(10, 2)),
        (sqltypes.Numeric, sqltypes.NUMERIC()),
        (sqltypes.Numeric(10, 2), sqltypes.NUMERIC(10, 2)),
        (sqltypes.DECIMAL, sqltypes.DECIMAL()),
        (sqltypes.DECIMAL(10, 2), sqltypes.DECIMAL(10, 2)),
        (sqltypes.INTEGER, sqltypes.INTEGER()),
        (sqltypes.BIGINT, sqltypes.BIGINT()),
        (sqltypes.Float, sqltypes.FLOAT()),
        (sqltypes.TIMESTAMP, sqltypes.TIMESTAMP()),
        (sqltypes.DATETIME, sqltypes.DATETIME()),
        (sqltypes.DateTime, sqltypes.DATETIME()),
        (sqltypes.DateTime(), sqltypes.DATETIME()),
        (sqltypes.DATE, sqltypes.DATE()),
        (sqltypes.Date, sqltypes.DATE()),
        (sqltypes.TIME, sqltypes.TIME()),
        (sqltypes.Time, sqltypes.TIME()),
        (sqltypes.BOOLEAN, sqltypes.BOOLEAN()),
        (sqltypes.Boolean, sqltypes.BOOLEAN()),
    ]
def downgrade(migrate_engine):
    if migrate_engine.name != 'mysql':
        return
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
    raw_template.c.template.alter(type=sqltypes.TEXT())
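# For context, a minimal sketch of the matching upgrade step. It assumes the
# intent of this sqlalchemy-migrate script is to widen the column to MySQL's
# LONGTEXT; the table and column names come from the downgrade above, but the
# LONGTEXT target is an assumption, not taken from the original migration.
import sqlalchemy
from sqlalchemy.dialects import mysql


def upgrade(migrate_engine):
    if migrate_engine.name != 'mysql':
        return
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
    # Column.alter() is provided by sqlalchemy-migrate.
    raw_template.c.template.alter(type=mysql.LONGTEXT())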
def downgrade(migrate_engine):
    if migrate_engine.name != 'mysql':
        return
    meta = sqlalchemy.MetaData(bind=migrate_engine)
    stack = sqlalchemy.Table('stack', meta, autoload=True)
    stack.c.parameters.alter(type=sqltypes.TEXT())
    resource = sqlalchemy.Table('resource', meta, autoload=True)
    resource.c.rsrc_metadata.alter(type=sqltypes.TEXT())
    watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
    watch_rule.c.rule.alter(type=sqltypes.TEXT())
    watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
    watch_data.c.data.alter(type=sqltypes.TEXT())
def _fixed_lookup_fixture(self):
    return [
        (sqltypes.String(), sqltypes.VARCHAR()),
        (sqltypes.String(1), sqltypes.VARCHAR(1)),
        (sqltypes.String(3), sqltypes.VARCHAR(3)),
        (sqltypes.Text(), sqltypes.TEXT()),
        (sqltypes.Unicode(), sqltypes.VARCHAR()),
        (sqltypes.Unicode(1), sqltypes.VARCHAR(1)),
        (sqltypes.UnicodeText(), sqltypes.TEXT()),
        (sqltypes.CHAR(3), sqltypes.CHAR(3)),
        (sqltypes.NUMERIC, sqltypes.NUMERIC()),
        (sqltypes.NUMERIC(10, 2), sqltypes.NUMERIC(10, 2)),
        (sqltypes.Numeric, sqltypes.NUMERIC()),
        (sqltypes.Numeric(10, 2), sqltypes.NUMERIC(10, 2)),
        (sqltypes.DECIMAL, sqltypes.DECIMAL()),
        (sqltypes.DECIMAL(10, 2), sqltypes.DECIMAL(10, 2)),
        (sqltypes.INTEGER, sqltypes.INTEGER()),
        (sqltypes.BIGINT, sqltypes.BIGINT()),
        (sqltypes.Float, sqltypes.FLOAT()),
        (sqltypes.TIMESTAMP, sqltypes.TIMESTAMP()),
        (sqltypes.DATETIME, sqltypes.DATETIME()),
        (sqltypes.DateTime, sqltypes.DATETIME()),
        (sqltypes.DateTime(), sqltypes.DATETIME()),
        (sqltypes.DATE, sqltypes.DATE()),
        (sqltypes.Date, sqltypes.DATE()),
        (sqltypes.TIME, sqltypes.TIME()),
        (sqltypes.Time, sqltypes.TIME()),
        (sqltypes.BOOLEAN, sqltypes.BOOLEAN()),
        (sqltypes.Boolean, sqltypes.BOOLEAN()),
        (sqlite.DATE(storage_format="%(year)04d%(month)02d%(day)02d"),
         sqltypes.DATE()),
        (sqlite.TIME(storage_format="%(hour)02d%(minute)02d%(second)02d"),
         sqltypes.TIME()),
        (sqlite.DATETIME(storage_format="%(year)04d%(month)02d%(day)02d"
                         "%(hour)02d%(minute)02d%(second)02d"),
         sqltypes.DATETIME()),
    ]
from geoalchemy2 import types as gatypes
from sqlalchemy import types


def associate_column_names_and_sqlalchemy_types(dataframe):
    """Create a dict of column names and SQLAlchemy types for use with
    pandas ``DataFrame.to_sql``.

    :returns: dict mapping column name -> SQLAlchemy type
    """
    dtypedict = {}
    for name in dataframe.columns:
        if "geom" in str(name):
            # Geometry columns get a PostGIS POINT type (WGS84).
            dtypedict[name] = gatypes.Geometry(geometry_type="POINT", srid=4326)
        else:
            dtypedict[name] = types.TEXT()
    return dtypedict
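# Usage sketch: builds the dtype mapping for a small DataFrame and hands it to
# DataFrame.to_sql. The DataFrame contents, table name, and engine are
# hypothetical; a PostGIS-enabled database is assumed for the geometry column.
import pandas as pd

df = pd.DataFrame({
    "geom": ["POINT(4.35 50.85)"],  # hypothetical WKT input
    "city": ["Brussels"],
})
dtypedict = associate_column_names_and_sqlalchemy_types(df)
print(dtypedict)  # approx.: {'geom': Geometry(...), 'city': TEXT()}
# df.to_sql("cities", engine, dtype=dtypedict)  # engine: a PostGIS connection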
def str_to_sqltype(expr):
    import re

    import sqlalchemy.types as sqltypes

    norm_expr = expr.lower()
    if norm_expr.startswith('integer'):
        match_result = re.match(r'integer\((\d+)\)', norm_expr)
        if match_result is not None:
            return (sqltypes.BIGINT() if int(match_result.group(1)) > 11
                    else sqltypes.INTEGER())
        return sqltypes.BIGINT()
    if norm_expr == 'decimal':
        return sqltypes.DECIMAL()
    if norm_expr == 'date':
        return sqltypes.DATETIME()
    if norm_expr in ('bool', 'boolean'):
        return sqltypes.BOOLEAN()
    if norm_expr.startswith('string'):
        match_result = re.match(r'string\((\d+)\)', norm_expr)
        if match_result is not None:
            maxlen = int(match_result.group(1))
            return sqltypes.VARCHAR(maxlen) if maxlen < 65536 else sqltypes.TEXT()
        return sqltypes.TEXT()
    raise RuntimeError("Unsupported data type [" + expr + "]")
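# A few example calls for str_to_sqltype (inputs are illustrative; the type
# reprs may vary slightly across SQLAlchemy versions):
print(str_to_sqltype("integer(8)"))      # INTEGER()
print(str_to_sqltype("integer(20)"))     # BIGINT()
print(str_to_sqltype("string(255)"))     # VARCHAR(255)
print(str_to_sqltype("string(100000)"))  # TEXT()
print(str_to_sqltype("boolean"))         # BOOLEAN()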
def _type_affinity_fixture(self): return [ ("LONGTEXT", sqltypes.TEXT()), ("TINYINT", sqltypes.INTEGER()), ("MEDIUMINT", sqltypes.INTEGER()), ("INT2", sqltypes.INTEGER()), ("UNSIGNED BIG INT", sqltypes.INTEGER()), ("INT8", sqltypes.INTEGER()), ("CHARACTER(20)", sqltypes.TEXT()), ("CLOB", sqltypes.TEXT()), ("CLOBBER", sqltypes.TEXT()), ("VARYING CHARACTER(70)", sqltypes.TEXT()), ("NATIVE CHARACTER(70)", sqltypes.TEXT()), ("BLOB", sqltypes.BLOB()), ("BLOBBER", sqltypes.NullType()), ("DOUBLE PRECISION", sqltypes.REAL()), ("FLOATY", sqltypes.REAL()), ("NOTHING WE KNOW", sqltypes.NUMERIC()), ]
# Example of how to use the BDNYC database with Astrodbkit2

from astrodbkit2.astrodb import Database, copy_database_schema
from sqlalchemy import types  # for BDNYC column overrides

# Establish a connection to the database.
# Note that special parameters have to be passed for the BDNYC schema
# to work properly.
connection_string = 'sqlite:///../BDNYCdb-1/bdnyc_database.db'
db = Database(connection_string,
              reference_tables=['changelog', 'data_requests', 'publications',
                                'ignore', 'modes', 'systems', 'telescopes',
                                'versions', 'instruments'],
              primary_table='sources',
              primary_table_key='id',
              foreign_key='source_id',
              column_type_overrides={'spectra.spectrum': types.TEXT(),
                                     'spectra.local_spectrum': types.TEXT()})

# Query similarly to SIMPLE
results = db.query(db.sources).limit(10).all()
for row in results:
    print(row)

# The spectra table contains columns of type SPECTRUM;
# column_type_overrides allows us to work with them as text.
for c in db.spectra.columns:
    print(c, c.type)
db.query(db.spectra).limit(10).all()

_ = db.inventory(11, pretty_print=True)

# Can output the full contents of BDNYC as JSON files
db.save_db('bdnyc')

# Copy to another database
def reflecttable(self, connection, table, include_columns):
    # TODO: map these better
    column_func = {
        14: lambda r: sqltypes.String(r['FLEN']),  # TEXT
        7: lambda r: sqltypes.Integer(),  # SHORT
        8: lambda r: (sqltypes.Integer() if r['FPREC'] == 0
                      else sqltypes.Numeric(precision=r['FPREC'],
                                            length=r['FSCALE'] * -1)),  # INT or NUMERIC
        9: lambda r: sqltypes.Float(),  # QUAD
        10: lambda r: sqltypes.Float(),  # FLOAT
        27: lambda r: sqltypes.Float(),  # DOUBLE
        35: lambda r: sqltypes.DateTime(),  # TIMESTAMP
        37: lambda r: sqltypes.String(r['FLEN']),  # VARYING
        261: lambda r: sqltypes.TEXT(),  # BLOB
        40: lambda r: sqltypes.Char(r['FLEN']),  # CSTRING
        12: lambda r: sqltypes.Date(),  # DATE
        13: lambda r: sqltypes.Time(),  # TIME
        16: lambda r: sqltypes.Numeric(precision=r['FPREC'],
                                       length=r['FSCALE'] * -1),  # INT64
    }
    tblqry = """
    SELECT DISTINCT R.RDB$FIELD_NAME AS FNAME,
                    R.RDB$NULL_FLAG AS NULL_FLAG,
                    R.RDB$FIELD_POSITION,
                    F.RDB$FIELD_TYPE AS FTYPE,
                    F.RDB$FIELD_SUB_TYPE AS STYPE,
                    F.RDB$FIELD_LENGTH AS FLEN,
                    F.RDB$FIELD_PRECISION AS FPREC,
                    F.RDB$FIELD_SCALE AS FSCALE
    FROM RDB$RELATION_FIELDS R
         JOIN RDB$FIELDS F ON R.RDB$FIELD_SOURCE=F.RDB$FIELD_NAME
    WHERE F.RDB$SYSTEM_FLAG=0 AND R.RDB$RELATION_NAME=?
    ORDER BY R.RDB$FIELD_POSITION"""
    keyqry = """
    SELECT SE.RDB$FIELD_NAME SENAME
    FROM RDB$RELATION_CONSTRAINTS RC
         JOIN RDB$INDEX_SEGMENTS SE ON RC.RDB$INDEX_NAME=SE.RDB$INDEX_NAME
    WHERE RC.RDB$CONSTRAINT_TYPE=? AND RC.RDB$RELATION_NAME=?"""
    fkqry = """
    SELECT RC.RDB$CONSTRAINT_NAME CNAME,
           CSE.RDB$FIELD_NAME FNAME,
           IX2.RDB$RELATION_NAME RNAME,
           SE.RDB$FIELD_NAME SENAME
    FROM RDB$RELATION_CONSTRAINTS RC
         JOIN RDB$INDICES IX1 ON IX1.RDB$INDEX_NAME=RC.RDB$INDEX_NAME
         JOIN RDB$INDICES IX2 ON IX2.RDB$INDEX_NAME=IX1.RDB$FOREIGN_KEY
         JOIN RDB$INDEX_SEGMENTS CSE ON CSE.RDB$INDEX_NAME=IX1.RDB$INDEX_NAME
         JOIN RDB$INDEX_SEGMENTS SE ON SE.RDB$INDEX_NAME=IX2.RDB$INDEX_NAME
              AND SE.RDB$FIELD_POSITION=CSE.RDB$FIELD_POSITION
    WHERE RC.RDB$CONSTRAINT_TYPE=? AND RC.RDB$RELATION_NAME=?
    ORDER BY SE.RDB$INDEX_NAME, SE.RDB$FIELD_POSITION"""

    # get primary key fields
    c = connection.execute(keyqry,
                           ["PRIMARY KEY", self._denormalize_name(table.name)])
    pkfields = [self._normalize_name(r['SENAME']) for r in c.fetchall()]

    # get all of the fields for this table
    c = connection.execute(tblqry, [self._denormalize_name(table.name)])

    found_table = False
    while True:
        row = c.fetchone()
        if row is None:
            break
        found_table = True

        name = self._normalize_name(row['FNAME'])
        if include_columns and name not in include_columns:
            continue
        args = [name]

        kw = {}
        # get the data types and lengths
        coltype = column_func.get(row['FTYPE'], None)
        if coltype is None:
            warnings.warn(RuntimeWarning(
                "Did not recognize type '%s' of column '%s'"
                % (str(row['FTYPE']), name)))
            coltype = sqltypes.NULLTYPE
        else:
            coltype = coltype(row)
        args.append(coltype)

        # is it a primary key?
        kw['primary_key'] = name in pkfields

        # is it nullable?
        kw['nullable'] = not bool(row['NULL_FLAG'])

        table.append_column(schema.Column(*args, **kw))

    if not found_table:
        raise exceptions.NoSuchTableError(table.name)

    # get the foreign keys
    c = connection.execute(fkqry,
                           ["FOREIGN KEY", self._denormalize_name(table.name)])
    fks = {}
    while True:
        row = c.fetchone()
        if not row:
            break

        cname = self._normalize_name(row['CNAME'])
        try:
            fk = fks[cname]
        except KeyError:
            fks[cname] = fk = ([], [])
        rname = self._normalize_name(row['RNAME'])
        schema.Table(rname, table.metadata, autoload=True,
                     autoload_with=connection)
        fname = self._normalize_name(row['FNAME'])
        refspec = rname + '.' + self._normalize_name(row['SENAME'])
        fk[0].append(fname)
        fk[1].append(refspec)

    for name, value in fks.items():
        table.append_constraint(
            schema.ForeignKeyConstraint(value[0], value[1], name=name))
), ( "TIME_TZ", types.TIME(timezone=True), ), ( "TIMESTAMP_NTZ", types.TIMESTAMP(timezone=False), ), ( "TIMESTAMP_TZ", types.TIMESTAMP(timezone=True), ), ( "TEXT", types.TEXT(), ), ( "VARCHAR", types.VARCHAR(), ), ) unquoted_types = ( ( "BINARY", types.LargeBinary(), ), ( "BOOLEAN", types.Boolean(),
# Establish connections to the databases.
# Note that special parameters have to be passed for the BDNYC schema
# to work properly.
connection_string = 'sqlite:///../BDNYCdevdb/bdnycdev.db'
bdnyc = Database(connection_string,
                 reference_tables=[
                     'changelog', 'data_requests', 'publications', 'ignore',
                     'modes', 'systems', 'telescopes', 'versions',
                     'instruments'
                 ],
                 primary_table='sources',
                 primary_table_key='id',
                 foreign_key='source_id',
                 column_type_overrides={
                     'spectra.spectrum': types.TEXT(),
                     'spectra.local_spectrum': types.TEXT()
                 })

# SIMPLE
connection_string = 'sqlite:///SIMPLE.db'
db = Database(connection_string)

# First, copy the publications that are not already in SIMPLE
temp = db.query(db.Publications.c.name).all()
existing_simple = [s[0] for s in temp]
temp = bdnyc.query(bdnyc.publications)\
    .filter(db.publications.c.shortname.notin_(existing_simple))\
    .all()

# Reformat data into something easier for SIMPLE to import
def init():
    """Define the event table and its mapping."""
    # Database definition
    from sqlalchemy import types, orm
    from sqlalchemy.schema import Column, Table, Sequence, ForeignKey
    from sqlalchemy.orm import relationship, backref, relation, mapper
    # Dependencies
    from User import User
    from Course import Course
    from Planning import Planning
    from ClassRoom import ClassRoom

    t_event = Table(
        'event', db.metadata,
        Column('id', types.Integer, Sequence('event_seq_id', optional=True),
               nullable=False, primary_key=True),
        Column('name', types.VARCHAR(255), nullable=False),
        Column('description', types.TEXT(2000)),
        Column('start', types.DateTime(), nullable=False),
        Column('time', types.Integer, nullable=False),
        Column('name_teacher', types.VARCHAR(255)),
        Column('id_teacher', types.Integer, ForeignKey('user.id')),
        Column('id_classroom', types.Integer, ForeignKey('classroom.id')),
        Column('modality',
               types.Enum('none', 'lesson_elearning', 'lesson_classroom',
                          'lesson_practice', 'evaluation_practice',
                          'evaluation_exam', 'evaluation_oral'),
               nullable=False),
        Column('id_course', types.Integer, ForeignKey('course.id')),
        Column('mandatory', types.Integer, nullable=False),
        Column('private', types.Integer, nullable=False),
        Column('id_planning', types.Integer, ForeignKey('planning.id'),
               nullable=False),
        Column('added', types.DateTime(), default=datetime.now,
               nullable=False),
        Column('modified', types.DateTime(), default=datetime.now,
               onupdate=datetime.now, nullable=False),
    )

    mapper(Event, t_event, properties={
        'datetime': t_event.c.start,
        'time_length': t_event.c.time,
        'teacher_id': t_event.c.id_teacher,
        'teacher_name': t_event.c.name_teacher,
        'required_event': t_event.c.mandatory,
        'private_event': t_event.c.private,
        'classroom': relationship(ClassRoom, backref='events'),
        'course': relationship(Course, backref='events'),
        'planning': relationship(Planning, backref='events'),
        'teacher': relationship(User, backref='events'),
    })
def stdtype_to_sqltype(stdtype):
    import sqlalchemy.types as sqltypes

    if isinstance(stdtype, stdtypes.StringType):
        return (sqltypes.VARCHAR(length=stdtype.max_len)
                if 0 < stdtype.max_len < 65536 else sqltypes.TEXT())
    if isinstance(stdtype, stdtypes.BoolType):
        return sqltypes.BOOLEAN()
    if isinstance(stdtype, stdtypes.DateType):
        return sqltypes.DATE() if stdtype.only_date else sqltypes.TIMESTAMP()
    if isinstance(stdtype, stdtypes.IntegerType):
        return sqltypes.BIGINT() if stdtype.length > 11 else sqltypes.INTEGER()
    if isinstance(stdtype, stdtypes.DecimalType):
        return sqltypes.DECIMAL()
    if isinstance(stdtype, stdtypes.ArrayType):
        return sqltypes.ARRAY(item_type=stdtype.item_type)
    # Mirror str_to_sqltype above: fail loudly instead of returning None.
    raise RuntimeError("Unsupported std type [%r]" % stdtype)
def reflecttable(self, connection, table):
    # TODO: map these better
    column_func = {
        14: lambda r: sqltypes.String(r['FLEN']),  # TEXT
        7: lambda r: sqltypes.Integer(),  # SHORT
        8: lambda r: sqltypes.Integer(),  # LONG
        9: lambda r: sqltypes.Float(),  # QUAD
        10: lambda r: sqltypes.Float(),  # FLOAT
        27: lambda r: sqltypes.Float(),  # DOUBLE
        35: lambda r: sqltypes.DateTime(),  # TIMESTAMP
        37: lambda r: sqltypes.String(r['FLEN']),  # VARYING
        261: lambda r: sqltypes.TEXT(),  # BLOB
        40: lambda r: sqltypes.Char(r['FLEN']),  # CSTRING
        12: lambda r: sqltypes.Date(),  # DATE
        13: lambda r: sqltypes.Time(),  # TIME
        16: lambda r: sqltypes.Numeric(precision=r['FPREC'],
                                       length=r['FSCALE'] * -1),  # INT64
    }
    tblqry = """\
    SELECT DISTINCT R.RDB$FIELD_NAME AS FNAME,
                    R.RDB$NULL_FLAG AS NULL_FLAG,
                    R.RDB$FIELD_POSITION,
                    F.RDB$FIELD_TYPE AS FTYPE,
                    F.RDB$FIELD_SUB_TYPE AS STYPE,
                    F.RDB$FIELD_LENGTH AS FLEN,
                    F.RDB$FIELD_PRECISION AS FPREC,
                    F.RDB$FIELD_SCALE AS FSCALE
    FROM RDB$RELATION_FIELDS R
         JOIN RDB$FIELDS F ON R.RDB$FIELD_SOURCE=F.RDB$FIELD_NAME
    WHERE F.RDB$SYSTEM_FLAG=0 AND R.RDB$RELATION_NAME=?
    ORDER BY R.RDB$FIELD_POSITION;"""
    keyqry = """
    SELECT RC.RDB$CONSTRAINT_TYPE KEYTYPE,
           RC.RDB$CONSTRAINT_NAME CNAME,
           RC.RDB$INDEX_NAME INAME,
           SE.RDB$FIELD_NAME SENAME
    FROM RDB$RELATION_CONSTRAINTS RC
         LEFT JOIN RDB$INDEX_SEGMENTS SE
              ON RC.RDB$INDEX_NAME=SE.RDB$INDEX_NAME
    WHERE RC.RDB$RELATION_NAME=? AND SE.RDB$FIELD_NAME=?
    """

    # get all of the fields for this table
    c = connection.execute(tblqry, [table.name.upper()])
    while True:
        row = c.fetchone()
        if not row:
            break
        args = [row['FNAME']]
        kw = {}
        # get the data types and lengths
        args.append(column_func[row['FTYPE']](row))

        # is it a foreign key (and what is it linked to)
        # is it a primary key?

        table.append_item(schema.Column(*args, **kw))
def load_dialect_impl(self, dialect: DefaultDialect) -> Any:
    return dialect.type_descriptor(types.TEXT())
def reflecttable(self, connection, table):
    # TODO: map these better
    column_func = {
        14: lambda r: sqltypes.String(r['FLEN']),  # TEXT
        7: lambda r: sqltypes.Integer(),  # SHORT
        8: lambda r: sqltypes.Integer(),  # LONG
        9: lambda r: sqltypes.Float(),  # QUAD
        10: lambda r: sqltypes.Float(),  # FLOAT
        27: lambda r: sqltypes.Float(),  # DOUBLE
        35: lambda r: sqltypes.DateTime(),  # TIMESTAMP
        37: lambda r: sqltypes.String(r['FLEN']),  # VARYING
        261: lambda r: sqltypes.TEXT(),  # BLOB
        40: lambda r: sqltypes.Char(r['FLEN']),  # CSTRING
        12: lambda r: sqltypes.Date(),  # DATE
        13: lambda r: sqltypes.Time(),  # TIME
        16: lambda r: sqltypes.Numeric(precision=r['FPREC'],
                                       length=r['FSCALE'] * -1),  # INT64
    }
    tblqry = """
    SELECT DISTINCT R.RDB$FIELD_NAME AS FNAME,
                    R.RDB$NULL_FLAG AS NULL_FLAG,
                    R.RDB$FIELD_POSITION,
                    F.RDB$FIELD_TYPE AS FTYPE,
                    F.RDB$FIELD_SUB_TYPE AS STYPE,
                    F.RDB$FIELD_LENGTH AS FLEN,
                    F.RDB$FIELD_PRECISION AS FPREC,
                    F.RDB$FIELD_SCALE AS FSCALE
    FROM RDB$RELATION_FIELDS R
         JOIN RDB$FIELDS F ON R.RDB$FIELD_SOURCE=F.RDB$FIELD_NAME
    WHERE F.RDB$SYSTEM_FLAG=0 AND R.RDB$RELATION_NAME=?
    ORDER BY R.RDB$FIELD_POSITION"""
    keyqry = """
    SELECT SE.RDB$FIELD_NAME SENAME
    FROM RDB$RELATION_CONSTRAINTS RC
         JOIN RDB$INDEX_SEGMENTS SE ON RC.RDB$INDEX_NAME=SE.RDB$INDEX_NAME
    WHERE RC.RDB$CONSTRAINT_TYPE=? AND RC.RDB$RELATION_NAME=?"""
    fkqry = """
    SELECT RC.RDB$CONSTRAINT_NAME CNAME,
           CSE.RDB$FIELD_NAME FNAME,
           IX2.RDB$RELATION_NAME RNAME,
           SE.RDB$FIELD_NAME SENAME
    FROM RDB$RELATION_CONSTRAINTS RC
         JOIN RDB$INDICES IX1 ON IX1.RDB$INDEX_NAME=RC.RDB$INDEX_NAME
         JOIN RDB$INDICES IX2 ON IX2.RDB$INDEX_NAME=IX1.RDB$FOREIGN_KEY
         JOIN RDB$INDEX_SEGMENTS CSE ON CSE.RDB$INDEX_NAME=IX1.RDB$INDEX_NAME
         JOIN RDB$INDEX_SEGMENTS SE ON SE.RDB$INDEX_NAME=IX2.RDB$INDEX_NAME
              AND SE.RDB$FIELD_POSITION=CSE.RDB$FIELD_POSITION
    WHERE RC.RDB$CONSTRAINT_TYPE=? AND RC.RDB$RELATION_NAME=?
    ORDER BY SE.RDB$INDEX_NAME, SE.RDB$FIELD_POSITION"""

    # get primary key fields
    c = connection.execute(keyqry, ["PRIMARY KEY", table.name.upper()])
    pkfields = [r['SENAME'] for r in c.fetchall()]

    def lower_if_possible(name):
        # Remove trailing spaces: FB uses a CHAR() type,
        # which is padded with spaces
        name = name.rstrip()
        # If it is composed only of upper-case chars, use
        # the lowered version, otherwise keep the original
        # (even if stripped...)
        lname = name.lower()
        if lname.upper() == name and ' ' not in name:
            return lname
        return name

    # get all of the fields for this table
    c = connection.execute(tblqry, [table.name.upper()])
    row = c.fetchone()
    if not row:
        raise exceptions.NoSuchTableError(table.name)
    while row:
        name = row['FNAME']
        args = [lower_if_possible(name)]
        kw = {}
        # get the data types and lengths
        args.append(column_func[row['FTYPE']](row))
        # is it a primary key?
        kw['primary_key'] = name in pkfields
        table.append_column(schema.Column(*args, **kw))
        row = c.fetchone()

    # get the foreign keys
    c = connection.execute(fkqry, ["FOREIGN KEY", table.name.upper()])
    fks = {}
    while True:
        row = c.fetchone()
        if not row:
            break
        cname = lower_if_possible(row['CNAME'])
        try:
            fk = fks[cname]
        except KeyError:
            fks[cname] = fk = ([], [])
        rname = lower_if_possible(row['RNAME'])
        schema.Table(rname, table.metadata, autoload=True,
                     autoload_with=connection)
        fname = lower_if_possible(row['FNAME'])
        refspec = rname + '.' + lower_if_possible(row['SENAME'])
        fk[0].append(fname)
        fk[1].append(refspec)

    for name, value in fks.items():
        table.append_constraint(
            schema.ForeignKeyConstraint(value[0], value[1], name=name))
def load_dialect_impl(self, dialect: Any) -> Any:
    if dialect.name == 'postgresql':
        t = postgresql.JSONB()
    else:
        t = types.TEXT()
    return dialect.type_descriptor(t)
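# load_dialect_impl hooks like the two above live on a TypeDecorator subclass.
# A self-contained sketch built around the same idea; the class name and the
# JSON round-tripping are illustrative assumptions, not taken from the
# snippets above.
import json
from typing import Any

from sqlalchemy import types
from sqlalchemy.dialects import postgresql


class JSONEncodedText(types.TypeDecorator):
    """Hypothetical decorator: JSONB on PostgreSQL, JSON-in-TEXT elsewhere."""

    impl = types.TEXT
    cache_ok = True

    def load_dialect_impl(self, dialect: Any) -> Any:
        # Pick the concrete storage type per backend, as in the snippets above.
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.JSONB())
        return dialect.type_descriptor(types.TEXT())

    def process_bind_param(self, value: Any, dialect: Any) -> Any:
        if value is None or dialect.name == 'postgresql':
            # JSONB serializes Python objects natively on PostgreSQL.
            return value
        return json.dumps(value)

    def process_result_value(self, value: Any, dialect: Any) -> Any:
        if value is None or dialect.name == 'postgresql':
            return value
        return json.loads(value)

# Columns can then be declared as Column('payload', JSONEncodedText()) and
# read/write plain Python objects on any backend.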