def _get_table_columns(self, connection, table_name, schema, extended=False):
    full_table = table_name
    if schema:
        full_table = schema + '.' + table_name
    # TODO using TGetColumnsReq hangs after sending TFetchResultsReq.
    # Using DESCRIBE works but is uglier.
    try:
        extended = " FORMATTED" if extended else ""
        rows = connection.execute('DESCRIBE{} {}'.format(
            extended,
            self.identifier_preparer.quote_identifier(full_table))).fetchall()
    except exc.OperationalError as e:
        # Does the table exist?
        regex_fmt = r'TExecuteStatementResp.*SemanticException.*Table not found {}'
        regex = regex_fmt.format(re.escape(full_table))
        if re.search(regex, e.args[0]):
            raise exc.NoSuchTableError(full_table)
        else:
            raise
    else:
        # Hive is stupid: this is what I get from DESCRIBE some_schema.does_not_exist
        regex = r'Table .* does not exist'
        if len(rows) == 1 and re.match(regex, rows[0].col_name):
            raise exc.NoSuchTableError(full_table)
        return rows
def _get_table_columns(self, connection, table_name, schema):
    full_table = table_name
    # Only qualify the table if the schema exists and is not default.
    if schema and schema.lower() != "default":
        full_table = schema + '.' + table_name
    # TODO using TGetColumnsReq hangs after sending TFetchResultsReq.
    # Using DESCRIBE works but is uglier.
    try:
        # This needs the table name to be unescaped (no backticks).
        rows = connection.execute(
            'DESCRIBE {}'.format(full_table)).fetchall()
    except exc.OperationalError as e:
        # Does the table exist?
        regex_fmt = r'TExecuteStatementResp.*NoSuchTableException.*Table or view \'{}\'' \
                    r' not found'
        regex = regex_fmt.format(re.escape(table_name))
        if re.search(regex, e.args[0]):
            raise exc.NoSuchTableError(full_table)
        elif schema:
            schema_regex_fmt = r'TExecuteStatementResp.*NoSuchDatabaseException.*Database ' \
                               r'\'{}\' not found'
            schema_regex = schema_regex_fmt.format(re.escape(schema))
            if re.search(schema_regex, e.args[0]):
                raise exc.NoSuchTableError(full_table)
        else:
            # When a hive-only column exists in a table
            hive_regex_fmt = r'org.apache.spark.SparkException: Cannot recognize hive type ' \
                             r'string'
            if re.search(hive_regex_fmt, e.args[0]):
                raise exc.UnreflectableTableError
            else:
                raise
    else:
        return rows
def _get_table_columns(self, connection, table_name, schema):
    full_table = table_name
    if schema:
        full_table = schema + '.' + table_name
    # TODO using TGetColumnsReq hangs after sending TFetchResultsReq.
    # Using DESCRIBE works but is uglier.
    try:
        # This needs the table name to be unescaped (no backticks).
        rows = connection.execute(
            'DESCRIBE {}'.format(full_table)).fetchall()
        rows = [(row[0].decode('UTF-16'),
                 row[1].decode('UTF-16'),
                 row[2].decode('UTF-16')) for row in rows]
    except exc.OperationalError as e:
        # Does the table exist?
        regex_fmt = r'TExecuteStatementResp.*SemanticException.*Table not found {}'
        regex = regex_fmt.format(re.escape(full_table))
        if re.search(regex, e.args[0]):
            raise exc.NoSuchTableError(full_table)
        else:
            raise
    else:
        # Hive is stupid: this is what I get from DESCRIBE some_schema.does_not_exist
        regex = r'Table .* does not exist'
        # The rows were re-packed as plain tuples above, so index the first
        # field positionally; the .col_name attribute is no longer available.
        if len(rows) == 1 and re.match(regex, rows[0][0]):
            raise exc.NoSuchTableError(full_table)
        return rows
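# Caller-side sketch (the `dialect` and `connection` arguments stand in for
# objects this corpus does not construct): every variant above funnels its
# driver-specific error into SQLAlchemy's NoSuchTableError, so a missing
# table can be handled uniformly regardless of backend.
from sqlalchemy import exc

def describe_or_none(dialect, connection, table_name, schema=None):
    """Return the DESCRIBE rows, or None when the table does not exist."""
    try:
        return dialect._get_table_columns(connection, table_name, schema)
    except exc.NoSuchTableError:
        return None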
def get_table_oid(self, connection, table_name, schema=None, **kw):
    """Fetch the oid for schema.table_name.

    Several reflection methods require the table oid.  The idea for using
    this method is that it can be fetched one time and cached for
    subsequent calls.

    """
    table_oid = None
    if schema is not None:
        schema_where_clause = "schema = :schema"
    else:
        schema_where_clause = "1=1"
    query = ("""
        SELECT *
        FROM _V_TABLE
        WHERE (%s)
        AND tablename = :table_name
    """ % schema_where_clause)
    # Since we're binding to unicode, table_name and schema_name must be
    # unicode.
    table_name = util.text_type(table_name)
    if schema is not None:
        schema = util.text_type(schema)
    # bindparams() keyword arguments bind *values*, so the Unicode typing
    # has to go through an explicit sql.bindparam() construct.
    s = sql.text(query).bindparams(
        sql.bindparam("table_name", type_=sqltypes.Unicode))
    s = s.columns(oid=sqltypes.Integer)
    if schema:
        s = s.bindparams(sql.bindparam("schema", type_=sqltypes.Unicode))
    c = connection.execute(s, table_name=table_name, schema=schema)
    table_oid = c.scalar()
    if table_oid is None:
        raise exc.NoSuchTableError(table_name)
    return table_oid
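# The docstring's "fetched one time and cached" is conventionally achieved
# with SQLAlchemy's reflection cache; a minimal sketch of the usual
# decoration (the host dialect class here is hypothetical). The decorator
# memoizes per info_cache, so get_columns(), get_pk_constraint(), etc. can
# reuse a single oid lookup.
from sqlalchemy.engine import reflection

class ExampleDialect(object):  # hypothetical host class
    @reflection.cache
    def get_table_oid(self, connection, table_name, schema=None, **kw):
        ...  # the catalog query above would run here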
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
    pyodbc_crsr = connection.connection.cursor()
    db_path = pyodbc_crsr.tables(table=table_name).fetchval()
    if db_path:
        db_engine = win32com.client.Dispatch(
            self._get_dao_string(pyodbc_crsr))
        db = db_engine.OpenDatabase(
            db_path,
            False,
            True,
            "MS Access;PWD={}".format(connection.engine.url.password),
        )
        fk_list = []
        for rel in db.Relations:
            if rel.ForeignTable.casefold() == table_name.casefold():
                fk_dict = {
                    "constrained_columns": [],
                    "referred_schema": None,
                    "referred_table": rel.Table,
                    "referred_columns": [],
                    "name": rel.Name,
                }
                for fld in rel.Fields:
                    fk_dict["constrained_columns"].append(fld.ForeignName)
                    fk_dict["referred_columns"].append(fld.Name)
                fk_list.append(fk_dict)
        return fk_list
    else:
        util.raise_(
            exc.NoSuchTableError("Table '%s' not found." % table_name),
        )
def get_table_oid(self, connection, table_name, schema=None, **kw):
    """Fetch the oid for schema.table_name.

    Several reflection methods require the table oid.  The idea for using
    this method is that it can be fetched one time and cached for
    subsequent calls.

    """
    table_oid = None
    if schema is not None:
        schema_where_clause = "n.nspname = :schema"
    else:
        schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
    query = """
        SELECT c.oid
        FROM pg_catalog.pg_class c
        LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
        WHERE (%s)
        AND c.relname = :table_name AND c.relkind in ('r', 'v', 'f')
    """ % schema_where_clause
    # Since we're binding to unicode, table_name and schema_name must be
    # unicode.
    table_name = str(table_name)
    bindparams = [sql.bindparam('table_name', type_=sqltypes.Unicode)]
    if schema is not None:
        schema = str(schema)
        bindparams.append(sql.bindparam('schema', type_=sqltypes.Unicode))
    s = sql.text(
        query,
        bindparams=bindparams,
        typemap={'oid': sqltypes.Integer})
    c = connection.execute(s, table_name=table_name, schema=schema)
    table_oid = c.scalar()
    if table_oid is None:
        raise exc.NoSuchTableError(table_name)
    return table_oid
def get_table_id(self, connection, table_name, schema=None, **kw):
    """Fetch the id for schema.table_name.

    Several reflection methods require the table id.  The idea for using
    this method is that it can be fetched one time and cached for
    subsequent calls.

    """
    table_id = None
    if schema is None:
        schema = self.default_schema_name

    TABLEID_SQL = text("""
      SELECT t.table_id AS id
      FROM sys.systab t JOIN dbo.sysusers u ON t.creator=u.uid
      WHERE u.name = :schema_name
          AND t.table_name = :table_name
          AND t.table_type in (1, 3, 4, 21)
    """)

    # Py2K
    if isinstance(schema, unicode):
        schema = schema.encode("ascii")
    if isinstance(table_name, unicode):
        table_name = table_name.encode("ascii")
    # end Py2K
    result = connection.execute(TABLEID_SQL,
                                schema_name=schema,
                                table_name=table_name)
    table_id = result.scalar()
    if table_id is None:
        raise exc.NoSuchTableError(table_name)
    return table_id
def get_table_id(self, connection, table_name, schema=None, **kw):
    """Fetch the id for schema.table_name.

    Several reflection methods require the table id.  The idea for using
    this method is that it can be fetched one time and cached for
    subsequent calls.

    """
    table_id = None
    if schema is None:
        schema = self.default_schema_name

    TABLEID_SQL = text(
        """
      SELECT o.id AS id
      FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
      WHERE u.name = :schema_name
          AND o.name = :table_name
          AND o.type in ('U', 'V')
    """
    )

    if util.py2k:
        if isinstance(schema, unicode):  # noqa
            schema = schema.encode("ascii")

        if isinstance(table_name, unicode):  # noqa
            table_name = table_name.encode("ascii")
    result = connection.execute(
        TABLEID_SQL, schema_name=schema, table_name=table_name
    )
    table_id = result.scalar()
    if table_id is None:
        raise exc.NoSuchTableError(table_name)
    return table_id
def get_table_oid(self, connection, table_name, schema=None, **kw):
    """Fetch the oid for schema.table_name.

    Several reflection methods require the table oid.  The idea for using
    this method is that it can be fetched one time and cached for
    subsequent calls.

    """
    table_oid = None
    if schema is not None:
        schema_where_clause = "n.nspname = :schema"
    else:
        schema_where_clause = "sys_catalog.sys_table_is_visible(c.oid)"
    query = ("""
        SELECT c.oid
        FROM sys_catalog.sys_class c
        LEFT JOIN sys_catalog.sys_namespace n ON n.oid = c.relnamespace
        WHERE (%s)
        AND c.relname = :table_name
        AND c.relkind in ('r', 'v', 'm', 'f', 'p')
    """ % schema_where_clause)
    # Since we're binding to unicode, table_name and schema_name must be
    # unicode.
    table_name = util.text_type(table_name)
    if schema is not None:
        schema = util.text_type(schema)
    # As above, the Unicode typing must go through sql.bindparam(); a
    # bindparams() keyword argument would be treated as a bound *value*.
    s = sql.text(query).bindparams(
        sql.bindparam("table_name", type_=sqltypes.Unicode))
    s = s.columns(oid=sqltypes.Integer)
    if schema:
        s = s.bindparams(sql.bindparam("schema", type_=sqltypes.Unicode))
    c = connection.execute(s, table_name=table_name, schema=schema)
    table_oid = c.scalar()
    if table_oid is None:
        raise exc.NoSuchTableError(table_name)
    return table_oid
def get_columns(self, connection: Connection, table_name: str,
                schema: str = None, **kw) -> List[Dict[str, Any]]:
    if not self.has_table(connection, table_name, schema):
        raise exc.NoSuchTableError(f'schema={schema}, table={table_name}')
    return self._get_columns(connection, table_name, schema, **kw)
def get_view_names(self, connection: Connection,
                   schema: str = None, **kw) -> List[str]:
    schema = schema or self._get_default_schema_name(connection)
    if schema is None:
        raise exc.NoSuchTableError('schema is required')
    query = dedent('''
        SELECT "table_name"
        FROM "information_schema"."views"
        WHERE "table_schema" = :schema
    ''').strip()
    res = connection.execute(sql.text(query), schema=schema)
    return [row.table_name for row in res]
def get_indexes(self, connection: Connection, table_name: str,
                schema: str = None, **kw) -> List[Dict[str, Any]]:
    if not self.has_table(connection, table_name, schema):
        raise exc.NoSuchTableError(f"schema={schema}, table={table_name}")

    # Partition columns are read from the hidden "<table>$partitions"
    # metadata table and surfaced as a single synthetic index entry.
    partitioned_columns = self._get_columns(
        connection, f"{table_name}$partitions", schema, **kw)
    partition_index = dict(
        name="partition",
        column_names=[col["name"] for col in partitioned_columns],
        unique=False,
    )
    return [
        partition_index,
    ]
def get_table_names(self, connection, schema: str = None, **kw):  # type: ignore
    schema = schema or self._get_default_schema_name(connection)
    if schema is None:
        raise exc.NoSuchTableError("schema is required")
    query = dedent(
        """
        SELECT "table_name"
        FROM "information_schema"."tables"
        WHERE "table_schema" = :schema and "table_type" != 'VIEW'
    """
    ).strip()
    res = connection.execute(sql.text(query), schema=schema)
    return [row.table_name for row in res]
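# Usage sketch: these dialect methods back the standard Inspector API, so
# application code normally reaches them through sqlalchemy.inspect() rather
# than calling the dialect directly (the connection URL is illustrative).
from sqlalchemy import create_engine, inspect

engine = create_engine("trino://user@localhost:8080/hive")  # assumed URL
inspector = inspect(engine)
print(inspector.get_table_names(schema="default"))
print(inspector.get_view_names(schema="default"))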
def _get_table_columns(self, connection, table_name, schema):
    full_table = table_name
    if schema:
        full_table = schema + '.' + table_name
    try:
        return connection.execute('DESCRIBE {}'.format(full_table))
    except ttypes.SnappyException as e:
        if isinstance(e.exceptionData, ttypes.SnappyExceptionData
                      ) and e.exceptionData.errorCode == 20000:
            raise exc.NoSuchTableError(full_table)
        else:
            raise e
def get_view_definition(self, connection: Connection, view_name: str,
                        schema: str = None, **kw) -> str:
    full_view = self._get_full_table(view_name, schema)
    query = f'SHOW CREATE VIEW {full_view}'
    try:
        res = connection.execute(sql.text(query))
        return res.scalar()
    except error.TrinoQueryError as e:
        if e.error_name in (
            error.TABLE_NOT_FOUND,
            error.SCHEMA_NOT_FOUND,
            error.CATALOG_NOT_FOUND,
        ):
            raise exc.NoSuchTableError(full_view) from e
        raise
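# Caller-side sketch (reusing the `inspector` from the sketch above; the view
# name is made up): because TrinoQueryError is translated into
# NoSuchTableError, one except clause covers a missing view, schema, or
# catalog.
from sqlalchemy import exc

try:
    ddl = inspector.get_view_definition("missing_view", schema="default")
except exc.NoSuchTableError:
    ddl = None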
def get_view_definition(self, connection: Connection, view_name: str,
                        schema: str = None, **kw) -> str:
    schema = schema or self._get_default_schema_name(connection)
    if schema is None:
        raise exc.NoSuchTableError("schema is required")
    query = dedent(
        """
        SELECT "view_definition"
        FROM "information_schema"."views"
        WHERE "table_schema" = :schema
          AND "table_name" = :view
    """
    ).strip()
    res = connection.execute(sql.text(query), schema=schema, view=view_name)
    return res.scalar()
def get_or_create_table(name, *columns, **kwargs):
    engine = get_sql_engine()  # echo=True
    BaseORM.metadata.reflect(bind=engine, views=True)
    if name not in BaseORM.metadata.tables:
        if columns:
            table = sa.Table(
                name,
                BaseORM.metadata,
                gen_oid(),
                gen_update(),
                *columns,
                keep_existing=True,
                **kwargs
            )
            table.create(bind=engine)
        else:
            # Report which table is missing instead of a bare "Table".
            raise sa_err.NoSuchTableError(name)
    return BaseORM.metadata.tables[name]
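# Usage sketch for the helper above (column definitions are illustrative, and
# the helper assumes its module provides get_sql_engine(), BaseORM, gen_oid()
# and gen_update()):
import sqlalchemy as sa

events = get_or_create_table(
    "events",
    sa.Column("name", sa.String(64), nullable=False),
    sa.Column("payload", sa.Text),
)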
def get_table_oid(self, connection, table_name, schema=None, **kw):
    if schema is None:
        schema = self._get_default_schema_name(connection)

    get_oid_sql = sql.text(
        dedent("""
            SELECT table_id
            FROM v_catalog.tables
            WHERE lower(table_name) = '%(table)s'
            AND lower(table_schema) = '%(schema)s'
        """ % {'schema': schema.lower(), 'table': table_name.lower()}))

    c = connection.execute(get_oid_sql)
    table_oid = c.scalar()

    if table_oid is None:
        raise exc.NoSuchTableError(table_name)
    return table_oid
def _table_id(self, connection, table_name, schema_name=None):
    """Fetch the id for schema.table_name, defaulting to current schema if
    schema is None"""
    query = """
        SELECT id
        FROM sys.tables
        WHERE name = %(name)s
        AND schema_id = %(schema_id)s"""
    c = connection.execute(
        query,
        {
            "name": table_name,
            "schema_id": self._schema_id(connection, schema_name)
        })
    table_id = c.scalar()
    if table_id is None:
        raise exc.NoSuchTableError(table_name)
    return table_id
def get_table_oid(self, connection, table_name, schema=None, **kw):
    """Fetch the oid for table_name.

    Several reflection methods require the table oid.  The idea for using
    this method is that it can be fetched one time and cached for
    subsequent calls.

    """
    log.debug("-->")
    table_oid = None
    if schema is None:
        schema = self._get_current_schema_name(connection)
    if not self.is_system_in_lowercase():
        table_name = self.denormalize_name(table_name)
        schema = self.denormalize_name(schema)
    cursor = connection.execute(
        sql.text(
            'select objid from _v_table where objid > 200000 '
            'and tablename = :name and schema = :schema'
        ).bindparams(
            sql.bindparam("name", util.text_type(table_name),
                          type_=sqltypes.Unicode),
            sql.bindparam("schema", util.text_type(schema),
                          type_=sqltypes.Unicode)))
    table_oid = cursor.scalar()
    if table_oid is None:
        # Fall back to _v_object_data for objects not listed in _v_table.
        cursor = connection.execute(
            sql.text(
                'select objid from _v_object_data where objid > 200000 '
                'and objname = :name and schema = :schema'
            ).bindparams(
                sql.bindparam("name", util.text_type(table_name),
                              type_=sqltypes.Unicode),
                sql.bindparam("schema", util.text_type(schema),
                              type_=sqltypes.Unicode)))
        table_oid = cursor.scalar()
    if table_oid is None:
        raise exc.NoSuchTableError(table_name)
    return table_oid
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
    pyodbc_crsr = connection.connection.cursor()
    db_path = pyodbc_crsr.tables(table=table_name).fetchval()
    if db_path:
        db_engine = win32com.client.Dispatch(
            self._get_dao_string(pyodbc_crsr))
        db = db_engine.OpenDatabase(
            db_path,
            False,
            True,
            "MS Access;PWD={}".format(connection.engine.url.password),
        )
        tbd = db.TableDefs(table_name)
        for idx in tbd.Indexes:
            if idx.Primary:
                return {
                    "constrained_columns": [fld.Name for fld in idx.Fields],
                    "name": idx.Name,
                }
    else:
        util.raise_(
            exc.NoSuchTableError("Table '%s' not found." % table_name),
        )
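# Note for callers, as a sketch rather than part of the dialect: when the
# table exists but defines no primary index, the method above falls through
# and returns None, so defensive callers may want to normalize that result.
def pk_or_empty(dialect, connection, table_name):
    pk = dialect.get_pk_constraint(connection, table_name)
    return pk if pk is not None else {"constrained_columns": [], "name": None}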
def _get_table_columns(self, connection, table_name, schema):
    full_table = self.identifier_preparer.quote_identifier(table_name)
    if schema:
        full_table = self.identifier_preparer.quote_identifier(
            schema) + '.' + full_table
    try:
        return connection.execute(
            'SHOW COLUMNS FROM {}'.format(full_table))
    except presto.DatabaseError as e:
        # Normally SQLAlchemy should wrap this exception in
        # sqlalchemy.exc.DatabaseError, which it successfully does in the
        # Hive version. The difference with Presto is that this error is
        # raised when fetching the cursor's description rather than the
        # initial execute call. SQLAlchemy doesn't handle this. Thus, we
        # catch the unwrapped presto.DatabaseError here.
        # Does the table exist?
        msg = e.message.get('message') if isinstance(e.message, dict) else None
        regex = r"^Table\ \'.*{}\'\ does\ not\ exist$".format(
            re.escape(table_name))
        if msg and re.match(regex, msg):
            raise exc.NoSuchTableError(table_name)
        else:
            raise
def reflecttable(self, connection, table, include_columns):
    # This is defined in the function, as it relies on win32com constants,
    # that aren't imported until dbapi method is called
    if not hasattr(self, 'ischema_names'):
        self.ischema_names = {
            const.dbByte: AcBinary,
            const.dbInteger: AcInteger,
            const.dbLong: AcInteger,
            const.dbSingle: AcFloat,
            const.dbDouble: AcFloat,
            const.dbDate: AcDateTime,
            const.dbLongBinary: AcBinary,
            const.dbMemo: AcText,
            const.dbBoolean: AcBoolean,
            const.dbText: AcUnicode,  # All Access strings are unicode
            const.dbCurrency: AcNumeric,
        }

    # A fresh DAO connection is opened for each reflection
    # This is necessary, so we get the latest updates
    dtbs = daoEngine.OpenDatabase(connection.engine.url.database)

    try:
        for tbl in dtbs.TableDefs:
            if tbl.Name.lower() == table.name.lower():
                break
        else:
            raise exc.NoSuchTableError(table.name)

        for col in tbl.Fields:
            coltype = self.ischema_names[col.Type]
            if col.Type == const.dbText:
                coltype = coltype(col.Size)

            colargs = \
                {
                    'nullable': not (col.Required or
                                     col.Attributes & const.dbAutoIncrField),
                }
            default = col.DefaultValue

            if col.Attributes & const.dbAutoIncrField:
                colargs['default'] = schema.Sequence(col.Name + '_seq')
            elif default:
                if col.Type == const.dbBoolean:
                    default = default == 'Yes' and '1' or '0'
                colargs['server_default'] = schema.DefaultClause(
                    sql.text(default))

            table.append_column(schema.Column(col.Name, coltype, **colargs))

        # TBD: check constraints

        # Find primary key columns first
        for idx in tbl.Indexes:
            if idx.Primary:
                for col in idx.Fields:
                    thecol = table.c[col.Name]
                    table.primary_key.add(thecol)
                    if isinstance(thecol.type, AcInteger) and \
                            not (thecol.default and
                                 isinstance(thecol.default.arg,
                                            schema.Sequence)):
                        thecol.autoincrement = False

        # Then add other indexes
        for idx in tbl.Indexes:
            if not idx.Primary:
                if len(idx.Fields) == 1:
                    col = table.c[idx.Fields[0].Name]
                    if not col.primary_key:
                        col.index = True
                        col.unique = idx.Unique
                else:
                    pass  # TBD: multi-column indexes

        for fk in dtbs.Relations:
            if fk.ForeignTable != table.name:
                continue
            scols = [c.ForeignName for c in fk.Fields]
            rcols = ['%s.%s' % (fk.Table, c.Name) for c in fk.Fields]
            table.append_constraint(
                schema.ForeignKeyConstraint(scols, rcols, link_to_name=True))
    finally:
        dtbs.Close()
def reflecttable(self, connection, table, include_columns):
    denormalize = self.identifier_preparer._denormalize_name
    normalize = self.identifier_preparer._normalize_name

    st = ('SELECT COLUMNNAME, MODE, DATATYPE, CODETYPE, LEN, DEC, '
          '  NULLABLE, "DEFAULT", DEFAULTFUNCTION '
          'FROM COLUMNS '
          'WHERE TABLENAME=? AND SCHEMANAME=%s '
          'ORDER BY POS')

    fk = ('SELECT COLUMNNAME, FKEYNAME, '
          '  REFSCHEMANAME, REFTABLENAME, REFCOLUMNNAME, RULE, '
          '  (CASE WHEN REFSCHEMANAME = CURRENT_SCHEMA '
          '   THEN 1 ELSE 0 END) AS in_schema '
          'FROM FOREIGNKEYCOLUMNS '
          'WHERE TABLENAME=? AND SCHEMANAME=%s '
          'ORDER BY FKEYNAME ')

    params = [denormalize(table.name)]
    if not table.schema:
        st = st % 'CURRENT_SCHEMA'
        fk = fk % 'CURRENT_SCHEMA'
    else:
        st = st % '?'
        fk = fk % '?'
        params.append(denormalize(table.schema))

    rows = connection.execute(st, params).fetchall()
    if not rows:
        raise exc.NoSuchTableError(table.fullname)

    include_columns = set(include_columns or [])

    for row in rows:
        (name, mode, col_type, encoding, length, scale,
         nullable, constant_def, func_def) = row

        name = normalize(name)

        if include_columns and name not in include_columns:
            continue

        type_args, type_kw = [], {}
        if col_type == 'FIXED':
            type_args = length, scale
            # Convert FIXED(10) DEFAULT SERIAL to our Integer
            if (scale == 0 and
                    func_def is not None and func_def.startswith('SERIAL')):
                col_type = 'INTEGER'
                type_args = length,
        elif col_type in 'FLOAT':
            type_args = length,
        elif col_type in ('CHAR', 'VARCHAR'):
            type_args = length,
            type_kw['encoding'] = encoding
        elif col_type == 'LONG':
            type_kw['encoding'] = encoding

        try:
            type_cls = ischema_names[col_type.lower()]
            type_instance = type_cls(*type_args, **type_kw)
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (col_type, name))
            type_instance = sqltypes.NullType

        col_kw = {'autoincrement': False}
        col_kw['nullable'] = (nullable == 'YES')
        col_kw['primary_key'] = (mode == 'KEY')

        if func_def is not None:
            if func_def.startswith('SERIAL'):
                if col_kw['primary_key']:
                    # No special default- let the standard autoincrement
                    # support handle SERIAL pk columns.
                    col_kw['autoincrement'] = True
                else:
                    # strip current numbering
                    col_kw['server_default'] = schema.DefaultClause(
                        sql.text('SERIAL'))
                    col_kw['autoincrement'] = True
            else:
                col_kw['server_default'] = schema.DefaultClause(
                    sql.text(func_def))
        elif constant_def is not None:
            col_kw['server_default'] = schema.DefaultClause(sql.text(
                "'%s'" % constant_def.replace("'", "''")))

        table.append_column(schema.Column(name, type_instance, **col_kw))

    fk_sets = itertools.groupby(connection.execute(fk, params),
                                lambda row: row.FKEYNAME)
    for fkeyname, fkey in fk_sets:
        fkey = list(fkey)
        if include_columns:
            key_cols = set([r.COLUMNNAME for r in fkey])
            if key_cols != include_columns:
                continue

        columns, referants = [], []
        quote = self.identifier_preparer._maybe_quote_identifier

        for row in fkey:
            columns.append(normalize(row.COLUMNNAME))
            if table.schema or not row.in_schema:
                referants.append('.'.join(
                    [quote(normalize(row[c]))
                     for c in ('REFSCHEMANAME', 'REFTABLENAME',
                               'REFCOLUMNNAME')]))
            else:
                referants.append('.'.join(
                    [quote(normalize(row[c]))
                     for c in ('REFTABLENAME', 'REFCOLUMNNAME')]))

        constraint_kw = {'name': fkeyname.lower()}
        if fkey[0].RULE is not None:
            rule = fkey[0].RULE
            if rule.startswith('DELETE '):
                rule = rule[7:]
            constraint_kw['ondelete'] = rule

        table_kw = {}
        if table.schema or not row.in_schema:
            table_kw['schema'] = normalize(fkey[0].REFSCHEMANAME)

        ref_key = schema._get_table_key(normalize(fkey[0].REFTABLENAME),
                                        table_kw.get('schema'))
        if ref_key not in table.metadata.tables:
            schema.Table(normalize(fkey[0].REFTABLENAME),
                         table.metadata,
                         autoload=True, autoload_with=connection,
                         **table_kw)

        constraint = schema.ForeignKeyConstraint(columns, referants,
                                                 link_to_name=True,
                                                 **constraint_kw)
        table.append_constraint(constraint)
def reflecttable(self, connection, table, include_columns):
    preparer = self.identifier_preparer
    if table.schema is not None:
        schema_where_clause = "n.nspname = :schema"
        schemaname = table.schema
        if isinstance(schemaname, str):
            schemaname = schemaname.decode(self.encoding)
    else:
        schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
        schemaname = None

    SQL_COLS = """
        SELECT a.attname,
          pg_catalog.format_type(a.atttypid, a.atttypmod),
          (SELECT substring(d.adsrc for 128) FROM pg_catalog.pg_attrdef d
           WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
           AND a.atthasdef)
          AS DEFAULT,
          a.attnotnull, a.attnum, a.attrelid as table_oid
        FROM pg_catalog.pg_attribute a
        WHERE a.attrelid = (
            SELECT c.oid
            FROM pg_catalog.pg_class c
                 LEFT JOIN pg_catalog.pg_namespace n
                      ON n.oid = c.relnamespace
            WHERE (%s)
              AND c.relname = :table_name AND c.relkind in ('r','v')
        ) AND a.attnum > 0 AND NOT a.attisdropped
        ORDER BY a.attnum
    """ % schema_where_clause

    s = sql.text(SQL_COLS,
                 bindparams=[
                     sql.bindparam('table_name', type_=sqltypes.Unicode),
                     sql.bindparam('schema', type_=sqltypes.Unicode)
                 ],
                 typemap={
                     'attname': sqltypes.Unicode,
                     'default': sqltypes.Unicode
                 })
    tablename = table.name
    if isinstance(tablename, str):
        tablename = tablename.decode(self.encoding)
    c = connection.execute(s, table_name=tablename, schema=schemaname)
    rows = c.fetchall()

    if not rows:
        raise exc.NoSuchTableError(table.name)

    domains = self._load_domains(connection)

    for name, format_type, default, notnull, attnum, table_oid in rows:
        if include_columns and name not in include_columns:
            continue

        ## strip (30) from character varying(30)
        attype = re.search('([^\([]+)', format_type).group(1)
        nullable = not notnull
        is_array = format_type.endswith('[]')

        try:
            charlen = re.search('\(([\d,]+)\)', format_type).group(1)
        except:
            charlen = False

        numericprec = False
        numericscale = False
        if attype == 'numeric':
            if charlen is False:
                numericprec, numericscale = (None, None)
            else:
                numericprec, numericscale = charlen.split(',')
            charlen = False
        if attype == 'double precision':
            numericprec, numericscale = (53, False)
            charlen = False
        if attype == 'integer':
            numericprec, numericscale = (32, 0)
            charlen = False

        args = []
        for a in (charlen, numericprec, numericscale):
            if a is None:
                args.append(None)
            elif a is not False:
                args.append(int(a))

        kwargs = {}
        if attype == 'timestamp with time zone':
            kwargs['timezone'] = True
        elif attype == 'timestamp without time zone':
            kwargs['timezone'] = False

        if attype in ischema_names:
            coltype = ischema_names[attype]
        else:
            if attype in domains:
                domain = domains[attype]
                if domain['attype'] in ischema_names:
                    # A table can't override whether the domain is nullable.
                    nullable = domain['nullable']

                    if domain['default'] and not default:
                        # It can, however, override the default value, but
                        # can't set it to null.
                        default = domain['default']
                    coltype = ischema_names[domain['attype']]
            else:
                coltype = None

        if coltype:
            coltype = coltype(*args, **kwargs)
            if is_array:
                coltype = PGArray(coltype)
        else:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (attype, name))
            coltype = sqltypes.NULLTYPE

        colargs = []
        if default is not None:
            match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
            if match is not None:
                # the default is related to a Sequence
                sch = table.schema
                if '.' not in match.group(2) and sch is not None:
                    # unconditionally quote the schema name.  this could
                    # later be enhanced to obey quoting rules /
                    # "quote schema"
                    default = match.group(1) + ('"%s"' % sch) + '.' + \
                        match.group(2) + match.group(3)
            colargs.append(schema.DefaultClause(sql.text(default)))
        table.append_column(schema.Column(name, coltype, nullable=nullable,
                                          *colargs))

    # Primary keys
    PK_SQL = """
      SELECT attname FROM pg_attribute
      WHERE attrelid = (
         SELECT indexrelid FROM pg_index i
         WHERE i.indrelid = :table
         AND i.indisprimary = 't')
      ORDER BY attnum
    """
    t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode})
    c = connection.execute(t, table=table_oid)
    for row in c.fetchall():
        pk = row[0]
        if pk in table.c:
            col = table.c[pk]
            table.primary_key.add(col)
            if col.default is None:
                col.autoincrement = False

    # Foreign keys
    FK_SQL = """
      SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef
      FROM  pg_catalog.pg_constraint r
      WHERE r.conrelid = :table AND r.contype = 'f'
      ORDER BY 1
    """
    t = sql.text(FK_SQL, typemap={
        'conname': sqltypes.Unicode,
        'condef': sqltypes.Unicode
    })
    c = connection.execute(t, table=table_oid)
    for conname, condef in c.fetchall():
        m = re.search(
            'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)',
            condef).groups()
        (constrained_columns, referred_schema, referred_table,
         referred_columns) = m
        constrained_columns = [
            preparer._unquote_identifier(x)
            for x in re.split(r'\s*,\s*', constrained_columns)
        ]
        if referred_schema:
            referred_schema = preparer._unquote_identifier(referred_schema)
        elif table.schema is not None and \
                table.schema == self.get_default_schema_name(connection):
            # no schema (i.e. its the default schema), and the table we're
            # reflecting has the default schema explicit, then use that.
            # i.e. try to use the user's conventions
            referred_schema = table.schema
        referred_table = preparer._unquote_identifier(referred_table)
        referred_columns = [
            preparer._unquote_identifier(x)
            for x in re.split(r'\s*,\s', referred_columns)
        ]

        refspec = []
        if referred_schema is not None:
            schema.Table(referred_table, table.metadata, autoload=True,
                         schema=referred_schema,
                         autoload_with=connection)
            for column in referred_columns:
                refspec.append(".".join(
                    [referred_schema, referred_table, column]))
        else:
            schema.Table(referred_table, table.metadata, autoload=True,
                         autoload_with=connection)
            for column in referred_columns:
                refspec.append(".".join([referred_table, column]))

        table.append_constraint(
            schema.ForeignKeyConstraint(constrained_columns, refspec,
                                        conname, link_to_name=True))

    # Indexes
    IDX_SQL = """
      SELECT c.relname, i.indisunique, i.indexprs, i.indpred, a.attname
       FROM pg_index i, pg_class c, pg_attribute a
      WHERE i.indrelid = :table AND i.indexrelid = c.oid
        AND a.attrelid = i.indexrelid AND i.indisprimary = 'f'
      ORDER BY c.relname, a.attnum
    """
    t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode})
    c = connection.execute(t, table=table_oid)
    indexes = {}
    sv_idx_name = None
    for row in c.fetchall():
        idx_name, unique, expr, prd, col = row
        if expr and not idx_name == sv_idx_name:
            util.warn("Skipped unsupported reflection of expression-based "
                      "index %s" % idx_name)
            sv_idx_name = idx_name
            continue
        if prd and not idx_name == sv_idx_name:
            util.warn("Predicate of partial index %s ignored during "
                      "reflection" % idx_name)
            sv_idx_name = idx_name
        if not indexes.has_key(idx_name):
            indexes[idx_name] = [unique, []]
        indexes[idx_name][1].append(col)

    for name, (unique, columns) in indexes.items():
        schema.Index(name, *[table.columns[c] for c in columns],
                     **dict(unique=unique))
def reflecttable(connection, table, include_columns, ischema_names):
    key_constraints = pg_key_constraints

    if table.schema is not None:
        current_schema = table.schema
    else:
        current_schema = connection.default_schema_name()

    s = select([columns],
               sql.and_(columns.c.table_name == table.name,
                        columns.c.table_schema == current_schema),
               order_by=[columns.c.ordinal_position])

    c = connection.execute(s)
    found_table = False
    while True:
        row = c.fetchone()
        if row is None:
            break
        #print "row! " + repr(row)
        # continue
        found_table = True
        (name, type, nullable, charlen, numericprec, numericscale,
         default) = (
            row[columns.c.column_name],
            row[columns.c.data_type],
            row[columns.c.is_nullable] == 'YES',
            row[columns.c.character_maximum_length],
            row[columns.c.numeric_precision],
            row[columns.c.numeric_scale],
            row[columns.c.column_default]
        )
        if include_columns and name not in include_columns:
            continue

        args = []
        for a in (charlen, numericprec, numericscale):
            if a is not None:
                args.append(a)
        coltype = ischema_names[type]
        #print "coltype " + repr(coltype) + " args " + repr(args)
        coltype = coltype(*args)
        colargs = []
        if default is not None:
            colargs.append(DefaultClause(sql.text(default)))
        table.append_column(Column(name, coltype, nullable=nullable,
                                   *colargs))

    if not found_table:
        raise exc.NoSuchTableError(table.name)

    # we are relying on the natural ordering of the constraint_column_usage
    # table to return the referenced columns in an order that corresponds to
    # the ordinal_position in the key_constraints table, otherwise composite
    # foreign keys wont reflect properly.  dont see a way around this based
    # on whats available from information_schema
    s = select([constraints.c.constraint_name,
                constraints.c.constraint_type,
                constraints.c.table_name,
                key_constraints],
               use_labels=True,
               from_obj=[
                   constraints.join(
                       column_constraints,
                       column_constraints.c.constraint_name ==
                       constraints.c.constraint_name).join(
                           key_constraints,
                           key_constraints.c.constraint_name ==
                           column_constraints.c.constraint_name)
               ],
               order_by=[key_constraints.c.ordinal_position])
    s.append_column(column_constraints)
    s.append_whereclause(constraints.c.table_name == table.name)
    s.append_whereclause(constraints.c.table_schema == current_schema)
    colmap = [constraints.c.constraint_type,
              key_constraints.c.column_name,
              column_constraints.c.table_schema,
              column_constraints.c.table_name,
              column_constraints.c.column_name,
              constraints.c.constraint_name,
              key_constraints.c.ordinal_position]
    c = connection.execute(s)

    fks = {}
    while True:
        row = c.fetchone()
        if row is None:
            break
        (type, constrained_column, referred_schema, referred_table,
         referred_column, constraint_name, ordinal_position) = (
            row[colmap[0]],
            row[colmap[1]],
            row[colmap[2]],
            row[colmap[3]],
            row[colmap[4]],
            row[colmap[5]],
            row[colmap[6]]
        )
        #print "type %s on column %s to remote %s.%s.%s" % (type,
        #    constrained_column, referred_schema, referred_table,
        #    referred_column)
        if type == 'PRIMARY KEY':
            table.primary_key.add(table.c[constrained_column])
        elif type == 'FOREIGN KEY':
            try:
                fk = fks[constraint_name]
            except KeyError:
                fk = ([], [])
                fks[constraint_name] = fk
            if current_schema == referred_schema:
                referred_schema = table.schema
            if referred_schema is not None:
                Table(referred_table, table.metadata, autoload=True,
                      schema=referred_schema,
                      autoload_with=connection)
                refspec = ".".join([referred_schema, referred_table,
                                    referred_column])
            else:
                Table(referred_table, table.metadata, autoload=True,
                      autoload_with=connection)
                refspec = ".".join([referred_table, referred_column])
            if constrained_column not in fk[0]:
                fk[0].append(constrained_column)
            if refspec not in fk[1]:
                fk[1].append(refspec)

    for name, value in fks.iteritems():
        table.append_constraint(
            ForeignKeyConstraint(value[0], value[1], name=name))
def reflecttable(self, table, include_columns, exclude_columns=()):
    """Given a Table object, load its internal constructs based on
    introspection.

    This is the underlying method used by most dialects to produce
    table reflection.  Direct usage is like::

        from sqlalchemy import create_engine, MetaData, Table
        from sqlalchemy.engine import reflection

        engine = create_engine('...')
        meta = MetaData()
        user_table = Table('user', meta)
        insp = Inspector.from_engine(engine)
        insp.reflecttable(user_table, None)

    :param table: a :class:`~sqlalchemy.schema.Table` instance.
    :param include_columns: a list of string column names to include
      in the reflection process.  If ``None``, all columns are reflected.

    """
    dialect = self.bind.dialect

    # table attributes we might need.
    reflection_options = {}

    schema = table.schema
    table_name = table.name

    # apply table options
    tbl_opts = self.get_table_options(table_name, schema, **table.kwargs)
    if tbl_opts:
        table.kwargs.update(tbl_opts)

    # table.kwargs will need to be passed to each reflection method.  Make
    # sure keywords are strings.
    tblkw = table.kwargs.copy()
    for (k, v) in list(tblkw.items()):
        del tblkw[k]
        tblkw[str(k)] = v

    if isinstance(schema, str):
        schema = schema.decode(dialect.encoding)
    if isinstance(table_name, str):
        table_name = table_name.decode(dialect.encoding)

    # columns
    found_table = False
    cols_by_orig_name = {}

    for col_d in self.get_columns(table_name, schema, **tblkw):
        found_table = True
        orig_name = col_d['name']

        name = col_d['name']
        if include_columns and name not in include_columns:
            continue
        if exclude_columns and name in exclude_columns:
            continue

        coltype = col_d['type']
        col_kw = {
            'nullable': col_d['nullable'],
        }
        for k in ('autoincrement', 'quote', 'info', 'key'):
            if k in col_d:
                col_kw[k] = col_d[k]

        colargs = []
        if col_d.get('default') is not None:
            # the "default" value is assumed to be a literal SQL
            # expression, so is wrapped in text() so that no quoting
            # occurs on re-issuance.
            colargs.append(
                sa_schema.DefaultClause(sql.text(col_d['default']),
                                        _reflected=True))

        if 'sequence' in col_d:
            # TODO: mssql and sybase are using this.
            seq = col_d['sequence']
            sequence = sa_schema.Sequence(seq['name'], 1, 1)
            if 'start' in seq:
                sequence.start = seq['start']
            if 'increment' in seq:
                sequence.increment = seq['increment']
            colargs.append(sequence)

        cols_by_orig_name[orig_name] = col = \
            sa_schema.Column(name, coltype, *colargs, **col_kw)

        table.append_column(col)

    if not found_table:
        raise exc.NoSuchTableError(table.name)

    # Primary keys
    pk_cons = self.get_pk_constraint(table_name, schema, **tblkw)
    if pk_cons:
        pk_cols = [
            cols_by_orig_name[pk]
            for pk in pk_cons['constrained_columns']
            if pk in cols_by_orig_name and pk not in exclude_columns
        ]
        pk_cols += [
            pk for pk in table.primary_key if pk.key in exclude_columns
        ]
        primary_key_constraint = sa_schema.PrimaryKeyConstraint(
            name=pk_cons.get('name'), *pk_cols)

        table.append_constraint(primary_key_constraint)

    # Foreign keys
    fkeys = self.get_foreign_keys(table_name, schema, **tblkw)
    for fkey_d in fkeys:
        conname = fkey_d['name']
        # look for columns by orig name in cols_by_orig_name,
        # but support columns that are in-Python only as fallback
        constrained_columns = [
            cols_by_orig_name[c].key if c in cols_by_orig_name else c
            for c in fkey_d['constrained_columns']
        ]
        if exclude_columns and set(constrained_columns).intersection(
                exclude_columns):
            continue
        referred_schema = fkey_d['referred_schema']
        referred_table = fkey_d['referred_table']
        referred_columns = fkey_d['referred_columns']
        refspec = []
        if referred_schema is not None:
            sa_schema.Table(referred_table, table.metadata,
                            autoload=True, schema=referred_schema,
                            autoload_with=self.bind,
                            **reflection_options)
            for column in referred_columns:
                refspec.append(".".join(
                    [referred_schema, referred_table, column]))
        else:
            sa_schema.Table(referred_table, table.metadata, autoload=True,
                            autoload_with=self.bind,
                            **reflection_options)
            for column in referred_columns:
                refspec.append(".".join([referred_table, column]))
        if 'options' in fkey_d:
            options = fkey_d['options']
        else:
            options = {}
        table.append_constraint(
            sa_schema.ForeignKeyConstraint(constrained_columns, refspec,
                                           conname, link_to_name=True,
                                           **options))

    # Indexes
    indexes = self.get_indexes(table_name, schema)
    for index_d in indexes:
        name = index_d['name']
        columns = index_d['column_names']
        unique = index_d['unique']
        flavor = index_d.get('type', 'unknown type')
        if include_columns and \
                not set(columns).issubset(include_columns):
            util.warn(
                "Omitting %s KEY for (%s), key covers omitted columns." %
                (flavor, ', '.join(columns)))
            continue
        # look for columns by orig name in cols_by_orig_name,
        # but support columns that are in-Python only as fallback
        sa_schema.Index(name, *[
            cols_by_orig_name[c] if c in cols_by_orig_name else table.c[c]
            for c in columns
        ], **dict(unique=unique))
def reflecttable(self, connection, table, include_columns):
    c = connection.execute(
        "select distinct OWNER from systables where tabname=?",
        table.name.lower())
    rows = c.fetchall()
    if not rows:
        raise exc.NoSuchTableError(table.name)
    else:
        if table.owner is not None:
            if table.owner.lower() in [r[0] for r in rows]:
                owner = table.owner.lower()
            else:
                raise AssertionError(
                    "Specified owner %s does not own table %s" %
                    (table.owner, table.name))
        else:
            if len(rows) == 1:
                owner = rows[0][0]
            else:
                raise AssertionError(
                    "There are multiple tables with name %s in the schema, "
                    "you must specify owner" % table.name)

    c = connection.execute(
        """select colname, coltype, collength, t3.default, t1.colno
           from syscolumns as t1, systables as t2,
                OUTER sysdefaults as t3
           where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
             and t3.tabid = t2.tabid and t3.colno = t1.colno
           order by t1.colno""", table.name.lower(), owner)
    rows = c.fetchall()

    if not rows:
        raise exc.NoSuchTableError(table.name)

    for name, colattr, collength, default, colno in rows:
        name = name.lower()
        if include_columns and name not in include_columns:
            continue

        # in 7.31, coltype = 0x000
        #                       ^^-- column type
        #                      ^-- 1 not null, 0 null
        nullable, coltype = divmod(colattr, 256)
        if coltype not in (0, 13) and default:
            default = default.split()[-1]

        if coltype == 0 or coltype == 13:  # char, varchar
            coltype = ischema_names.get(coltype, InfoString)(collength)
            if default:
                default = "'%s'" % default
        elif coltype == 5:  # decimal
            precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
            if scale == 255:
                scale = 0
            coltype = InfoNumeric(precision, scale)
        else:
            try:
                coltype = ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, name))
                coltype = sqltypes.NULLTYPE

        colargs = []
        if default is not None:
            colargs.append(schema.DefaultClause(sql.text(default)))

        table.append_column(schema.Column(name, coltype,
                                          nullable=(nullable == 0),
                                          *colargs))

    # FK
    c = connection.execute(
        """select t1.constrname as cons_name, t1.constrtype as cons_type,
                  t4.colname as local_column, t7.tabname as remote_table,
                  t6.colname as remote_column
           from sysconstraints as t1, systables as t2,
                sysindexes as t3, syscolumns as t4,
                sysreferences as t5, syscolumns as t6, systables as t7,
                sysconstraints as t8, sysindexes as t9
           where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
             and t1.constrtype = 'R' and t3.tabid = t2.tabid
             and t3.idxname = t1.idxname and t4.tabid = t2.tabid
             and t4.colno = t3.part1 and t5.constrid = t1.constrid
             and t8.constrid = t5.primary and t6.tabid = t5.ptabid
             and t6.colno = t9.part1 and t9.idxname = t8.idxname
             and t7.tabid = t5.ptabid""",
        table.name.lower(), owner)
    rows = c.fetchall()
    fks = {}
    for cons_name, cons_type, local_column, remote_table, remote_column in rows:
        try:
            fk = fks[cons_name]
        except KeyError:
            fk = ([], [])
            fks[cons_name] = fk
        refspec = ".".join([remote_table, remote_column])
        schema.Table(remote_table, table.metadata, autoload=True,
                     autoload_with=connection)
        if local_column not in fk[0]:
            fk[0].append(local_column)
        if refspec not in fk[1]:
            fk[1].append(refspec)

    for name, value in fks.iteritems():
        table.append_constraint(
            schema.ForeignKeyConstraint(value[0], value[1], None,
                                        link_to_name=True))

    # PK
    c = connection.execute(
        """select t1.constrname as cons_name, t1.constrtype as cons_type,
                  t4.colname as local_column
           from sysconstraints as t1, systables as t2,
                sysindexes as t3, syscolumns as t4
           where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
             and t1.constrtype = 'P' and t3.tabid = t2.tabid
             and t3.idxname = t1.idxname and t4.tabid = t2.tabid
             and t4.colno = t3.part1""",
        table.name.lower(), owner)
    rows = c.fetchall()
    for cons_name, cons_type, local_column in rows:
        table.primary_key.add(table.c[local_column])
def reflecttable(self, connection, table, include_columns):
    # Query to extract the details of all the fields of the given table
    tblqry = """
    SELECT DISTINCT r.rdb$field_name AS fname,
                    r.rdb$null_flag AS null_flag,
                    t.rdb$type_name AS ftype,
                    f.rdb$field_sub_type AS stype,
                    f.rdb$field_length AS flen,
                    f.rdb$field_precision AS fprec,
                    f.rdb$field_scale AS fscale,
                    COALESCE(r.rdb$default_source, f.rdb$default_source) AS fdefault
    FROM rdb$relation_fields r
         JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
         JOIN rdb$types t ON t.rdb$type=f.rdb$field_type
              AND t.rdb$field_name='RDB$FIELD_TYPE'
    WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
    ORDER BY r.rdb$field_position
    """
    # Query to extract the PK/FK constrained fields of the given table
    keyqry = """
    SELECT se.rdb$field_name AS fname
    FROM rdb$relation_constraints rc
         JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
    WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
    """
    # Query to extract the details of each UK/FK of the given table
    fkqry = """
    SELECT rc.rdb$constraint_name AS cname,
           cse.rdb$field_name AS fname,
           ix2.rdb$relation_name AS targetrname,
           se.rdb$field_name AS targetfname
    FROM rdb$relation_constraints rc
         JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
         JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
         JOIN rdb$index_segments cse ON cse.rdb$index_name=ix1.rdb$index_name
         JOIN rdb$index_segments se ON se.rdb$index_name=ix2.rdb$index_name
              AND se.rdb$field_position=cse.rdb$field_position
    WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
    ORDER BY se.rdb$index_name, se.rdb$field_position
    """
    # Heuristic-query to determine the generator associated to a PK field
    genqry = """
    SELECT trigdep.rdb$depended_on_name AS fgenerator
    FROM rdb$dependencies tabdep
         JOIN rdb$dependencies trigdep
              ON (tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
                  AND trigdep.rdb$depended_on_type=14
                  AND trigdep.rdb$dependent_type=2)
         JOIN rdb$triggers trig ON (trig.rdb$trigger_name=tabdep.rdb$dependent_name)
    WHERE tabdep.rdb$depended_on_name=?
      AND tabdep.rdb$depended_on_type=0
      AND trig.rdb$trigger_type=1
      AND tabdep.rdb$field_name=?
      AND (SELECT count(*)
           FROM rdb$dependencies trigdep2
           WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
    """

    tablename = self._denormalize_name(table.name)

    # get primary key fields
    c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
    pkfields = [self._normalize_name(r['fname']) for r in c.fetchall()]

    # get all of the fields for this table
    c = connection.execute(tblqry, [tablename])

    found_table = False
    while True:
        row = c.fetchone()
        if row is None:
            break
        found_table = True

        name = self._normalize_name(row['fname'])
        if include_columns and name not in include_columns:
            continue
        args = [name]

        kw = {}
        # get the data type
        coltype = ischema_names.get(row['ftype'].rstrip())
        if coltype is None:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (str(row['ftype']), name))
            coltype = sqltypes.NULLTYPE
        else:
            coltype = coltype(row)
        args.append(coltype)

        # is it a primary key?
        kw['primary_key'] = name in pkfields

        # is it nullable?
        kw['nullable'] = not bool(row['null_flag'])

        # does it have a default value?
        if row['fdefault'] is not None:
            # the value comes down as "DEFAULT 'value'"
            assert row['fdefault'].upper().startswith('DEFAULT '), row
            defvalue = row['fdefault'][8:]
            args.append(schema.DefaultClause(sql.text(defvalue)))

        col = schema.Column(*args, **kw)
        if kw['primary_key']:
            # if the PK is a single field, try to see if its linked to
            # a sequence thru a trigger
            if len(pkfields) == 1:
                genc = connection.execute(genqry, [tablename, row['fname']])
                genr = genc.fetchone()
                if genr is not None:
                    col.sequence = schema.Sequence(
                        self._normalize_name(genr['fgenerator']))

        table.append_column(col)

    if not found_table:
        raise exc.NoSuchTableError(table.name)

    # get the foreign keys
    c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
    fks = {}
    while True:
        row = c.fetchone()
        if not row:
            break

        cname = self._normalize_name(row['cname'])
        try:
            fk = fks[cname]
        except KeyError:
            fks[cname] = fk = ([], [])

        rname = self._normalize_name(row['targetrname'])
        schema.Table(rname, table.metadata, autoload=True,
                     autoload_with=connection)

        fname = self._normalize_name(row['fname'])
        refspec = rname + '.' + self._normalize_name(row['targetfname'])
        fk[0].append(fname)
        fk[1].append(refspec)

    for name, value in fks.iteritems():
        table.append_constraint(
            schema.ForeignKeyConstraint(value[0], value[1], name=name,
                                        link_to_name=True))
def reflecttable(self, connection, table, include_columns):
    preparer = self.identifier_preparer
    if table.schema is None:
        pragma = "PRAGMA "
    else:
        pragma = "PRAGMA %s." % preparer.quote_identifier(table.schema)
    qtable = preparer.format_table(table, False)

    c = connection.execute("%stable_info(%s)" % (pragma, qtable))
    found_table = False
    while True:
        row = c.fetchone()
        if row is None:
            break

        found_table = True
        (name, type_, nullable, default, has_default, primary_key) = \
            (row[1], row[2].upper(), not row[3], row[4],
             row[4] is not None, row[5])
        name = re.sub(r'^\"|\"$', '', name)
        if include_columns and name not in include_columns:
            continue
        match = re.match(r'(\w+)(\(.*?\))?', type_)
        if match:
            coltype = match.group(1)
            args = match.group(2)
        else:
            coltype = "VARCHAR"
            args = ''

        try:
            coltype = ischema_names[coltype]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (coltype, name))
            coltype = sqltypes.NullType

        if args is not None:
            args = re.findall(r'(\d+)', args)
            coltype = coltype(*[int(a) for a in args])

        colargs = []
        if has_default:
            colargs.append(DefaultClause(sql.text(default)))
        table.append_column(schema.Column(name, coltype,
                                          primary_key=primary_key,
                                          nullable=nullable, *colargs))

    if not found_table:
        raise exc.NoSuchTableError(table.name)

    c = connection.execute("%sforeign_key_list(%s)" % (pragma, qtable))
    fks = {}
    while True:
        row = c.fetchone()
        if row is None:
            break
        (constraint_name, tablename, localcol, remotecol) = \
            (row[0], row[2], row[3], row[4])
        tablename = re.sub(r'^\"|\"$', '', tablename)
        localcol = re.sub(r'^\"|\"$', '', localcol)
        remotecol = re.sub(r'^\"|\"$', '', remotecol)
        try:
            fk = fks[constraint_name]
        except KeyError:
            fk = ([], [])
            fks[constraint_name] = fk

        # look up the table based on the given table's engine, not 'self',
        # since it could be a ProxyEngine
        remotetable = schema.Table(tablename, table.metadata, autoload=True,
                                   autoload_with=connection)
        constrained_column = table.c[localcol].name
        refspec = ".".join([tablename, remotecol])
        if constrained_column not in fk[0]:
            fk[0].append(constrained_column)
        if refspec not in fk[1]:
            fk[1].append(refspec)
    for name, value in fks.iteritems():
        table.append_constraint(
            schema.ForeignKeyConstraint(value[0], value[1],
                                        link_to_name=True))

    # check for UNIQUE indexes
    c = connection.execute("%sindex_list(%s)" % (pragma, qtable))
    unique_indexes = []
    while True:
        row = c.fetchone()
        if row is None:
            break
        if (row[2] == 1):
            unique_indexes.append(row[1])

    # loop thru unique indexes for one that includes the primary key
    for idx in unique_indexes:
        c = connection.execute("%sindex_info(%s)" % (pragma, idx))
        cols = []
        while True:
            row = c.fetchone()
            if row is None:
                break
            cols.append(row[2])