def execute(state, dict_, row):
    collection = collections.get(
        tuple([row[col] for col in local_cols]), (None,))
    if len(collection) > 1:
        util.warn(
            "Multiple rows returned with "
            "uselist=False for eagerly-loaded attribute '%s' " % self)
    scalar = collection[0]
    state.get_impl(self.key).set_committed_value(state, dict_, scalar)

def limit_clause(self, select):
    text = ""
    if select._limit is not None:
        text += "\n LIMIT %d" % int(select._limit)
    if select._offset is not None:
        util.warn("EXASolution does not support OFFSET")
    return text

def _get_column_info(self, name, type_, nullable, autoincrement, default,
                     precision, scale, length):
    coltype = self.ischema_names.get(type_, None)

    kwargs = {}

    if coltype in (NUMERIC, DECIMAL):
        args = (precision, scale)
    elif coltype == FLOAT:
        args = (precision,)
    elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
        args = (length,)
    else:
        args = ()

    if coltype:
        coltype = coltype(*args, **kwargs)
        # is this necessary
        # if is_array:
        #     coltype = ARRAY(coltype)
    else:
        util.warn("Did not recognize type '%s' of column '%s'" %
                  (type_, name))
        coltype = sqltypes.NULLTYPE

    if default:
        default = re.sub("DEFAULT", "", default).strip()
        default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
    else:
        default = None

    column_info = dict(name=name, type=coltype, nullable=nullable,
                       default=default, autoincrement=autoincrement)
    return column_info

def _emit_update_statements(base_mapper, uowtransaction,
                            cached_connections, mapper, table, update):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_update_commands()."""

    needs_version_id = mapper.version_id_col is not None and \
        table.c.contains_column(mapper.version_id_col)

    def update_stmt():
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                col == sql.bindparam(col._label, type_=col.type))

        if needs_version_id:
            # bind against the version column's own type, not the type of
            # whichever primary key column the loop above ended on
            clause.clauses.append(
                mapper.version_id_col == sql.bindparam(
                    mapper.version_id_col._label,
                    type_=mapper.version_id_col.type))

        return table.update(clause)

    statement = base_mapper._memo(('update', table), update_stmt)

    rows = 0
    for state, state_dict, params, mapper, \
            connection, value_params in update:

        if value_params:
            c = connection.execute(
                statement.values(value_params), params)
        else:
            c = cached_connections[connection].\
                execute(statement, params)

        _postfetch(
            mapper, uowtransaction, table, state, state_dict,
            c.context.prefetch_cols, c.context.postfetch_cols,
            c.context.compiled_parameters[0], value_params)
        rows += c.rowcount

    if connection.dialect.supports_sane_rowcount:
        if rows != len(update):
            raise orm_exc.StaleDataError(
                "UPDATE statement on table '%s' expected to "
                "update %d row(s); %d were matched." %
                (table.description, len(update), rows))
    elif needs_version_id:
        util.warn("Dialect %s does not support updated rowcount "
                  "- versioning cannot be verified." %
                  c.dialect.dialect_description, stacklevel=12)

def get_indexes(self, connection, table_name, schema, **kw):
    table_oid = self.get_table_oid(connection, table_name, schema,
                                   info_cache=kw.get("info_cache"))

    IDX_SQL = """
        SELECT c.relname, i.indisunique, i.indexprs, i.indpred, a.attname
        FROM pg_index i, pg_class c, pg_attribute a
        WHERE i.indrelid = :table_oid AND i.indexrelid = c.oid
            AND a.attrelid = i.indexrelid AND i.indisprimary = 'f'
        ORDER BY c.relname, a.attnum
    """
    t = sql.text(IDX_SQL, typemap={"attname": sqltypes.Unicode})
    c = connection.execute(t, table_oid=table_oid)
    index_names = {}
    indexes = []
    sv_idx_name = None
    for row in c.fetchall():
        idx_name, unique, expr, prd, col = row
        if expr:
            if idx_name != sv_idx_name:
                util.warn("Skipped unsupported reflection of "
                          "expression-based index %s" % idx_name)
            sv_idx_name = idx_name
            continue
        if prd and not idx_name == sv_idx_name:
            util.warn("Predicate of partial index %s ignored during "
                      "reflection" % idx_name)
            sv_idx_name = idx_name
        if idx_name in index_names:
            index_d = index_names[idx_name]
        else:
            index_d = {"column_names": []}
            indexes.append(index_d)
            index_names[idx_name] = index_d
        index_d["name"] = idx_name
        index_d["column_names"].append(col)
        index_d["unique"] = unique
    return indexes

def _get_column_info(self, name, type_, nullable, default, primary_key):
    match = re.match(r'(\w+)(\(.*?\))?', type_)
    if match:
        coltype = match.group(1)
        args = match.group(2)
    else:
        coltype = "VARCHAR"
        args = ''

    try:
        coltype = self.ischema_names[coltype]
        if args is not None:
            args = re.findall(r'(\d+)', args)
            coltype = coltype(*[int(a) for a in args])
    except KeyError:
        util.warn("Did not recognize type '%s' of column '%s'" %
                  (coltype, name))
        coltype = sqltypes.NullType()

    if default is not None:
        default = unicode(default)

    return {
        'name': name,
        'type': coltype,
        'nullable': nullable,
        'default': default,
        'autoincrement': default is None,
        'primary_key': primary_key
    }

def define_constraint_cascades(self, constraint):
    text = ""
    if constraint.ondelete is not None:
        text += " ON DELETE %s" % constraint.ondelete
    if constraint.onupdate is not None:
        util.warn("DB2 does not support UPDATE CASCADE for foreign keys.")
    return text

def register_object(self, state, isdelete=False, listonly=False,
                    cancel_delete=False, operation=None, prop=None):
    if not self.session._contains_state(state):
        if not state.deleted and operation is not None:
            util.warn("Object of type %s not in session, %s operation "
                      "along '%s' will not proceed" %
                      (mapperutil.state_class_str(state), operation, prop))
        return False

    if state not in self.states:
        mapper = state.manager.mapper
        if mapper not in self.mappers:
            mapper._per_mapper_flush_actions(self)
        self.mappers[mapper].add(state)
        self.states[state] = (isdelete, listonly)
    else:
        if not listonly and (isdelete or cancel_delete):
            self.states[state] = (isdelete, False)
    return True

def _get_column_info(self, name, type_, nullable, default, primary_key):
    match = re.match(r"(\w+)(\(.*?\))?", type_)
    if match:
        coltype = match.group(1)
        args = match.group(2)
    else:
        coltype = "VARCHAR"
        args = ""

    try:
        coltype = self.ischema_names[coltype]
        if args is not None:
            args = re.findall(r"(\d+)", args)
            coltype = coltype(*[int(a) for a in args])
    except KeyError:
        util.warn("Did not recognize type '%s' of column '%s'" %
                  (coltype, name))
        coltype = sqltypes.NullType()

    if default is not None:
        default = str(default)

    return {
        "name": name,
        "type": coltype,
        "nullable": nullable,
        "default": default,
        "autoincrement": default is None,
        "primary_key": primary_key,
    }

def __determine_targets(self):
    if isinstance(self.argument, type):
        self.mapper = mapper.class_mapper(self.argument,
                                          entity_name=self.entity_name,
                                          compile=False)
    elif isinstance(self.argument, mapper.Mapper):
        self.mapper = self.argument
    elif callable(self.argument):
        # accept a callable to suit various deferred-configurational schemes
        self.mapper = mapper.class_mapper(self.argument(),
                                          entity_name=self.entity_name,
                                          compile=False)
    else:
        raise exceptions.ArgumentError(
            "relation '%s' expects a class or a mapper argument "
            "(received: %s)" % (self.key, type(self.argument)))

    if not self.parent.concrete:
        for inheriting in self.parent.iterate_to_root():
            if inheriting is not self.parent and \
                    inheriting._get_property(self.key, raiseerr=False):
                util.warn(
                    ("Warning: relation '%s' on mapper '%s' supercedes "
                     "the same relation on inherited mapper '%s'; this "
                     "can cause dependency issues during flush") %
                    (self.key, self.parent, inheriting))

    self.target = self.mapper.mapped_table
    self.table = self.mapper.mapped_table

    if self.cascade.delete_orphan:
        if self.parent.class_ is self.mapper.class_:
            raise exceptions.ArgumentError(
                "In relationship '%s', can't establish 'delete-orphan' "
                "cascade rule on a self-referential relationship. "
                "You probably want cascade='all', which includes delete "
                "cascading but not orphan detection." % (str(self)))
        self.mapper.primary_mapper().delete_orphans.append(
            (self.key, self.parent.class_))

def get_columns(self, connection, table_name, schema=None, **kw):
    current_schema = self.denormalize_name(schema or
                                           self.default_schema_name)
    table_name = self.denormalize_name(table_name)
    syscols = self.sys_columns

    query = sql.select([syscols.c.colname, syscols.c.typename,
                        syscols.c.defaultval, syscols.c.nullable,
                        syscols.c.length, syscols.c.scale,
                        syscols.c.isid, syscols.c.idgenerate],
                       sql.and_(syscols.c.tabschema == current_schema,
                                syscols.c.tabname == table_name),
                       order_by=[syscols.c.colno])
    sa_columns = []
    for r in connection.execute(query):
        coltype = r[1].upper()
        if coltype in ('DECIMAL', 'NUMERIC'):
            coltype = self.ischema_names.get(coltype)(int(r[4]), int(r[5]))
        elif coltype in ('CHARACTER', 'CHAR', 'VARCHAR',
                         'GRAPHIC', 'VARGRAPHIC'):
            coltype = self.ischema_names.get(coltype)(int(r[4]))
        else:
            try:
                coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, r[0]))
                coltype = sa_types.NULLTYPE
        sa_columns.append({
            'name': self.normalize_name(r[0]),
            'type': coltype,
            'nullable': r[3] == 'Y',
            'default': r[2],
            'autoincrement': (r[6] == 'YES') and (r[7] is not None),
        })
    return sa_columns

def get_lastrowid(self):
    columns = self.compiled.sql_compiler.statement.table.columns
    autoinc_pk_columns = \
        [c.name for c in columns if c.autoincrement and c.primary_key]
    if len(autoinc_pk_columns) == 0:
        return None
    elif len(autoinc_pk_columns) > 1:
        util.warn("Table with more than one autoincrement, primary key"
                  " Column!")
        raise Exception
    else:
        id_col = self.dialect.denormalize_name(autoinc_pk_columns[0])

        table = self.compiled.sql_compiler.statement.table.name
        table = self.dialect.denormalize_name(table)

        sql_stmnt = "SELECT column_identity from SYS.EXA_ALL_COLUMNS " \
                    "WHERE column_object_type = 'TABLE' and column_table " \
                    "= ? AND column_name = ?"

        schema = self.compiled.sql_compiler.statement.table.schema
        if schema is not None:
            schema = self.dialect.denormalize_name(schema)
            sql_stmnt += " AND column_schema = ?"

        cursor = self.create_cursor()
        if schema:
            cursor.execute(sql_stmnt, table, id_col, schema)
        else:
            cursor.execute(sql_stmnt, table, id_col)
        lastrowid = cursor.fetchone()[0] - 1
        cursor.close()
        return lastrowid

def reflecttable(self, connection, table, include_columns=None,
                 exclude_columns=None):
    exclude_columns = exclude_columns or []
    try:
        rows = connection.execute('SHOW COLUMNS FROM {}'.format(table))
    except presto.DatabaseError as e:
        # Normally SQLAlchemy should wrap this exception in
        # sqlalchemy.exc.DatabaseError, which it successfully does in the
        # Hive version. The difference with Presto is that this error is
        # raised when fetching the cursor's description rather than the
        # initial execute call. SQLAlchemy doesn't handle this. Thus, we
        # catch the unwrapped presto.DatabaseError here.
        # Does the table exist?
        msg = e.message.get('message') if isinstance(e.message, dict) else None
        regex = r"^Table\ \'.*{}\'\ does\ not\ exist$".format(
            re.escape(table.name))
        if msg and re.match(regex, msg):
            raise exc.NoSuchTableError(table.name)
        else:
            raise
    else:
        for row in rows:
            name, coltype, nullable, is_partition_key = row
            if include_columns is not None and name not in include_columns:
                continue
            if name in exclude_columns:
                continue
            try:
                coltype = _type_map[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, name))
                coltype = types.NullType
            table.append_column(schema.Column(
                name=name,
                type_=coltype,
                nullable=nullable,
                index=is_partition_key,  # Translate Hive partitions to indexes
            ))

def get_columns(self, connection, table_name, schema=None, **kw):
    rows = self._get_table_columns(connection, table_name, schema)
    # Strip whitespace
    rows = [[col.strip() if col else None for col in row] for row in rows]
    # Filter out empty rows and comment
    rows = [row for row in rows if row[0] and row[0] != '# col_name']
    result = []
    for (col_name, col_type, _comment) in rows:
        if col_name == '# Partition Information':
            break
        # Take out the more detailed type information
        # e.g. 'map<int,int>' -> 'map'
        #      'decimal(10,1)' -> decimal
        col_type = re.search(r'^\w+', col_type).group(0)
        try:
            coltype = _type_map[col_type]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (col_type, col_name))
            coltype = types.NullType
        result.append({
            'name': col_name,
            'type': coltype,
            'nullable': True,
            'default': None,
        })
    return result

def __new__(cls, arg):
    values = set([
        c for c
        in re.split(r'\s*,\s*', arg or "")
        if c
    ])

    if values.difference(cls._allowed_cascades):
        raise sa_exc.ArgumentError(
            "Invalid cascade option(s): %s" %
            ", ".join([repr(x) for x in sorted(
                values.difference(cls._allowed_cascades))]))

    if "all" in values:
        values.update(cls._add_w_all_cascades)
    if "none" in values:
        values.clear()
    values.discard('all')

    self = frozenset.__new__(CascadeOptions, values)
    self.save_update = 'save-update' in values
    self.delete = 'delete' in values
    self.refresh_expire = 'refresh-expire' in values
    self.merge = 'merge' in values
    self.expunge = 'expunge' in values
    self.delete_orphan = "delete-orphan" in values

    if self.delete_orphan and not self.delete:
        util.warn("The 'delete-orphan' cascade "
                  "option requires 'delete'.")
    return self

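# Minimal usage sketch of the cascade parsing above (assumes CascadeOptions
# with its _allowed_cascades/_add_w_all_cascades class attributes is in
# scope, e.g. imported from the hosting SQLAlchemy version):
opts = CascadeOptions("all, delete-orphan")
assert opts.save_update and opts.delete and opts.delete_orphan
CascadeOptions("delete-orphan")  # warns: the option requires 'delete'
CascadeOptions("bogus")          # raises ArgumentError: invalid cascade option
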
def _emit_delete_statements(base_mapper, uowtransaction,
                            cached_connections, mapper, table, delete):
    """Emit DELETE statements corresponding to value lists collected
    by _collect_delete_commands()."""

    need_version_id = mapper.version_id_col is not None and \
        table.c.contains_column(mapper.version_id_col)

    def delete_stmt():
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                col == sql.bindparam(col.key, type_=col.type))

        if need_version_id:
            clause.clauses.append(
                mapper.version_id_col == sql.bindparam(
                    mapper.version_id_col.key,
                    type_=mapper.version_id_col.type
                )
            )

        return table.delete(clause)

    for connection, del_objects in delete.iteritems():
        statement = base_mapper._memo(('delete', table), delete_stmt)

        rows = -1

        connection = cached_connections[connection]

        if need_version_id and \
                not connection.dialect.supports_sane_multi_rowcount:
            # TODO: need test coverage for this [ticket:1761]
            if connection.dialect.supports_sane_rowcount:
                rows = 0
                # execute deletes individually so that versioned
                # rows can be verified
                for params in del_objects:
                    c = connection.execute(statement, params)
                    rows += c.rowcount
            else:
                util.warn(
                    "Dialect %s does not support deleted rowcount "
                    "- versioning cannot be verified." %
                    connection.dialect.dialect_description,
                    stacklevel=12)
                connection.execute(statement, del_objects)
        else:
            c = connection.execute(statement, del_objects)
            if connection.dialect.supports_sane_multi_rowcount:
                rows = c.rowcount

        if rows != -1 and rows != len(del_objects):
            raise orm_exc.StaleDataError(
                "DELETE statement on table '%s' expected to "
                "delete %d row(s); %d were matched." %
                (table.description, len(del_objects), c.rowcount)
            )

def existing_execute(state, dict_, row):
    # call _instance on the row, even though the object has
    # been created, so that we further descend into properties
    existing = _instance(row, None)
    if existing is not None \
            and key in dict_ \
            and existing is not dict_[key]:
        util.warn(
            "Multiple rows returned with "
            "uselist=False for eagerly-loaded attribute '%s' " % self)

def get_columns(self, connection, table_name, schema=None, **kw):
    schema = schema or connection.engine.url.database
    if schema is None:
        schema = connection.execute(
            "select CURRENT_SCHEMA from dual").scalar()
    table_name = self.denormalize_name(table_name)
    schema = self.denormalize_name(schema)
    columns = []
    for row in self._get_all_columns(connection, schema,
                                     info_cache=kw.get("info_cache")):
        if row[9] != table_name and table_name is not None:
            continue
        (colname, coltype, length, precision, scale, nullable,
         default, identity, is_distribution_key) = \
            (row[0], row[1], row[2], row[3], row[4],
             row[5], row[6], row[7], row[8])
        # FIXME: Missing type support: INTERVAL DAY [(p)] TO SECOND [(fp)],
        # INTERVAL YEAR[(p)] TO MONTH

        # remove ASCII, UTF8 and spaces from char-like types
        coltype = re.sub(r'ASCII|UTF8| ', '', coltype)
        # remove precision and scale addition from numeric types
        coltype = re.sub(r'\(\d+(\,\d+)?\)', '', coltype)
        try:
            if coltype == 'VARCHAR':
                coltype = sqltypes.VARCHAR(length)
            elif coltype == 'DECIMAL':
                # this Dialect forces INTTYPESINRESULTSIFPOSSIBLE=y on ODBC
                # level; thus, we need to convert DECIMAL(<=18,0) back to
                # INTEGER type and DECIMAL(36,0) back to BIGINT type
                if scale == 0 and precision <= 18:
                    coltype = sqltypes.INTEGER()
                elif scale == 0 and precision == 36:
                    coltype = sqltypes.BIGINT()
                else:
                    coltype = sqltypes.DECIMAL(precision, scale)
            else:
                coltype = self.ischema_names[coltype]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (coltype, colname))
            coltype = sqltypes.NULLTYPE

        cdict = {
            'name': self.normalize_name(colname),
            'type': coltype,
            'nullable': nullable,
            'default': default,
            'is_distribution_key': is_distribution_key
        }
        if identity:
            identity = int(identity)
        # if we have a positive identity value add a sequence
        if identity is not None and identity >= 0:
            cdict['sequence'] = {'name': ''}
            # TODO: we have the possibility to encode the current identity
            # value count into the column metadata. But the consequence is
            # that it would also be used as start value in CREATE
            # statements. For now the current value is ignored.
            # Add it by changing the dict to: {'name': '', 'start': int(identity)}
        columns.append(cdict)
    return columns

def process(value):
    if not isinstance(value, (unicode, NoneType)):
        if assert_unicode == "warn":
            util.warn("Unicode type received non-unicode bind "
                      "param value %r" % value)
            return value
        else:
            raise exc.InvalidRequestError(
                "Unicode type received non-unicode bind "
                "param value %r" % value)
    else:
        return value

def get_columns(self, connection, table_name, schema=None, **kw):
    schema = schema or connection.engine.url.database
    sql_stmnt = "SELECT column_name, column_type, column_maxsize, " \
                "column_num_prec, column_num_scale, column_is_nullable, " \
                "column_default, column_identity FROM sys.exa_all_columns " \
                "WHERE column_object_type IN ('TABLE', 'VIEW') AND " \
                "column_table = :table_name AND column_schema = "
    if schema is None:
        sql_stmnt += "CURRENT_SCHEMA "
    else:
        sql_stmnt += ":schema "
    sql_stmnt += "ORDER BY column_ordinal_position"
    c = connection.execute(sql.text(sql_stmnt),
                           table_name=self.denormalize_name(table_name),
                           schema=self.denormalize_name(schema))
    columns = []
    for row in c:
        (colname, coltype, length, precision, scale, nullable,
         default, identity) = \
            (row[0], row[1], row[2], row[3], row[4],
             row[5], row[6], row[7])
        # FIXME: Missing type support: INTERVAL DAY [(p)] TO SECOND [(fp)],
        # INTERVAL YEAR[(p)] TO MONTH

        # remove ASCII, UTF8 and spaces from char-like types
        coltype = re.sub(r'ASCII|UTF8| ', '', coltype)
        # remove precision and scale addition from numeric types
        coltype = re.sub(r'\(\d+(\,\d+)?\)', '', coltype)
        try:
            if coltype == 'VARCHAR':
                coltype = sqltypes.VARCHAR(length)
            elif coltype == 'DECIMAL':
                # this Dialect forces INTTYPESINRESULTSIFPOSSIBLE=y on ODBC
                # level; thus, we need to convert DECIMAL(<=18,0) back to
                # INTEGER type
                if scale == 0 and precision <= 18:
                    coltype = sqltypes.INTEGER()
                else:
                    coltype = sqltypes.DECIMAL(precision, scale)
            else:
                coltype = self.ischema_names[coltype]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (coltype, colname))
            coltype = sqltypes.NULLTYPE

        cdict = {
            'name': self.normalize_name(colname),
            'type': coltype,
            'nullable': nullable,
            'default': default
        }
        # if we have a positive identity value add a sequence
        if identity is not None and identity >= 0:
            cdict['sequence'] = {'name': ''}
            # TODO: we have the possibility to encode the current identity
            # value count into the column metadata. But the consequence is
            # that it would also be used as start value in CREATE
            # statements. For now the current value is ignored.
            # Add it by changing the dict to: {'name': '', 'start': int(identity)}
        columns.append(cdict)
    return columns

def do_init(self):
    super(ColumnProperty, self).do_init()

    if len(self.columns) > 1 and \
            self.parent.primary_key.issuperset(self.columns):
        util.warn(
            ("On mapper %s, primary key column '%s' is being combined "
             "with distinct primary key column '%s' in attribute '%s'. "
             "Use explicit properties to give each column its own mapped "
             "attribute name.") % (str(self.parent), str(self.columns[1]),
                                   str(self.columns[0]), self.key))

def configure(self, **kwargs):
    """reconfigure the sessionmaker used by this ScopedSession."""

    if self.registry.has():
        warn('At least one scoped session is already present. '
             'configure() can not affect sessions that have '
             'already been created.')

    self.session_factory.configure(**kwargs)

def __init__(self, convert_unicode=False, assert_unicode=False,
             encoding="utf-8", paramstyle=None, dbapi=None,
             implicit_returning=None, label_length=None, **kwargs):
    if not getattr(self, "ported_sqla_06", True):
        util.warn("The %s dialect is not yet ported to "
                  "SQLAlchemy 0.6/0.7" % self.name)

    self.convert_unicode = convert_unicode
    if assert_unicode:
        util.warn_deprecated(
            "assert_unicode is deprecated. "
            "SQLAlchemy emits a warning in all cases where it "
            "would otherwise like to encode a Python unicode object "
            "into a specific encoding but a plain bytestring is "
            "received. "
            "This does *not* apply to DBAPIs that coerce Unicode "
            "natively."
        )
    self.encoding = encoding
    self.positional = False
    self._ischema = None
    self.dbapi = dbapi
    if paramstyle is not None:
        self.paramstyle = paramstyle
    elif self.dbapi is not None:
        self.paramstyle = self.dbapi.paramstyle
    else:
        self.paramstyle = self.default_paramstyle
    if implicit_returning is not None:
        self.implicit_returning = implicit_returning
    self.positional = self.paramstyle in ("qmark", "format", "numeric")
    self.identifier_preparer = self.preparer(self)
    self.type_compiler = self.type_compiler(self)

    if label_length and label_length > self.max_identifier_length:
        raise exc.ArgumentError(
            "Label length of %d is greater than this dialect's"
            " maximum identifier length of %d" %
            (label_length, self.max_identifier_length)
        )
    self.label_length = label_length

    if self.description_encoding == "use_encoding":
        self._description_decoder = \
            processors.to_unicode_processor_factory(encoding)
    elif self.description_encoding is not None:
        self._description_decoder = \
            processors.to_unicode_processor_factory(
                self.description_encoding)
    self._encoder = codecs.getencoder(self.encoding)
    self._decoder = processors.to_unicode_processor_factory(self.encoding)

def dbapi(cls):
    module = __import__("pymssql")
    # pymssql doesn't have a Binary method. we use string
    # TODO: monkeypatching here is less than ideal
    module.Binary = str

    client_ver = tuple(int(x) for x in module.__version__.split("."))
    if client_ver < (1,):
        util.warn("The pymssql dialect expects at least "
                  "the 1.0 series of the pymssql DBAPI.")
    return module

def get_columns(self, connection, table_name, schema=None, **kw):
    """
    kw arguments can be:

        oracle_resolve_synonyms

        dblink
    """
    resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
    dblink = kw.get('dblink', '')
    info_cache = kw.get('info_cache')

    (table_name, schema, dblink, synonym) = \
        self._prepare_reflection_args(connection, table_name, schema,
                                      resolve_synonyms, dblink,
                                      info_cache=info_cache)
    columns = []
    c = connection.execute(sql.text(
        "SELECT column_name, data_type, data_length, data_precision, "
        "data_scale, nullable, data_default "
        "FROM ALL_TAB_COLUMNS%(dblink)s "
        "WHERE table_name = :table_name AND owner = :owner "
        "ORDER BY column_id" % {'dblink': dblink}),
        table_name=table_name, owner=schema)

    for row in c:
        (colname, orig_colname, coltype, length, precision, scale,
         nullable, default) = \
            (self.normalize_name(row[0]), row[0], row[1], row[2],
             row[3], row[4], row[5] == 'Y', row[6])

        if coltype == 'NUMBER':
            coltype = NUMBER(precision, scale)
        elif coltype == 'CHAR' or coltype == 'VARCHAR2':
            coltype = self.ischema_names.get(coltype)(length)
        else:
            coltype = re.sub(r'\(\d+\)', '', coltype)
            try:
                coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, colname))
                coltype = sqltypes.NULLTYPE

        cdict = {
            'name': colname,
            'type': coltype,
            'nullable': nullable,
            'default': default,
        }
        if orig_colname.lower() == orig_colname:
            cdict['quote'] = True

        columns.append(cdict)
    return columns

def __init__(cls, classname, bases, dict_):
    if "_decl_class_registry" in cls.__dict__:
        return type.__init__(cls, classname, bases, dict_)

    cls._decl_class_registry[classname] = cls
    our_stuff = util.OrderedDict()
    for k in dict_:
        value = dict_[k]
        if isinstance(value, tuple) and len(value) == 1 and \
                isinstance(value[0], (Column, MapperProperty)):
            util.warn(
                "Ignoring declarative-like tuple value of attribute "
                "%s: possibly a copy-and-paste error with a comma "
                "left at the end of the line?" % k
            )
            continue
        if not isinstance(value, (Column, MapperProperty)):
            continue
        prop = _deferred_relation(cls, value)
        our_stuff[k] = prop

    table = None
    if "__table__" not in cls.__dict__:
        if "__tablename__" in cls.__dict__:
            tablename = cls.__tablename__
            autoload = cls.__dict__.get("__autoload__")
            if autoload:
                table_kw = {"autoload": True}
            else:
                table_kw = {}
            cols = []
            for key, c in our_stuff.iteritems():
                if isinstance(c, ColumnProperty):
                    for col in c.columns:
                        if isinstance(col, Column) and col.table is None:
                            _undefer_column_name(key, col)
                            cols.append(col)
                elif isinstance(c, Column):
                    _undefer_column_name(key, c)
                    cols.append(c)
            cls.__table__ = table = Table(tablename, cls.metadata,
                                          *cols, **table_kw)
    else:
        table = cls.__table__

    mapper_args = getattr(cls, "__mapper_args__", {})
    if "inherits" not in mapper_args:
        inherits = cls.__mro__[1]
        inherits = cls._decl_class_registry.get(inherits.__name__, None)
        mapper_args["inherits"] = inherits

    if hasattr(cls, "__mapper_cls__"):
        mapper_cls = util.unbound_method_to_callable(cls.__mapper_cls__)
    else:
        mapper_cls = mapper

    cls.__mapper__ = mapper_cls(cls, table, properties=our_stuff,
                                **mapper_args)
    return type.__init__(cls, classname, bases, dict_)

def get_columns(self, connection, table_name, schema=None, **kw):
    schema = schema or self.default_schema_name
    c = connection.execute(
        """select colname, coltype, collength, t3.default, t1.colno from
            syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3
            where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
              and t3.tabid = t2.tabid and t3.colno = t1.colno
            order by t1.colno""", table_name, schema)

    pk_constraint = self.get_pk_constraint(connection, table_name,
                                           schema, **kw)
    primary_cols = pk_constraint['constrained_columns']

    columns = []
    rows = c.fetchall()
    for name, colattr, collength, default, colno in rows:
        name = name.lower()

        autoincrement = False
        primary_key = False

        if name in primary_cols:
            primary_key = True

        # in 7.31, coltype = 0x000
        #                       ^^-- column type
        #                      ^-- 1 not null, 0 null
        not_nullable, coltype = divmod(colattr, 256)

        if coltype not in (0, 13) and default:
            default = default.split()[-1]

        if coltype == 6:  # Serial, mark as autoincrement
            autoincrement = True

        if coltype == 0 or coltype == 13:  # char, varchar
            coltype = ischema_names[coltype](collength)
            if default:
                default = "'%s'" % default
        elif coltype == 5:  # decimal
            precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
            if scale == 255:
                scale = 0
            coltype = sqltypes.Numeric(precision, scale)
        else:
            try:
                coltype = ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, name))
                coltype = sqltypes.NULLTYPE

        column_info = dict(name=name, type=coltype,
                           nullable=not not_nullable,
                           default=default, autoincrement=autoincrement,
                           primary_key=primary_key)
        columns.append(column_info)
    return columns

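# Worked sketch of the syscolumns decoding above, with made-up catalog
# values (not from a real Informix instance): divmod(colattr, 256) yields
# the not-null flag and raw type code, and a decimal's collength packs
# precision and scale into its high and low bytes.
for colattr in (0, 256, 261):  # hypothetical packed coltype values
    not_nullable, coltype = divmod(colattr, 256)
    print("%d -> not_nullable=%d, type_code=%d" %
          (colattr, not_nullable, coltype))
# 0 -> 0, 0 (nullable char); 256 -> 1, 0 (not-null char); 261 -> 1, 5 (not-null decimal)

collength = (10 << 8) | 2  # hypothetical encoding of decimal(10,2)
precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
assert (precision, scale) == (10, 2)
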
def __init__(self, **kwargs):
    SQLiteDialect.__init__(self, **kwargs)
    if self.dbapi is not None:
        sqlite_ver = self.dbapi.version_info
        if sqlite_ver < (2, 1, 3):
            util.warn(
                ("The installed version of pysqlite2 (%s) is out-dated "
                 "and will cause errors in some cases. Version 2.1.3 "
                 "or greater is recommended.") %
                '.'.join([str(subver) for subver in sqlite_ver]))

def process(value):
    if isinstance(value, unicode):
        return value.encode(dialect.encoding)
    elif assert_unicode and not isinstance(value, (unicode, NoneType)):
        if assert_unicode == 'warn':
            util.warn("Unicode type received non-unicode bind "
                      "param value %r" % value)
            return value
        else:
            raise exc.InvalidRequestError(
                "Unicode type received non-unicode bind "
                "param value %r" % value)
    else:
        return value

def process(value):
    if not isinstance(value, (unicode, NoneType)):
        if assert_unicode == 'warn':
            util.warn("Unicode type received non-unicode bind "
                      "param value %r" % value)
            return value
        else:
            raise exc.InvalidRequestError(
                "Unicode type received non-unicode bind "
                "param value %r" % value)
    else:
        return value

def execute(state, dict_, row):
    collection = collections.get(
        tuple([row[col] for col in local_cols]), (None,))
    if len(collection) > 1:
        util.warn(
            "Multiple rows returned with "
            "uselist=False for eagerly-loaded attribute '%s' " % self)
    scalar = collection[0]
    state.get_impl(self.key).\
        set_committed_value(state, dict_, scalar)

def _extract_non_list_result(self, result):
    """logic copied from LazyLoader#_emit_lazyload"""
    l = len(result)
    if l:
        if l > 1:
            util.warn(
                "Multiple rows returned with "
                "uselist=False for lazily-loaded attribute '%s' "
                % self.parent_property)
        return result[0]
    else:
        return None

def get_columns(self, conn, table_name, schema=None, **kw):
    if not self._is_v2plus:
        # v1.1.
        # Bad: the table name is not properly escaped.
        # Oh well. Hoping 1.1 won't be around for long.
        rows = conn.execute('SHOW COLUMNS FROM "%s"."%s"' %
                            (schema or self.default_schema_name,
                             table_name))
    else:
        # v2.0 or later. Information schema is usable.
        rows = conn.execute(
            'SELECT column_name, data_type, is_nullable::bool, '
            'column_default, numeric_precision, numeric_scale, '
            'character_maximum_length '
            'FROM information_schema.columns '
            'WHERE table_schema = %s AND table_name = %s '
            'AND NOT is_hidden::bool',
            (schema or self.default_schema_name, table_name),
        )

    res = []
    for row in rows:
        name, type_str, nullable, default = row[:4]
        # When there are type parameters, attach them to the
        # returned type object.
        m = re.match(r'^(\w+(?: \w+)*)(?:\(([0-9, ]*)\))?$', type_str)
        if m is None:
            warn("Could not parse type name '%s'" % type_str)
            typ = sqltypes.NullType()
        else:
            type_name, type_args = m.groups()
            try:
                type_class = _type_map[type_name.lower()]
            except KeyError:
                warn("Did not recognize type '%s' of column '%s'" %
                     (type_name, name))
                # use the NullType class (not the NULLTYPE instance) so
                # the instantiation below succeeds
                type_class = sqltypes.NullType
            if type_args:
                typ = type_class(*[int(s.strip())
                                   for s in type_args.split(',')])
            elif type_class is sqltypes.DECIMAL:
                typ = type_class(
                    precision=row.numeric_precision,
                    scale=row.numeric_scale,
                )
            elif type_class is sqltypes.VARCHAR:
                typ = type_class(length=row.character_maximum_length)
            else:
                typ = type_class()
        res.append(dict(
            name=name,
            type=typ,
            nullable=nullable,
            default=default,
        ))
    return res

def get_columns(self, connection, table_name, schema=None,
                resolve_synonyms=False, dblink='', **kw):
    (table_name, schema, dblink, synonym) = \
        self._prepare_reflection_args(connection, table_name, schema,
                                      resolve_synonyms, dblink)
    columns = []
    c = connection.execute(
        "select COLUMN_NAME, DATA_TYPE, DATA_LENGTH, DATA_PRECISION, "
        "DATA_SCALE, NULLABLE, DATA_DEFAULT from ALL_TAB_COLUMNS%(dblink)s "
        "where TABLE_NAME = :table_name and OWNER = :owner"
        % {'dblink': dblink},
        {'table_name': table_name, 'owner': schema})

    while True:
        row = c.fetchone()
        if row is None:
            break
        (colname, coltype, length, precision, scale, nullable, default) = \
            (self._normalize_name(row[0]), row[1], row[2], row[3],
             row[4], row[5] == 'Y', row[6])

        # INTEGER if the scale is 0 and precision is null
        # NUMBER if the scale and precision are both null
        # NUMBER(9,2) if the precision is 9 and the scale is 2
        # NUMBER(3) if the precision is 3 and scale is 0
        # length is ignored except for CHAR and VARCHAR2
        if coltype == 'NUMBER':
            if precision is None and scale is None:
                coltype = sqltypes.NUMERIC
            elif precision is None and scale == 0:
                coltype = sqltypes.INTEGER
            else:
                coltype = sqltypes.NUMERIC(precision, scale)
        elif coltype == 'CHAR' or coltype == 'VARCHAR2':
            coltype = self.ischema_names.get(coltype)(length)
        else:
            coltype = re.sub(r'\(\d+\)', '', coltype)
            try:
                coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, colname))
                coltype = sqltypes.NULLTYPE

        colargs = []
        if default is not None:
            colargs.append(sa_schema.DefaultClause(sql.text(default)))

        cdict = {
            'name': colname,
            'type': coltype,
            'nullable': nullable,
            'default': default,
            'attrs': colargs
        }
        columns.append(cdict)
    return columns

def __init__(self, **kwargs):
    default.DefaultDialect.__init__(self, **kwargs)

    def vers(num):
        return tuple([int(x) for x in num.split('.')])

    if self.dbapi is not None:
        sqlite_ver = self.dbapi.version_info
        if sqlite_ver < (2, 1, 3):
            util.warn(
                ("The installed version of pysqlite2 (%s) is out-dated "
                 "and will cause errors in some cases. Version 2.1.3 "
                 "or greater is recommended.") %
                '.'.join([str(subver) for subver in sqlite_ver]))
    self.supports_cast = (self.dbapi is None or
                          vers(self.dbapi.sqlite_version) >= vers("3.2.3"))

def __init__(self, convert_unicode=False, assert_unicode=False,
             encoding='utf-8', paramstyle=None, dbapi=None,
             implicit_returning=None, label_length=None, **kwargs):
    if not getattr(self, 'ported_sqla_06', True):
        util.warn("The %s dialect is not yet ported to SQLAlchemy 0.6" %
                  self.name)

    self.convert_unicode = convert_unicode
    if assert_unicode:
        util.warn_deprecated(
            "assert_unicode is deprecated. "
            "SQLAlchemy emits a warning in all cases where it "
            "would otherwise like to encode a Python unicode object "
            "into a specific encoding but a plain bytestring is "
            "received. "
            "This does *not* apply to DBAPIs that coerce Unicode "
            "natively.")
    self.encoding = encoding
    self.positional = False
    self._ischema = None
    self.dbapi = dbapi
    if paramstyle is not None:
        self.paramstyle = paramstyle
    elif self.dbapi is not None:
        self.paramstyle = self.dbapi.paramstyle
    else:
        self.paramstyle = self.default_paramstyle
    if implicit_returning is not None:
        self.implicit_returning = implicit_returning
    self.positional = self.paramstyle in ('qmark', 'format', 'numeric')
    self.identifier_preparer = self.preparer(self)
    self.type_compiler = self.type_compiler(self)
    if label_length and label_length > self.max_identifier_length:
        raise exc.ArgumentError(
            "Label length of %d is greater than this dialect's"
            " maximum identifier length of %d" %
            (label_length, self.max_identifier_length))
    self.label_length = label_length
    if not hasattr(self, 'description_encoding'):
        self.description_encoding = getattr(self, 'description_encoding',
                                            encoding)

def _emit_update_statements(base_mapper, uowtransaction,
                            cached_connections, mapper, table, update):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_update_commands()."""

    needs_version_id = mapper.version_id_col is not None and \
        table.c.contains_column(mapper.version_id_col)

    def update_stmt():
        clause = sql.and_()

        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                col == sql.bindparam(col._label, type_=col.type))

        if needs_version_id:
            clause.clauses.append(
                mapper.version_id_col == sql.bindparam(
                    mapper.version_id_col._label,
                    type_=mapper.version_id_col.type))

        return table.update(clause)

    statement = base_mapper._memo(('update', table), update_stmt)

    rows = 0
    for state, state_dict, params, mapper, \
            connection, value_params in update:

        if value_params:
            c = connection.execute(
                statement.values(value_params), params)
        else:
            c = cached_connections[connection].\
                execute(statement, params)

        _postfetch(mapper, uowtransaction, table, state, state_dict,
                   c.context.prefetch_cols, c.context.postfetch_cols,
                   c.context.compiled_parameters[0], value_params)
        rows += c.rowcount

    if connection.dialect.supports_sane_rowcount:
        if rows != len(update):
            raise orm_exc.StaleDataError(
                "UPDATE statement on table '%s' expected to "
                "update %d row(s); %d were matched." %
                (table.description, len(update), rows))
    elif needs_version_id:
        util.warn("Dialect %s does not support updated rowcount "
                  "- versioning cannot be verified." %
                  c.dialect.dialect_description, stacklevel=12)

def get_columns(self, connection, table_name, schema=None, **kw):
    current_schema = self.denormalize_name(schema or
                                           self.default_schema_name)
    table_name = self.denormalize_name(table_name)
    syscols = self.sys_columns

    query = sql.select(
        [
            syscols.c.colname,
            syscols.c.typename,
            syscols.c.defaultval,
            syscols.c.nullable,
            syscols.c.length,
            syscols.c.scale,
            syscols.c.isid,
            syscols.c.idgenerate,
        ],
        sql.and_(syscols.c.tabschema == current_schema,
                 syscols.c.tabname == table_name),
        order_by=[syscols.c.colno],
    )
    sa_columns = []
    for r in connection.execute(query):
        coltype = r[1].upper()
        if coltype in ["DECIMAL", "NUMERIC"]:
            coltype = self.ischema_names.get(coltype)(int(r[4]), int(r[5]))
        elif coltype in ["CHARACTER", "CHAR", "VARCHAR",
                         "GRAPHIC", "VARGRAPHIC"]:
            coltype = self.ischema_names.get(coltype)(int(r[4]))
        else:
            try:
                coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, r[0]))
                coltype = sa_types.NULLTYPE
        sa_columns.append({
            "name": self.normalize_name(r[0]),
            "type": coltype,
            "nullable": r[3] == "Y",
            "default": r[2],
            "autoincrement": (r[6] == "YES") and (r[7] is not None),
        })
    return sa_columns

def get_columns(self, connection, table_name, schema=None,
                lowercase=True, **kw):
    """
    Get all columns for a given table

    :param connection: ODBC cnxn
    :param table_name: the name of the table which has columns
    :param schema: the schema for the table
    :returns: list of columns from db with associated metadata
    """
    current_schema = self.capitalize(schema or self.default_schema_name)
    table = self.capitalize(table_name)

    # these are the indexes in our table which correspond to the ones in
    # IBM DB2: name, type, default, nullable, precision, scale,
    # autoincrement
    INDICES = [3, 5, 12, 17, 6, 8, 22, 23]

    # call SYSIBM.SQLCOLUMNS
    column_data = self.get_columns_from_db(
        connection, current_schema, table, col_indices=INDICES)

    sa_columns = []
    for r in column_data:
        coltype = self.capitalize(r[1])  # extract column type
        if coltype in ['DECIMAL', 'NUMERIC']:
            # extract full name of two argument types e.g. DECIMAL(3,1)
            coltype = self.ischema_names.get(coltype)(int(r[4]), int(r[5]))
        elif coltype in ['CHARACTER', 'CHAR', 'VARCHAR']:
            # one var types: e.g. VARCHAR(100)
            coltype = self.ischema_names.get(coltype)(int(r[4]))
        else:
            try:
                coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, r[0]))
                # assign no type if not understood
                coltype = sa_types.NULLTYPE

        sa_columns.append({  # add column data to array
            'name': r[0].lower(),
            'type': coltype,
            'nullable': r[3] == 'YES',
            'default': r[2] or None,
            'autoincrement': (r[6] == 'YES'),
        })
    return sa_columns

def _get_column_info(self, name, type_, nullable, autoincrement, default,
                     precision, scale, length):
    coltype = self.ischema_names.get(type_, None)

    kwargs = {}

    if coltype in (NUMERIC, DECIMAL):
        args = (precision, scale)
    elif coltype == FLOAT:
        args = (precision,)
    elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
        args = (length,)
    else:
        args = ()

    if coltype:
        coltype = coltype(*args, **kwargs)
        # is this necessary
        # if is_array:
        #     coltype = ARRAY(coltype)
    else:
        util.warn(
            "Did not recognize type '%s' of column '%s'" % (type_, name)
        )
        coltype = sqltypes.NULLTYPE

    if default:
        default = default.replace("DEFAULT", "").strip()
        default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
    else:
        default = None

    column_info = dict(
        name=name,
        type=coltype,
        nullable=nullable,
        default=default,
        autoincrement=autoincrement,
    )
    return column_info

def define_constraint_cascades(self, constraint):
    text = ""
    if constraint.ondelete is not None:
        text += " ON DELETE %s" % constraint.ondelete

    # oracle has no ON UPDATE CASCADE - it's only available via triggers
    # http://asktom.oracle.com/tkyte/update_cascade/index.html
    if constraint.onupdate is not None:
        util.warn(
            "Oracle does not contain native UPDATE CASCADE "
            "functionality - onupdates will not be rendered for foreign "
            "keys. Consider using deferrable=True, initially='deferred' "
            "or triggers.")

    return text

def process(value):
    if isinstance(value, unicode):
        return value.encode(dialect.encoding)
    elif assert_unicode and not isinstance(value, (unicode, NoneType)):
        if assert_unicode == 'warn':
            util.warn("Unicode type received non-unicode bind "
                      "param value %r" % value)
            return value
        else:
            raise exceptions.InvalidRequestError(
                "Unicode type received non-unicode bind "
                "param value %r" % value)
    else:
        return value

def initialize(self, connection):
    super(MSDialect, self).initialize(connection)
    if self.server_version_info[0] not in range(8, 17):
        # FreeTDS with version 4.2 seems to report here
        # a number like "95.10.255". Don't know what
        # that is. So emit warning.
        util.warn(
            "Unrecognized server version info '%s'. Version specific "
            "behaviors may not function properly. If using ODBC "
            "with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, "
            "is configured in the FreeTDS configuration." %
            ".".join(str(x) for x in self.server_version_info))
    if self.server_version_info >= MS_2005_VERSION and \
            'implicit_returning' not in self.__dict__:
        self.implicit_returning = True

def init_class_attribute(self, mapper):
    self.is_class_level = True
    if not self.uselist:
        util.warn(
            "On relationship %s, 'dynamic' loaders cannot be used with "
            "many-to-one/one-to-one relationships and/or "
            "uselist=False." % self.parent_property)
    strategies._register_attribute(
        self, mapper,
        useobject=True,
        impl_class=DynamicAttributeImpl,
        target_mapper=self.parent_property.mapper,
        order_by=self.parent_property.order_by,
        query_class=self.parent_property.query_class)

def define_constraint_cascades(self, constraint):
    """
    Add a clause for cascading constraints

    :param constraint: the constraint class
    :returns: constraint clause
    """
    text = ""
    if constraint.ondelete is not None:
        text += " ON DELETE %s" % constraint.ondelete

    if constraint.onupdate is not None:
        util.warn(
            "Splice Machine does not support UPDATE CASCADE "
            "for foreign keys."
        )

    return text

def _detect_charset(self, connection):
    """Sniff out the character set in use for connection results."""

    # Prefer 'character_set_results' for the current connection over the
    # value in the driver.  SET NAMES or individual variable SETs will
    # change the charset without updating the driver's view of the world.
    #
    # If it's decided that issuing that sort of SQL leaves you SOL, then
    # this can prefer the driver value.
    rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
    opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))

    for key in ('character_set_connection', 'character_set'):
        if opts.get(key, None):
            return opts[key]

    util.warn("Could not detect the connection character set. "
              "Assuming latin1.")
    return 'latin1'

def _emit_delete_statements(base_mapper, uowtransaction,
                            cached_connections, mapper, table, delete):
    """Emit DELETE statements corresponding to value lists collected
    by _collect_delete_commands()."""

    need_version_id = mapper.version_id_col is not None and \
        table.c.contains_column(mapper.version_id_col)

    def delete_stmt():
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                col == sql.bindparam(col.key, type_=col.type))

        if need_version_id:
            clause.clauses.append(
                mapper.version_id_col == sql.bindparam(
                    mapper.version_id_col.key,
                    type_=mapper.version_id_col.type))

        return table.delete(clause)

    for connection, del_objects in delete.iteritems():
        statement = base_mapper._memo(('delete', table), delete_stmt)

        connection = cached_connections[connection]

        if need_version_id:
            # TODO: need test coverage for this [ticket:1761]
            if connection.dialect.supports_sane_rowcount:
                rows = 0
                # execute deletes individually so that versioned
                # rows can be verified
                for params in del_objects:
                    c = connection.execute(statement, params)
                    rows += c.rowcount
                if rows != len(del_objects):
                    raise orm_exc.StaleDataError(
                        "DELETE statement on table '%s' expected to "
                        "delete %d row(s); %d were matched." %
                        (table.description, len(del_objects), c.rowcount))
            else:
                util.warn("Dialect %s does not support deleted rowcount "
                          "- versioning cannot be verified." %
                          connection.dialect.dialect_description,
                          stacklevel=12)
                connection.execute(statement, del_objects)
        else:
            connection.execute(statement, del_objects)

def _determine_direction(self):
    if self.secondaryjoin is not None:
        self.direction = MANYTOMANY
    elif self._refers_to_parent_table():
        # self referential defaults to ONETOMANY unless the "remote" side
        # is present and does not reference any foreign key columns
        if self.local_remote_pairs:
            remote = [r for l, r in self.local_remote_pairs]
        elif self.remote_side:
            remote = self.remote_side
        else:
            remote = None

        if not remote or self._foreign_keys.intersection(remote):
            self.direction = ONETOMANY
        else:
            self.direction = MANYTOONE
    else:
        for mappedtable, parenttable in [
                (self.mapper.mapped_table, self.parent.mapped_table),
                (self.mapper.local_table, self.parent.local_table)]:
            onetomany = [c for c in self._foreign_keys
                         if mappedtable.c.contains_column(c)]
            manytoone = [c for c in self._foreign_keys
                         if parenttable.c.contains_column(c)]

            if not onetomany and not manytoone:
                raise sa_exc.ArgumentError(
                    "Can't determine relation direction for relationship "
                    "'%s' - foreign key columns are present in neither "
                    "the parent nor the child's mapped tables"
                    % (str(self)))
            elif onetomany and manytoone:
                continue
            elif onetomany:
                self.direction = ONETOMANY
                break
            elif manytoone:
                self.direction = MANYTOONE
                break
        else:
            raise sa_exc.ArgumentError(
                "Can't determine relation direction for relationship '%s' "
                "- foreign key columns are present in both the parent and "
                "the child's mapped tables. Specify 'foreign_keys' "
                "argument." % (str(self)))

    if self.cascade.delete_orphan and self.direction is MANYTOMANY:
        util.warn("On %s, delete-orphan cascade is not supported on a "
                  "many-to-many relation. This will raise an error in 0.6."
                  % self)

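# Side note on the for/else construct above: the else clause runs only when
# the loop completes without hitting a break, which is how the "present in
# both tables" error fires only after every (mapped, parent) table pair
# proved ambiguous. A tiny stand-alone illustration:
for pair_is_decisive in (False, False):  # every iteration is inconclusive
    if pair_is_decisive:
        break
else:
    print("every pair was ambiguous")  # printed: the loop never broke
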
def get_columns(self, connection, table_name, schema=None, **kw):
    c = connection.execute(
        """select colname, coltype, collength, t3.default, t1.colno from
            syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3
            where t1.tabid = t2.tabid and t2.tabname=?
              and t3.tabid = t2.tabid and t3.colno = t1.colno
            order by t1.colno""", table_name.lower())

    # include_columns is assumed to arrive via **kw here
    include_columns = kw.get('include_columns')

    columns = []
    rows = c.fetchall()
    for name, colattr, collength, default, colno in rows:
        name = name.lower()
        if include_columns and name not in include_columns:
            continue

        # in 7.31, coltype = 0x000
        #                       ^^-- column type
        #                      ^-- 1 not null, 0 null
        nullable, coltype = divmod(colattr, 256)

        if coltype not in (0, 13) and default:
            default = default.split()[-1]

        if coltype == 0 or coltype == 13:  # char, varchar
            coltype = ischema_names[coltype](collength)
            if default:
                default = "'%s'" % default
        elif coltype == 5:  # decimal
            precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
            if scale == 255:
                scale = 0
            coltype = sqltypes.Numeric(precision, scale)
        else:
            try:
                coltype = ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, name))
                coltype = sqltypes.NULLTYPE

        # TODO: nullability ??
        nullable = True

        column_info = dict(name=name, type=coltype, nullable=nullable,
                           default=default)
        columns.append(column_info)
    return columns

def get_columns(self, connection, table_name, schema=None, **kw):
    quote = self.identifier_preparer.quote_identifier
    if schema is not None:
        pragma = "PRAGMA %s." % quote(schema)
    else:
        pragma = "PRAGMA "
    qtable = quote(table_name)
    c = _pragma_cursor(
        connection.execute("%stable_info(%s)" % (pragma, qtable)))
    found_table = False
    columns = []
    while True:
        row = c.fetchone()
        if row is None:
            break
        (name, type_, nullable, default, has_default, primary_key) = \
            (row[1], row[2].upper(), not row[3], row[4],
             row[4] is not None, row[5])
        name = re.sub(r'^\"|\"$', '', name)
        if default:
            default = re.sub(r"^\'|\'$", '', default)
        match = re.match(r'(\w+)(\(.*?\))?', type_)
        if match:
            coltype = match.group(1)
            args = match.group(2)
        else:
            coltype = "VARCHAR"
            args = ''
        try:
            coltype = self.ischema_names[coltype]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (coltype, name))
            coltype = sqltypes.NullType
        if args is not None:
            args = re.findall(r'(\d+)', args)
            coltype = coltype(*[int(a) for a in args])

        columns.append({
            'name': name,
            'type': coltype,
            'nullable': nullable,
            'default': default,
            'primary_key': primary_key
        })
    return columns

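# Quick illustration of the type-string regex used above, on a made-up
# SQLite column declaration: group(1) is the base type name, and the
# numeric arguments are pulled out of group(2).
import re
match = re.match(r'(\w+)(\(.*?\))?', "VARCHAR(30)")
print(match.group(1))                                          # VARCHAR
print([int(a) for a in re.findall(r'(\d+)', match.group(2))])  # [30]
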
def get_columns(self, connection, table_name, schema=None, **kw):
    rows = self._get_table_columns(connection, table_name, schema)
    result = []
    for row in rows:
        try:
            coltype = _type_map[row.Type]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (row.Type, row.Column))
            coltype = types.NullType
        result.append({
            'name': row.Column,
            'type': coltype,
            # newer Presto no longer includes this column
            'nullable': getattr(row, 'Null', True),
            'default': None,
        })
    return result

def _get_column_type(self, name, spec):
    if spec.startswith('Array'):
        inner = spec[6:-1]
        coltype = self.ischema_names['_array']
        return coltype(self._get_column_type(name, inner))

    elif spec.startswith('FixedString'):
        length = int(spec[12:-1])
        return self.ischema_names['FixedString'](length)

    elif spec.startswith('Nullable'):
        inner = spec[9:-1]
        coltype = self.ischema_names['_nullable']
        return coltype(self._get_column_type(name, inner))

    elif spec.startswith('LowCardinality'):
        inner = spec[15:-1]
        coltype = self.ischema_names['_lowcardinality']
        return coltype(self._get_column_type(name, inner))

    elif spec.startswith('Enum'):
        pos = spec.find('(')
        type = spec[:pos]
        coltype = self.ischema_names[type]

        options = dict()
        if pos >= 0:
            options = self._parse_options(
                spec[pos + 1: spec.rfind(')')]
            )
        if not options:
            return sqltypes.NullType

        type_enum = enum.Enum('%s_enum' % name, options)
        return lambda: coltype(type_enum)

    elif spec.lower().startswith('decimal'):
        coltype = self.ischema_names['Decimal']
        return coltype(*self._parse_decimal_params(spec))

    else:
        try:
            return self.ischema_names[spec]
        except KeyError:
            warn("Did not recognize type '%s' of column '%s'" %
                 (spec, name))
            return sqltypes.NullType

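# Small sketch of the prefix/slice unwrapping used above: each ClickHouse
# wrapper type is peeled off by slicing past its literal prefix and dropping
# the trailing ')' (the spec string is a made-up example).
spec = "Nullable(FixedString(8))"
inner = spec[9:-1]            # len("Nullable(") == 9
assert inner == "FixedString(8)"
length = int(inner[12:-1])    # len("FixedString(") == 12
assert length == 8
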
def _get_columns_for_table_query(self, connection, query,
                                 table_schema=None, table_name=None):
    params = {
        'table_schema': self.denormalize_name(table_schema),
        'table_name': self.denormalize_name(table_name),
    }
    result = connection.execute(query, params)
    for (table_name, colname, coltype, character_maximum_length,
         numeric_precision, numeric_scale, is_nullable, column_default,
         is_identity) in result:
        table_name = self.normalize_name(table_name)
        colname = self.normalize_name(colname)
        if colname.startswith('sys_clustering_column'):
            # ignoring clustering column
            continue
        col_type = self.ischema_names.get(coltype, None)
        col_type_kw = {}
        if col_type is None:
            sa_util.warn(
                "Did not recognize type '{}' of column '{}'".format(
                    coltype, colname))
            # use the NullType class (not the NULLTYPE instance) so the
            # no-argument instantiation below succeeds
            col_type = sqltypes.NullType
        else:
            if issubclass(col_type, sqltypes.Numeric):
                col_type_kw['precision'] = numeric_precision
                col_type_kw['scale'] = numeric_scale
            elif issubclass(col_type, (sqltypes.String, sqltypes.BINARY)):
                col_type_kw['length'] = character_maximum_length

        type_instance = col_type(**col_type_kw)

        yield (table_name, colname, {
            'name': colname,
            'type': type_instance,
            'nullable': is_nullable == 'YES',
            'default': column_default,
            'autoincrement': is_identity == 'YES',
        })

def get_columns(self, connection, table_name, schema=None, **kwargs):
    schema = schema or self.default_schema_name

    result = connection.execute(
        sql.text(
            """SELECT COLUMN_NAME, DATA_TYPE_NAME, DEFAULT_VALUE,
                   IS_NULLABLE, LENGTH, SCALE, COMMENTS FROM (
                 SELECT SCHEMA_NAME, TABLE_NAME, COLUMN_NAME, POSITION,
                        DATA_TYPE_NAME, DEFAULT_VALUE, IS_NULLABLE,
                        LENGTH, SCALE, COMMENTS
                 FROM SYS.TABLE_COLUMNS
                 UNION ALL
                 SELECT SCHEMA_NAME, VIEW_NAME AS TABLE_NAME, COLUMN_NAME,
                        POSITION, DATA_TYPE_NAME, DEFAULT_VALUE,
                        IS_NULLABLE, LENGTH, SCALE, COMMENTS
                 FROM SYS.VIEW_COLUMNS) AS COLUMS
               WHERE SCHEMA_NAME=:schema AND TABLE_NAME=:table
               ORDER BY POSITION"""
        ).bindparams(
            schema=self.denormalize_name(schema),
            table=self.denormalize_name(table_name)
        )
    )

    columns = []
    for row in result.fetchall():
        column = {
            'name': self.normalize_name(row[0]),
            'default': row[2],
            'nullable': row[3] == "TRUE",
            'comment': row[6],
        }

        if hasattr(types, row[1]):
            column['type'] = getattr(types, row[1])
        elif hasattr(hana_types, row[1]):
            column['type'] = getattr(hana_types, row[1])
        else:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (row[1], column['name']))
            column['type'] = types.NULLTYPE

        if column['type'] == types.DECIMAL:
            column['type'] = types.DECIMAL(row[4], row[5])
        elif column['type'] == types.VARCHAR:
            column['type'] = types.VARCHAR(row[4])
        elif column['type'] == types.NVARCHAR:
            column['type'] = types.NVARCHAR(row[4])

        columns.append(column)

    return columns

def _get_column_type(self, type_):
    match = self._pattern_column_type.match(type_)
    if match:
        name = match.group(1).lower()
        length = match.group(2)
    else:
        name = type_.lower()
        length = None

    args = []
    if name in ["boolean"]:
        col_type = types.BOOLEAN
    elif name in ["float", "double", "real"]:
        col_type = types.FLOAT
    elif name in ["tinyint", "smallint", "integer", "int"]:
        col_type = types.INTEGER
    elif name in ["bigint"]:
        col_type = types.BIGINT
    elif name in ["decimal"]:
        col_type = types.DECIMAL
        if length:
            precision, scale = length.split(",")
            args = [int(precision), int(scale)]
    elif name in ["char"]:
        col_type = types.CHAR
        if length:
            args = [int(length)]
    elif name in ["varchar"]:
        col_type = types.VARCHAR
        if length:
            args = [int(length)]
    elif name in ["string"]:
        col_type = types.String
    elif name in ["date"]:
        col_type = types.DATE
    elif name in ["timestamp"]:
        col_type = types.TIMESTAMP
    elif name in ["binary", "varbinary"]:
        col_type = types.BINARY
    elif name in ["array", "map", "struct", "row", "json"]:
        col_type = types.String
    else:
        util.warn(f"Did not recognize type '{type_}'")
        col_type = types.NullType
    return col_type(*args)

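# Hedged sketch of the name/length split assumed by _get_column_type above.
# The dialect's real _pattern_column_type regex isn't shown in this snippet,
# so the pattern here is an illustrative stand-in, not the actual attribute:
import re
_pattern_column_type = re.compile(r'^(\w+)\((.+)\)$')
m = _pattern_column_type.match("decimal(10,2)")
name, length = m.group(1).lower(), m.group(2)
precision, scale = length.split(",")
assert (name, int(precision), int(scale)) == ("decimal", 10, 2)
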
def get_columns(self, connection, table_name, schema=None, **kw):
    rows = self._get_table_columns(connection, table_name, None)
    result = []
    for row in rows:
        name, coltype, nullable, _is_partition_key = row
        try:
            coltype = _type_map[coltype]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (coltype, name))
            coltype = types.NullType
        result.append({
            'name': name,
            'type': coltype,
            'nullable': nullable,
            'default': None,
        })
    return result

def get_columns(self, connection, table_name, schema=None, **kw):
    table = self._get_table(connection, table_name, schema)
    columns = table.schema
    result = []
    for col in columns:
        try:
            coltype = _type_map[col.field_type]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (col.field_type, col.name))
            # fall back to NullType so coltype is always bound below
            coltype = types.NullType
        result.append({
            'name': col.name,
            'type': types.ARRAY(coltype) if col.mode == 'REPEATED'
                    else coltype,
            'nullable': col.mode == 'NULLABLE' or col.mode == 'REPEATED',
            'default': None,
        })
    return result

def get_columns(self, connection, table_name, schema=None, **kw):
    current_schema = self.denormalize_name(schema or
                                           self.default_schema_name)
    table_name = self.denormalize_name(table_name)
    syscols = self.sys_columns

    query = sql.select([
        syscols.c.colname, syscols.c.typename,
        syscols.c.defaultval, syscols.c.nullable,
        syscols.c.length, syscols.c.scale,
        syscols.c.isid, syscols.c.idgenerate
    ], sql.and_(syscols.c.tabschema == current_schema,
                syscols.c.tabname == table_name),
       order_by=[syscols.c.colno])
    sa_columns = []
    for r in connection.execute(query):
        coltype = r[1].upper()
        if coltype in ['DECIMAL', 'NUMERIC']:
            coltype = self.ischema_names.get(coltype)(int(r[4]), int(r[5]))
        elif coltype in ['CHARACTER', 'CHAR', 'VARCHAR',
                         'GRAPHIC', 'VARGRAPHIC']:
            coltype = self.ischema_names.get(coltype)(int(r[4]))
        else:
            try:
                coltype = self.ischema_names[coltype]
            except KeyError:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (coltype, r[0]))
                coltype = sa_types.NULLTYPE
        sa_columns.append({
            'name': self.normalize_name(r[0]),
            'type': coltype,
            'nullable': r[3] == unicode('Y'),
            'default': r[2],
            'autoincrement': (r[6] == unicode('YES')) and
                             (r[7] is not None),
        })
    return sa_columns

def __init__(self, arg=""): if not arg: values = set() else: values = set(c.strip() for c in arg.split(',')) self.delete_orphan = "delete-orphan" in values self.delete = "delete" in values or "all" in values self.save_update = "save-update" in values or "all" in values self.merge = "merge" in values or "all" in values self.expunge = "expunge" in values or "all" in values self.refresh_expire = "refresh-expire" in values or "all" in values if self.delete_orphan and not self.delete: util.warn("The 'delete-orphan' cascade option requires " "'delete'. This will raise an error in 0.6.") for x in values: if x not in all_cascades: raise sa_exc.ArgumentError("Invalid cascade option '%s'" % x)
def get_indexes(self, connection, table_name, schema, **kw):
    table_oid = self.get_table_oid(connection, table_name, schema,
                                   info_cache=kw.get('info_cache'))

    IDX_SQL = """
        SELECT c.relname, i.indisunique, i.indexprs, i.indpred, a.attname
        FROM pg_index i, pg_class c, pg_attribute a
        WHERE i.indrelid = :table_oid AND i.indexrelid = c.oid
            AND a.attrelid = i.indexrelid AND i.indisprimary = 'f'
        ORDER BY c.relname, a.attnum
    """
    t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode})
    c = connection.execute(t, table_oid=table_oid)
    index_names = {}
    indexes = []
    sv_idx_name = None
    for row in c.fetchall():
        idx_name, unique, expr, prd, col = row
        if expr:
            if idx_name != sv_idx_name:
                util.warn(
                    "Skipped unsupported reflection of "
                    "expression-based index %s" % idx_name)
            sv_idx_name = idx_name
            continue
        if prd and not idx_name == sv_idx_name:
            util.warn(
                "Predicate of partial index %s ignored during reflection"
                % idx_name)
            sv_idx_name = idx_name
        if idx_name in index_names:
            index_d = index_names[idx_name]
        else:
            index_d = {'column_names': []}
            indexes.append(index_d)
            index_names[idx_name] = index_d
        index_d['name'] = idx_name
        index_d['column_names'].append(col)
        index_d['unique'] = unique
    return indexes