Example 1
 def _locate_owner_row(self, owner, name, rows, raiseerr=False):
     """return the row in the given list of rows which references the given table name and owner name."""
     if not rows:
         if raiseerr:
             raise exceptions.NoSuchTableError(name)
         else:
             return None
     else:
         if owner is not None:
             for row in rows:
                 if owner.upper() in row[0]:
                     return row
             else:
                 if raiseerr:
                     raise exceptions.AssertionError(
                         "Specified owner %s does not own table %s" %
                         (owner, name))
                 else:
                     return None
         else:
             if len(rows) == 1:
                 return rows[0]
             else:
                 if raiseerr:
                     raise exceptions.AssertionError(
                         "There are multiple tables with name '%s' visible to the schema, you must specifiy owner"
                         % name)
                 else:
                     return None
Example 2
 def server_version_info(self, connection):
     v = connection.execute("select version()").scalar()
     m = re.match(r'PostgreSQL (\d+)\.(\d+)\.(\d+)', v)
     if not m:
         raise exceptions.AssertionError(
             "Could not determine version from string '%s'" % v)
     return tuple([int(x) for x in m.group(1, 2, 3)])
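
For illustration, a hypothetical check of the parsing above (the sample version string is an assumption, not taken from the snippet):

import re

v = "PostgreSQL 8.2.4 on i686-pc-linux-gnu, compiled by GCC 4.1.2"
m = re.match(r'PostgreSQL (\d+)\.(\d+)\.(\d+)', v)
# only the leading "PostgreSQL major.minor.patch" portion is consumed
assert tuple(int(x) for x in m.group(1, 2, 3)) == (8, 2, 4)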
Example 3
    def execute(self, source, dest, parent, child, clearkeys):
        # TODO: break the "dictionary" case into a separate method like 'update' above,
        # reduce conditionals
        if source is None:
            if self.issecondary is False:
                source = parent
            elif self.issecondary is True:
                source = child
        if clearkeys or source is None:
            value = None
            clearkeys = True
        else:
            try:
                value = self.source_mapper._get_state_attr_by_column(
                    source, self.source_column)
            except exceptions.UnmappedColumnError:
                self._raise_col_to_prop(False)
        if isinstance(dest, dict):
            dest[self.dest_column.key] = value
        else:
            if clearkeys and self.dest_primary_key():
                raise exceptions.AssertionError(
                    "Dependency rule tried to blank-out primary key column '%s' on instance '%s'"
                    % (str(self.dest_column), mapperutil.state_str(dest)))

            if logging.is_debug_enabled(self.logger):
                self.logger.debug(
                    "execute() instances: %s(%s)->%s(%s) ('%s')" %
                    (mapperutil.state_str(source), str(self.source_column),
                     mapperutil.state_str(dest), str(self.dest_column), value))
            try:
                self.dest_mapper._set_state_attr_by_column(
                    dest, self.dest_column, value)
            except exceptions.UnmappedColumnError:
                self._raise_col_to_prop(True)
Example 4
    def execute(self, source, dest, obj, child, clearkeys):
        if source is None:
            if self.issecondary is False:
                source = obj
            elif self.issecondary is True:
                source = child
        if clearkeys or source is None:
            value = None
            clearkeys = True
        else:
            value = self.source_mapper.get_attr_by_column(
                source, self.source_column)
        if isinstance(dest, dict):
            dest[self.dest_column.key] = value
        else:
            if clearkeys and self.dest_primary_key():
                raise exceptions.AssertionError(
                    "Dependency rule tried to blank-out primary key column '%s' on instance '%s'"
                    % (str(self.dest_column), mapperutil.instance_str(dest)))

            if logging.is_debug_enabled(self.logger):
                self.logger.debug(
                    "execute() instances: %s(%s)->%s(%s) ('%s')" %
                    (mapperutil.instance_str(source), str(self.source_column),
                     mapperutil.instance_str(dest), str(self.dest_column),
                     value))
            self.dest_mapper.set_attr_by_column(dest, self.dest_column, value)
Example 5
    def _create_polymorphic_joins(self):
        # get ready to create "polymorphic" primary/secondary join clauses.
        # these clauses represent the same join between parent/child tables that the primary
        # and secondary join clauses represent, except they reference ColumnElements that are specifically
        # in the "polymorphic" selectables.  these are used to construct joins for both Query as well as
        # eager loading, and also are used to calculate "lazy loading" clauses.

        # as we will be using the polymorphic selectables (i.e. select_table argument to Mapper) to figure this out,
        # first create maps of all the "equivalent" columns, since polymorphic selectables will often munge
        # several "equivalent" columns (such as parent/child fk cols) into just one column.
        target_equivalents = self.mapper._get_inherited_column_equivalents()

        # if the target mapper loads polymorphically, adapt the clauses to the target's selectable
        if self.loads_polymorphic:
            if self.secondaryjoin:
                self.polymorphic_secondaryjoin = self.secondaryjoin.copy_container()
                sql_util.ClauseAdapter(self.mapper.select_table).traverse(
                    self.polymorphic_secondaryjoin)
                self.polymorphic_primaryjoin = self.primaryjoin.copy_container()
            else:
                self.polymorphic_primaryjoin = self.primaryjoin.copy_container()
                if self.direction is sync.ONETOMANY:
                    sql_util.ClauseAdapter(
                        self.mapper.select_table,
                        include=self.foreign_keys,
                        equivalents=target_equivalents).traverse(
                            self.polymorphic_primaryjoin)
                elif self.direction is sync.MANYTOONE:
                    sql_util.ClauseAdapter(
                        self.mapper.select_table,
                        exclude=self.foreign_keys,
                        equivalents=target_equivalents).traverse(
                            self.polymorphic_primaryjoin)
                self.polymorphic_secondaryjoin = None
            # load "polymorphic" versions of the columns present in "remote_side" - this is
            # important for lazy-clause generation which goes off the polymorphic target selectable
            for c in list(self.remote_side):
                for equiv in [c] + (c in target_equivalents
                                    and target_equivalents[c] or []):
                    corr = self.mapper.select_table.corresponding_column(
                        equiv, raiseerr=False)
                    if corr:
                        self.remote_side.add(corr)
                        break
                else:
                    raise exceptions.AssertionError(
                        str(self) +
                        ": Could not find corresponding column for " + str(c) +
                        " in selectable " + str(self.mapper.select_table))
        else:
            self.polymorphic_primaryjoin = self.primaryjoin.copy_container()
            self.polymorphic_secondaryjoin = (
                self.secondaryjoin and self.secondaryjoin.copy_container() or None)
Example 6
    def __init__(self, *values):
        """Emulate an Enum type.

        values:
           A list of valid values for this column
        """

        if values is None or len(values) == 0:
            raise exceptions.AssertionError('Enum requires a list of values')
        self.values = list(values)
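
For context, a hypothetical usage sketch of an Enum type built from this recipe; the module name and table are illustrative, and the index-based storage follows the process_bind_param shown in Example 18:

from sqlalchemy import Table, Column, Integer, String, MetaData
from mytypes import Enum  # hypothetical module holding the recipe above

metadata = MetaData()
employees = Table(
    'employees', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String(50)),
    # the stored value is the index of the chosen entry in Enum.values
    Column('status', Enum('active', 'on_leave', 'terminated')),
)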
Example 7
def clear(dest, dest_mapper, synchronize_pairs):
    for l, r in synchronize_pairs:
        if r.primary_key:
            raise exceptions.AssertionError(
                "Dependency rule tried to blank-out primary key column '%s' on instance '%s'"
                % (r, mapperutil.state_str(dest)))
        try:
            dest_mapper._set_state_attr_by_column(dest, r, None)
        except exceptions.UnmappedColumnError:
            _raise_col_to_prop(True, None, l, dest_mapper, r)
Example 8
 def dialect_impl(self, dialect):
     try:
         return self.impl_dict[dialect]
     except KeyError:
         typedesc = dialect.type_descriptor(self.impl)
         tt = self.copy()
         if not isinstance(tt, self.__class__):
             raise exceptions.AssertionError("Type object %s does not properly implement the copy() method, it must return an object of type %s" % (self, self.__class__))
         tt.impl = typedesc
         self.impl_dict[dialect] = tt
         return tt
Example 9
    def _resolve_synonym(self,
                         connection,
                         desired_owner=None,
                         desired_synonym=None,
                         desired_table=None):
        """search for a local synonym matching the given desired owner/name.

        if desired_owner is None, attempts to locate a distinct owner.

	returns the actual name, owner, dblink name, and synonym name if found.
        """

        sql = """select OWNER, TABLE_OWNER, TABLE_NAME, DB_LINK, SYNONYM_NAME
		   from   ALL_SYNONYMS WHERE """

        clauses = []
        params = {}
        if desired_synonym:
            clauses.append("SYNONYM_NAME=:synonym_name")
            params['synonym_name'] = desired_synonym
        if desired_owner:
            clauses.append("TABLE_OWNER=:desired_owner")
            params['desired_owner'] = desired_owner
        if desired_table:
            clauses.append("TABLE_NAME=:tname")
            params['tname'] = desired_table

        sql += " AND ".join(clauses)

        result = connection.execute(sql, **params)
        if desired_owner:
            row = result.fetchone()
            if row:
                return (row['TABLE_NAME'], row['TABLE_OWNER'],
                        row['DB_LINK'], row['SYNONYM_NAME'])
            else:
                return None, None, None, None
        else:
            rows = result.fetchall()
            if len(rows) > 1:
                raise exceptions.AssertionError(
                    "There are multiple tables visible to the schema, you must specify owner"
                )
            elif len(rows) == 1:
                row = rows[0]
                return (row['TABLE_NAME'], row['TABLE_OWNER'],
                        row['DB_LINK'], row['SYNONYM_NAME'])
            else:
                return None, None, None, None
Example 10
    def cascade_iterator(self, type_, state, visited_instances, halt_on=None):
        if type_ not in self.cascade:
            return
        passive = type_ != 'delete' or self.passive_deletes
        mapper = self.mapper.primary_mapper()
        instances = attributes.get_as_list(state, self.key, passive=passive)
        if instances:
            for c in instances:
                if c is not None and c not in visited_instances and (halt_on is None or not halt_on(c)):
                    if not isinstance(c, self.mapper.class_):
                        raise exceptions.AssertionError("Attribute '%s' on class '%s' doesn't handle objects of type '%s'" % (self.key, str(self.parent.class_), str(c.__class__)))
                    visited_instances.add(c)

                    # cascade using the mapper local to this object, so that its individual properties are located
                    instance_mapper = object_mapper(c, entity_name=mapper.entity_name)
                    yield (c, instance_mapper, c._state)
Example 11
    def dialect_impl(self, dialect):
        try:
            return self._impl_dict[dialect]
        except AttributeError:
            self._impl_dict = {}
        except KeyError:
            pass

        typedesc = self.load_dialect_impl(dialect)
        tt = self.copy()
        if not isinstance(tt, self.__class__):
            raise exceptions.AssertionError(
                "Type object %s does not properly implement the copy() method, it must return an object of type %s"
                % (self, self.__class__))
        tt.impl = typedesc
        self._impl_dict[dialect] = tt
        return tt
Example 12
    def merge(self, object, entity_name=None, _recursive=None):
        """Copy the state of the given `object` onto the persistent
        object with the same identifier.

        If there is no persistent instance currently associated with
        the session, it will be loaded.  Return the persistent
        instance.  If the given instance is unsaved, a copy of it is
        saved and returned as a newly persistent instance.  The given
        instance does not become associated with the session.

        This operation cascades to associated instances if the
        association is mapped with ``cascade="merge"``.
        """

        if _recursive is None:
            _recursive = util.Set()
        if entity_name is not None:
            mapper = _class_mapper(object.__class__, entity_name=entity_name)
        else:
            mapper = _object_mapper(object)
        if mapper in _recursive or object in _recursive:
            return None
        _recursive.add(mapper)
        _recursive.add(object)
        try:
            key = getattr(object, '_instance_key', None)
            if key is None:
                merged = attribute_manager.new_instance(mapper.class_)
            else:
                if key in self.identity_map:
                    merged = self.identity_map[key]
                else:
                    merged = self.get(mapper.class_, key[1])
                    if merged is None:
                        raise exceptions.AssertionError(
                            "Instance %s has an instance key but is not persisted"
                            % mapperutil.instance_str(object))
            for prop in mapper.iterate_properties:
                prop.merge(self, object, merged, _recursive)
            if key is None:
                self.save(merged, entity_name=mapper.entity_name)
            return merged
        finally:
            _recursive.remove(mapper)
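
A brief usage sketch of merge() under the semantics described in the docstring; the User class, sessions, and identifier are illustrative assumptions rather than code from the snippet:

# an instance loaded in one session...
user = session_a.query(User).get(5)
session_a.close()

# ...can have its state merged into another session; merge() returns the
# persistent instance associated with session_b, loading it if necessary
merged = session_b.merge(user)
merged.name = 'ed'
session_b.flush()

# the passed-in instance never becomes associated with session_b
assert merged is not user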
Example 13
 def dialect_impl(self, dialect):
     try:
         return self.impl_dict[dialect]
     except KeyError:
         # see if the dialect has an adaptation of the TypeDecorator itself
         adapted_decorator = dialect.type_descriptor(self)
         if adapted_decorator is not self:
             result = adapted_decorator.dialect_impl(dialect)
             self.impl_dict[dialect] = result
             return result
         typedesc = dialect.type_descriptor(self.impl)
         tt = self.copy()
         if not isinstance(tt, self.__class__):
             raise exceptions.AssertionError(
                 "Type object %s does not properly implement the copy() method, it must return an object of type %s"
                 % (self, self.__class__))
         tt.impl = typedesc
         self.impl_dict[dialect] = tt
         return tt
Example 14
 def cascade_iterator(self, type, object, recursive, halt_on=None):
     if type not in self.cascade:
         return
     passive = type != 'delete' or self.passive_deletes
     mapper = self.mapper.primary_mapper()
     for c in sessionlib.attribute_manager.get_as_list(object,
                                                       self.key,
                                                       passive=passive):
         if c is not None and c not in recursive and (halt_on is None
                                                      or not halt_on(c)):
             if not isinstance(c, self.mapper.class_):
                 raise exceptions.AssertionError(
                     "Attribute '%s' on class '%s' doesn't handle objects of type '%s'"
                     %
                     (self.key, str(self.parent.class_), str(c.__class__)))
             recursive.add(c)
             yield c
             for c2 in mapper.cascade_iterator(type, c, recursive):
                 yield c2
Example 15
    def server_version_info(self, connection):
        """Get the version of the Firebird server used by a connection.

        Returns a tuple of (`major`, `minor`, `build`), three integers
        representing the version of the attached server.
        """

        # This is the simpler approach (the other uses the services api),
        # that for backward compatibility reasons returns a string like
        #   LI-V6.3.3.12981 Firebird 2.0
        # where the first version is a fake one resembling the old
        # Interbase signature. This is more than enough for our purposes,
        # as this is mainly (only?) used by the testsuite.

        from re import match

        fbconn = connection.connection.connection
        version = fbconn.server_version
        m = match(r'\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+) \w+ (\d+)\.(\d+)', version)
        if not m:
            raise exceptions.AssertionError(
                "Could not determine version from string '%s'" % version)
        return tuple([int(x) for x in m.group(5, 6, 4)])
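
As a hypothetical illustration of the group ordering above, applied to the sample string quoted in the comment:

import re

version = "LI-V6.3.3.12981 Firebird 2.0"
m = re.match(r'\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+) \w+ (\d+)\.(\d+)', version)
# groups 1-4 capture the legacy Interbase-style signature (6, 3, 3, 12981);
# groups 5-6 capture the real Firebird version, so (major, minor, build) is:
assert tuple(int(x) for x in m.group(5, 6, 4)) == (2, 0, 12981)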
Example 16
    def reflecttable(self, connection, table, include_columns):
        preparer = self.identifier_preparer

        # search for table, including across synonyms and dblinks.
        # locate the actual name of the table, the real owner, and any dblink clause needed.
        actual_name, owner, dblink = self._resolve_table_owner(
            connection, self._denormalize_name(table.name), table)

        c = connection.execute(
            "select COLUMN_NAME, DATA_TYPE, DATA_LENGTH, DATA_PRECISION, DATA_SCALE, NULLABLE, DATA_DEFAULT from ALL_TAB_COLUMNS%(dblink)s where TABLE_NAME = :table_name and OWNER = :owner"
            % {'dblink': dblink}, {
                'table_name': actual_name,
                'owner': owner
            })

        while True:
            row = c.fetchone()
            if row is None:
                break
            found_table = True

            #print "ROW:" , row
            (colname, coltype, length, precision, scale, nullable,
             default) = (self._normalize_name(row[0]), row[1], row[2], row[3],
                         row[4], row[5] == 'Y', row[6])

            if include_columns and colname not in include_columns:
                continue

            # INTEGER if the scale is 0 and precision is null
            # NUMBER if the scale and precision are both null
            # NUMBER(9,2) if the precision is 9 and the scale is 2
            # NUMBER(3) if the precision is 3 and scale is 0
            #length is ignored except for CHAR and VARCHAR2
            if coltype == 'NUMBER':
                if precision is None and scale is None:
                    coltype = OracleNumeric
                elif precision is None and scale == 0:
                    coltype = OracleInteger
                else:
                    coltype = OracleNumeric(precision, scale)
            elif coltype == 'CHAR' or coltype == 'VARCHAR2':
                coltype = ischema_names.get(coltype, OracleString)(length)
            else:
                coltype = re.sub(r'\(\d+\)', '', coltype)
                try:
                    coltype = ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, colname))
                    coltype = sqltypes.NULLTYPE

            colargs = []
            if default is not None:
                colargs.append(schema.PassiveDefault(sql.text(default)))

            table.append_column(
                schema.Column(colname, coltype, nullable=nullable, *colargs))

        if not table.columns:
            raise exceptions.AssertionError(
                "Couldn't find any column information for table %s" %
                actual_name)

        c = connection.execute(
            """SELECT
             ac.constraint_name,
             ac.constraint_type,
             loc.column_name AS local_column,
             rem.table_name AS remote_table,
             rem.column_name AS remote_column,
             rem.owner AS remote_owner
           FROM all_constraints%(dblink)s ac,
             all_cons_columns%(dblink)s loc,
             all_cons_columns%(dblink)s rem
           WHERE ac.table_name = :table_name
           AND ac.constraint_type IN ('R','P')
           AND ac.owner = :owner
           AND ac.owner = loc.owner
           AND ac.constraint_name = loc.constraint_name
           AND ac.r_owner = rem.owner(+)
           AND ac.r_constraint_name = rem.constraint_name(+)
           -- order multiple primary keys correctly
           ORDER BY ac.constraint_name, loc.position, rem.position""" %
            {'dblink': dblink}, {
                'table_name': actual_name,
                'owner': owner
            })

        fks = {}
        while True:
            row = c.fetchone()
            if row is None:
                break
            #print "ROW:" , row
            (cons_name, cons_type, local_column, remote_table, remote_column,
             remote_owner) = row[0:2] + tuple(
                 [self._normalize_name(x) for x in row[2:]])
            if cons_type == 'P':
                table.primary_key.add(table.c[local_column])
            elif cons_type == 'R':
                try:
                    fk = fks[cons_name]
                except KeyError:
                    fk = ([], [])
                    fks[cons_name] = fk
                if remote_table is None:
                    # ticket 363
                    util.warn(
                        ("Got 'None' querying 'table_name' from "
                         "all_cons_columns%(dblink)s - does the user have "
                         "proper rights to the table?") % {'dblink': dblink})
                    continue
                refspec = ".".join([remote_table, remote_column])
                schema.Table(remote_table,
                             table.metadata,
                             autoload=True,
                             autoload_with=connection,
                             owner=remote_owner)
                if local_column not in fk[0]:
                    fk[0].append(local_column)
                if refspec not in fk[1]:
                    fk[1].append(refspec)

        for name, value in fks.iteritems():
            table.append_constraint(
                schema.ForeignKeyConstraint(value[0], value[1], name=name))
Example 17
    def reflecttable(self, connection, table, include_columns):
        c = connection.execute(
            "select distinct OWNER from systables where tabname=?",
            table.name.lower())
        rows = c.fetchall()
        if not rows:
            raise exceptions.NoSuchTableError(table.name)
        else:
            if table.owner is not None:
                if table.owner.lower() in [r[0] for r in rows]:
                    owner = table.owner.lower()
                else:
                    raise exceptions.AssertionError(
                        "Specified owner %s does not own table %s" %
                        (table.owner, table.name))
            else:
                if len(rows) == 1:
                    owner = rows[0][0]
                else:
                    raise exceptions.AssertionError(
                        "There are multiple tables with name %s in the schema, you must specifie owner"
                        % table.name)

        c = connection.execute(
            """select colname , coltype , collength , t3.default , t1.colno from syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3
                                    where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
                                      and t3.tabid = t2.tabid and t3.colno = t1.colno
                                    order by t1.colno""", table.name.lower(),
            owner)
        rows = c.fetchall()

        if not rows:
            raise exceptions.NoSuchTableError(table.name)

        for name, colattr, collength, default, colno in rows:
            name = name.lower()
            if include_columns and name not in include_columns:
                continue

            # in 7.31, coltype = 0x000
            #                       ^^-- column type
            #                      ^-- 1 not null , 0 null
            nullable, coltype = divmod(colattr, 256)
            if coltype not in (0, 13) and default:
                default = default.split()[-1]

            if coltype == 0 or coltype == 13:  # char , varchar
                coltype = ischema_names.get(coltype, InfoString)(collength)
                if default:
                    default = "'%s'" % default
            elif coltype == 5:  # decimal
                precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
                if scale == 255:
                    scale = 0
                coltype = InfoNumeric(precision, scale)
            else:
                try:
                    coltype = ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, name))
                    coltype = sqltypes.NULLTYPE

            colargs = []
            if default is not None:
                colargs.append(schema.PassiveDefault(sql.text(default)))

            table.append_column(
                schema.Column(name,
                              coltype,
                              nullable=(nullable == 0),
                              *colargs))

        # FK
        c = connection.execute(
            """select t1.constrname as cons_name , t1.constrtype as cons_type ,
                                         t4.colname as local_column , t7.tabname as remote_table ,
                                         t6.colname as remote_column
                                    from sysconstraints as t1 , systables as t2 ,
                                         sysindexes as t3 , syscolumns as t4 ,
                                         sysreferences as t5 , syscolumns as t6 , systables as t7 ,
                                         sysconstraints as t8 , sysindexes as t9
                                   where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? and t1.constrtype = 'R'
                                     and t3.tabid = t2.tabid and t3.idxname = t1.idxname
                                     and t4.tabid = t2.tabid and t4.colno = t3.part1
                                     and t5.constrid = t1.constrid and t8.constrid = t5.primary
                                     and t6.tabid = t5.ptabid and t6.colno = t9.part1 and t9.idxname = t8.idxname
                                     and t7.tabid = t5.ptabid""",
            table.name.lower(), owner)
        rows = c.fetchall()
        fks = {}
        for cons_name, cons_type, local_column, remote_table, remote_column in rows:
            try:
                fk = fks[cons_name]
            except KeyError:
                fk = ([], [])
                fks[cons_name] = fk
            refspec = ".".join([remote_table, remote_column])
            schema.Table(remote_table,
                         table.metadata,
                         autoload=True,
                         autoload_with=connection)
            if local_column not in fk[0]:
                fk[0].append(local_column)
            if refspec not in fk[1]:
                fk[1].append(refspec)

        for name, value in fks.iteritems():
            table.append_constraint(
                schema.ForeignKeyConstraint(value[0], value[1], None))

        # PK
        c = connection.execute(
            """select t1.constrname as cons_name , t1.constrtype as cons_type ,
                                         t4.colname as local_column
                                    from sysconstraints as t1 , systables as t2 ,
                                         sysindexes as t3 , syscolumns as t4
                                   where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? and t1.constrtype = 'P'
                                     and t3.tabid = t2.tabid and t3.idxname = t1.idxname
                                     and t4.tabid = t2.tabid and t4.colno = t3.part1""",
            table.name.lower(), owner)
        rows = c.fetchall()
        for cons_name, cons_type, local_column in rows:
            table.primary_key.add(table.c[local_column])
Example 18
 def process_bind_param(self, value, engine):
     if value not in self.values:
         raise exceptions.AssertionError('"%s" not in Enum.values' % value)
     return self.values.index(value)
Example 19
    def reflecttable(self, connection, table):
        preparer = self.identifier_preparer
        if not preparer.should_quote(table):
            name = table.name.upper()
        else:
            name = table.name
        c = connection.execute ("select distinct OWNER from ALL_TAB_COLUMNS where TABLE_NAME = :table_name", {'table_name':name})
        rows = c.fetchall()
        if not rows :
            raise exceptions.NoSuchTableError(table.name)
        else:
            if table.owner is not None:
                if table.owner.upper() in [r[0] for r in rows]:
                    owner = table.owner.upper()
                else:
                    raise exceptions.AssertionError("Specified owner %s does not own table %s"%(table.owner, table.name))
            else:
                if len(rows)==1:
                    owner = rows[0][0]
                else:
                    raise exceptions.AssertionError("There are multiple tables with name %s in the schema, you must specifie owner"%table.name)

        c = connection.execute ("select COLUMN_NAME, DATA_TYPE, DATA_LENGTH, DATA_PRECISION, DATA_SCALE, NULLABLE, DATA_DEFAULT from ALL_TAB_COLUMNS where TABLE_NAME = :table_name and OWNER = :owner", {'table_name':name, 'owner':owner})
        
        while True:
            row = c.fetchone()
            if row is None:
                break
            found_table = True

            #print "ROW:" , row
            (name, coltype, length, precision, scale, nullable, default) = (
                row[0], row[1], row[2], row[3], row[4], row[5] == 'Y', row[6])

            # INTEGER if the scale is 0 and precision is null
            # NUMBER if the scale and precision are both null
            # NUMBER(9,2) if the precision is 9 and the scale is 2
            # NUMBER(3) if the precision is 3 and scale is 0
            #length is ignored except for CHAR and VARCHAR2
            if coltype == 'NUMBER':
                if precision is None and scale is None:
                    coltype = OracleNumeric
                elif precision is None and scale == 0:
                    coltype = OracleInteger
                else:
                    coltype = OracleNumeric(precision, scale)
            elif coltype == 'CHAR' or coltype == 'VARCHAR2':
                coltype = ischema_names.get(coltype, OracleString)(length)
            else:
                coltype = re.sub(r'\(\d+\)', '', coltype)
                try:
                    coltype = ischema_names[coltype]
                except KeyError:
                    raise exceptions.AssertionError(
                        "Can't get coltype for type '%s'" % coltype)

            colargs = []
            if default is not None:
                colargs.append(schema.PassiveDefault(sql.text(default)))
          
            # if name comes back as all upper, assume it's case folded
            if name.upper() == name:
                name = name.lower()
            
            table.append_column(schema.Column(name, coltype, nullable=nullable, *colargs))

       
        c = connection.execute(constraintSQL,
                               {'table_name': table.name.upper(), 'owner': owner})
        fks = {}
        while True:
            row = c.fetchone()
            if row is None:
                break
            #print "ROW:" , row                
            (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = row
            if cons_type == 'P':
                table.primary_key.add(table.c[local_column])
            elif cons_type == 'R':
                try:
                    fk = fks[cons_name]
                except KeyError:
                    fk = ([], [])
                    fks[cons_name] = fk
                refspec = ".".join([remote_table, remote_column])
                schema.Table(remote_table, table.metadata, autoload=True, autoload_with=connection, owner=remote_owner)
                if local_column not in fk[0]:
                    fk[0].append(local_column)
                if refspec not in fk[1]:
                    fk[1].append(refspec)

        for name, value in fks.iteritems():
            table.append_constraint(schema.ForeignKeyConstraint(value[0], value[1], name=name))
Example 20
 def __init__(self, *args, **kwargs):
     if not hasattr(self.__class__, 'impl'):
         raise exceptions.AssertionError(
             "TypeDecorator implementations require a class-level variable 'impl' which refers to the class of type being decorated"
         )
     self.impl = self.__class__.impl(*args, **kwargs)
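
A minimal sketch of a TypeDecorator subclass satisfying the class-level 'impl' requirement checked above; the class name and behavior are illustrative, and process_bind_param follows the older engine-argument form shown in Example 18:

import sqlalchemy.types as types

class LowerCaseString(types.TypeDecorator):
    # required class-level attribute naming the decorated type
    impl = types.String

    def process_bind_param(self, value, engine):
        # normalize values on the way into the database
        if value is not None:
            value = value.lower()
        return value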