        def process_fk(catalog, table, fk_name, fk_rows):
            foreign_key = grt.classes.db_ForeignKey()
            if fk_name in cls._connections[connection.__id__]['fk_names']:
                while True:
                    suffix = '_%06d' % random.randint(0, 999999)
                    if fk_name + suffix not in cls._connections[connection.__id__]['fk_names']:
                        break
                fk_name += suffix
            foreign_key.name = fk_name
            foreign_key.owner = table
            foreign_key.deleteRule = get_action(fk_rows[0].delete_rule)
            foreign_key.updateRule = get_action(fk_rows[0].update_rule)
            foreign_key.modelOnly = 0
            
            # Find the referenced table:
            referenced_schema = find_object_with_name(catalog.schemata, fk_rows[0].pktable_schem) if fk_rows[0].pktable_schem else schema
            foreign_key.referencedTable = find_object_with_name(referenced_schema.tables, fk_rows[0].pktable_name) if fk_rows[0].pktable_name else table
            
            for fk_row in fk_rows:
                column = find_object_with_name(table.columns, fk_row.fkcolumn_name)
                if not column:
                    grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.fkcolumn_name, table.name) )
                    continue

                ref_column = find_object_with_name(foreign_key.referencedTable.columns, fk_row.pkcolumn_name)
                if not ref_column:
                    grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.pkcolumn_name, foreign_key.referencedTable.name) )
                    continue
                
                foreign_key.columns.append(column)
                foreign_key.referencedColumns.append(ref_column)

            cls._connections[connection.__id__]['fk_names'][foreign_key.name] = table 
            table.foreignKeys.append(foreign_key)
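The get_action helper is not part of this snippet. A minimal sketch of what it presumably does, assuming the rows come from pyodbc's foreignKeys() call and carry the standard ODBC referential-action codes:

def get_action(rule_code):
    # Hypothetical helper: map ODBC UPDATE_RULE/DELETE_RULE codes to GRT rule names.
    return {0: 'CASCADE',      # SQL_CASCADE
            1: 'RESTRICT',     # SQL_RESTRICT
            2: 'SET NULL',     # SQL_SET_NULL
            3: 'NO ACTION',    # SQL_NO_ACTION
            4: 'SET DEFAULT',  # SQL_SET_DEFAULT
            }.get(rule_code, 'NO ACTION')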
Example #2
    def reverseEngineerTableFKs(cls, connection, table):
        """Reverse engineers the foreign keys for the given table."""

        schema = table.owner
        catalog = schema.owner

        if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its foreign keys
            grt.send_error('Migration: reverseEngineerTableFKs: Reverse engineering of table %s was attempted but the table has no columns attribute' % table.name)
            return 1
        
        query = """SELECT si.index_name, sfk.foreign_table_id, sfk.foreign_index_id, sfk.primary_table_id, sfk.primary_index_id
FROM SYSFKEY sfk
JOIN SYSIDX si ON (sfk.foreign_index_id=si.index_id AND sfk.foreign_table_id=si.table_id)
JOIN SYSTAB st ON sfk.foreign_table_id=st.table_id
JOIN SYSUSER su ON st.creator=su.user_id
WHERE st.table_name='%s' AND su.user_name='%s'
ORDER BY sfk.primary_index_id""" % (table.name, schema.name)

        fk_cursor = cls.get_connection(connection).cursor()
        for fk_name, this_table_id, this_index_id, other_table_id, other_index_id in fk_cursor.execute(query):
            this_column_query = """SELECT stc.column_name
FROM SYSIDXCOL sic
JOIN SYSTABCOL stc ON (sic.table_id=stc.table_id AND sic.column_id=stc.column_id)
WHERE sic.table_id=%d AND sic.index_id=%d
ORDER BY sic.sequence""" % (this_table_id, this_index_id)
            other_column_query = """SELECT su.user_name, st.table_name, stc.column_name
FROM SYSIDXCOL sic
JOIN SYSTABCOL stc ON (sic.table_id=stc.table_id AND sic.column_id=stc.column_id)
JOIN SYSTAB st ON stc.table_id=st.table_id
JOIN SYSUSER su ON st.creator=su.user_id
WHERE sic.table_id=%d AND sic.index_id=%d
ORDER BY sic.sequence""" % (other_table_id, other_index_id)

            these_columns = cls.execute_query(connection, this_column_query).fetchall()
            other_columns = cls.execute_query(connection, other_column_query).fetchall()
            
            foreign_key = grt.classes.db_ForeignKey()
            foreign_key.owner = table
            foreign_key.name = fk_name
            
            # Find the referenced table:
            referenced_schema = find_object_with_name(catalog.schemata, other_columns[0][0]) if other_columns[0][0] else schema
            foreign_key.referencedTable = find_object_with_name(referenced_schema.tables, other_columns[0][1]) if other_columns[0][1] else table
            
            for (this_column_name,), (_, _, other_column_name) in zip(these_columns, other_columns):
                column = find_object_with_name(table.columns, this_column_name)
                if not column:
                    grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (this_column_name, table.name) )
                    continue

                ref_column = find_object_with_name(foreign_key.referencedTable.columns, other_column_name)
                if not ref_column:
                    grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (other_column_name, foreign_key.referencedTable.name) )
                    continue
                
                foreign_key.columns.append(column)
                foreign_key.referencedColumns.append(ref_column)

            table.foreignKeys.append(foreign_key)
        return 0
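The find_object_with_name helper used throughout these examples is a plain linear lookup over a GRT object list; a minimal sketch, assuming case-sensitive name matching:

def find_object_with_name(object_list, name):
    # Return the first object whose .name matches, or None if absent.
    for obj in object_list:
        if obj.name == name:
            return obj
    return None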
Example #4
        def process_fk(catalog, table, fk_name, fk_rows):
            foreign_key = grt.classes.db_ForeignKey()
            foreign_key.name = fk_name
            foreign_key.owner = table
            foreign_key.deleteRule = get_action(fk_rows[0].delete_rule)
            foreign_key.updateRule = get_action(fk_rows[0].update_rule)
            foreign_key.modelOnly = 0
            
            # Find the referenced table:
            referenced_schema = find_object_with_name(catalog.schemata, fk_rows[0].pktable_schem) if fk_rows[0].pktable_schem else schema
            foreign_key.referencedTable = find_object_with_name(referenced_schema.tables, fk_rows[0].pktable_name) if fk_rows[0].pktable_name else table
            
            for fk_row in fk_rows:
                column = find_object_with_name(table.columns, fk_row.fkcolumn_name)
                if not column:
                    grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.fkcolumn_name, table.name) )
                    continue

                ref_column = find_object_with_name(foreign_key.referencedTable.columns, fk_row.pkcolumn_name)
                if not ref_column:
                    grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.pkcolumn_name, foreign_key.referencedTable.name) )
                    continue
                
                foreign_key.columns.append(column)
                foreign_key.referencedColumns.append(ref_column)
            table.foreignKeys.append(foreign_key)
Example #5
        def process_fk(catalog, table, fk_name, fk_rows):
            foreign_key = grt.classes.db_ForeignKey()
            if fk_name in cls._connections[connection.__id__]['fk_names']:
                while True:
                    suffix = '_%06d' % random.randint(0, 999999)
                    if fk_name + suffix not in cls._connections[connection.__id__]['fk_names']:
                        break
                fk_name += suffix
            foreign_key.name = fk_name
            foreign_key.owner = table
            foreign_key.deleteRule = get_delete_action(fk_rows[0].grbit)
            foreign_key.updateRule = get_update_action(fk_rows[0].grbit)
            foreign_key.modelOnly = 0
            
            # Find the referenced table:
            foreign_key.referencedTable = find_object_with_name(catalog.schemata[0].tables, fk_rows[0].szReferencedObject)
            if not foreign_key.referencedTable:
                grt.send_error('Migration: reverseEngineerTableFKs: Table "%s" not found in schemata "%s"' % (fk_rows[0].szReferencedObject, catalog.schemata[0].name) )
                return 1
            
            for fk_row in fk_rows:
                column = find_object_with_name(table.columns, fk_row.szColumn)
                if not column:
                    grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.szColumn, table.name) )
                    continue

                ref_column = find_object_with_name(foreign_key.referencedTable.columns, fk_row.szReferencedColumn)
                if not ref_column:
                    grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.szReferencedColumn, foreign_key.referencedTable.name) )
                    continue
                
                foreign_key.columns.append(column)
                foreign_key.referencedColumns.append(ref_column)

            # Find and delete indexes that are identical to FKs
            for index in reversed(table.indices):
                if table.primaryKey != index and len(index.columns) == len(foreign_key.columns):
                    match = True
                    for i, col in enumerate(index.columns):
                        if foreign_key.columns[i] != col.referencedColumn:
                            match = False
                            break
                    if match:
                        grt.send_warning("Migration: reverseEngineerTable: Skipping duplicate index %s from table %s\n" % (col.name, table.name))
                        table.indices.remove(index)

            cls._connections[connection.__id__]['fk_names'][foreign_key.name] = table 
            table.foreignKeys.append(foreign_key)
    def migrateColumnDefaultValue(self, state, default_value, source_column,
                                  target_column):
        target_default_value = default_value
        if source_column.simpleType:
            source_datatype = source_column.simpleType.name
            if source_datatype == 'TIMESTAMP':
                if default_value == 'getdate()':
                    target_default_value = 'CURRENT_TIMESTAMP'
            elif source_datatype in ['DATETIME', 'SMALLDATETIME']:
                if source_column.defaultValue == 'getdate()':
                    target_default_value = 'CURRENT_TIMESTAMP'

                    # Only timestamp supports CURRENT_TIMESTAMP, so force the target type to it
                    target_column.simpleType = find_object_with_name(
                        state.targetCatalog.simpleDatatypes, 'TIMESTAMP')
                    target_column.length = -1
                    state.addMigrationLogEntry(
                        0, source_column, target_column,
                        'Default value is %s, so type was changed from %s to TIMESTAMP'
                        % (default_value, source_datatype))

            if default_value and not default_value.startswith(
                    "'") and target_default_value == default_value:
                # not a string, check for numeric literals
                try:
                    float(default_value)
                except ValueError:
                    # not a numeric literal
                    target_default_value = ''
                    state.addMigrationLogEntry(
                        1, source_column, target_column,
                        'Default value %s is not supported' % default_value)

        return target_default_value
def reverseEngineerTablePK(connection, table):
    """Reverse engineers the primary key for the given table."""

    schema = table.owner
    catalog = schema.owner
    execute_query(connection, 'USE %s' % catalog.name)
    query = """SELECT sc.name
FROM sysobjects so JOIN syskeys sk ON so.id=sk.id
     JOIN syscolumns sc ON sc.id=sk.id AND sc.colid IN (sk.key1, sk.key2, sk.key3, sk.key4, sk.key5, sk.key6, sk.key7, sk.key8)
WHERE so.uid=USER_ID(?) AND sk.id=OBJECT_ID(?) AND sk.type=1"""

    if len(table.columns) == 0:
        grt.send_error(
            'Sybase reverseEngineerTablePK',
            "Reverse engineer of table's %s.%s primary key was attempted but the table has no columns attribute"
            % (schema.name, table.name))
        return 1  # Table must have columns reverse engineered before we can rev eng its primary key

    pk_col_names = [
        row[0]
        for row in execute_query(connection, query, schema.name, table.name)
    ]

    for pk_column in pk_col_names:
        column = find_object_with_name(table.columns, pk_column)
        if column:
            table.addPrimaryKeyColumn(column)
    return 0
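The module-level functions above rely on an execute_query wrapper that is not shown. A rough sketch, under the assumptions that a module-level get_connection() exists (analogous to cls.get_connection in the class-based variants) and that extra positional arguments are forwarded as ODBC '?' parameters:

def execute_query(connection, query, *params):
    # Hypothetical wrapper: run the query on the connection's cursor and return the
    # cursor, so callers can iterate it or call fetchall()/fetchone() on the result.
    return get_connection(connection).cursor().execute(query, *params)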
    def migrateColumnDefaultValue(self, state, default_value, source_column, target_column):
        target_default_value = default_value
        if source_column.simpleType:
            source_datatype = source_column.simpleType.name
            if source_datatype == 'TIMESTAMP':
                if default_value == 'getdate()':
                    target_default_value = 'CURRENT_TIMESTAMP'
            elif source_datatype in ['DATETIME', 'SMALLDATETIME']:
                if source_column.defaultValue == 'getdate()':
                    target_default_value = 'CURRENT_TIMESTAMP'

                    # Only timestamp supports CURRENT_TIMESTAMP, so force the target type to it
                    target_column.simpleType = find_object_with_name(state.targetCatalog.simpleDatatypes, 'TIMESTAMP')
                    state.addMigrationLogEntry(0, source_column, target_column, 
                              'Default value is %s, so type was changed from %s to TIMESTAMP' % (default_value, source_datatype))

            if default_value and not default_value.startswith("'") and target_default_value == default_value:
                # not a string, check for numeric literals
                try:
                    float(default_value)
                except ValueError:
                    # not a numeric literal
                    target_default_value = ''
                    state.addMigrationLogEntry(1, source_column, target_column, 
                              'Default value %s is not supported' % default_value)
                    
        return target_default_value
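To illustrate the effect of this method (illustrative column/value pairs, not taken from the source):

#   DATETIME column, default getdate()  -> CURRENT_TIMESTAMP, and the target type is forced to TIMESTAMP
#   INT      column, default 42         -> kept as 42 (parses as a numeric literal)
#   INT      column, default db_name()  -> dropped ('' plus a 'not supported' log entry: unquoted and non-numeric)
#   VARCHAR  column, default 'abc'      -> kept as-is (quoted string literals pass through untouched)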
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key(s) for the given table."""

        schema = table.owner
        catalog = schema.owner


        if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
            grt.send_error('Migration: reverseEngineerTablePK: Reverse engineering of table %s was attempted but the table has no columns attribute' % table.name)
            return 1
        
        # Primary keys and indices come together in the SYSIDX system view, so we'll rev eng them at once:
        query = """SELECT st.table_id, si.index_id, si.index_name, si.index_category, si."unique"
FROM SYSIDX si
JOIN SYSTAB st ON si.table_id=st.table_id
JOIN SYSUSER su ON st.creator=su.user_id
WHERE st.table_name='%s' AND su.user_name='%s'
ORDER BY si.index_id""" % (table.name, schema.name)
        idx_cursor = cls.get_connection(connection).cursor()
        for table_id, index_id, index_name, index_category, index_unique in idx_cursor.execute(query):
            index = grt.classes.db_Index()
            index.name = index_name
            index.isPrimary = 1 if index_category == 1 else 0
            index.unique = 1 if index_unique in (1, 2) else 0
            if index_category == 1:
                index.indexType = 'PRIMARY'
            elif index_category == 2:
                continue  # This is a foreign key, will be handled when reverse engineering them
            elif index_category == 3:  # Can be a regular index or a unique constraint
                if index_unique == 2:
                    index.indexType = 'UNIQUE'
                else:
                    index.indexType = 'INDEX'
            else:
                index.indexType = 'FULLTEXT'
            
    #        index.hasFilter = False  # TODO: Find out if there's a way to determine this

            # Get the columns for the index:
            idx_cols_query = """SELECT sc.column_name, sic."order"
FROM SYSIDXCOL sic
JOIN SYSTAB st ON sic.table_id=st.table_id
JOIN SYSTABCOL sc ON (sc.column_id = sic.column_id AND sc.table_id = sic.table_id)
WHERE st.table_id=%s AND sic.index_id=%s
ORDER BY sic.sequence""" % (table_id, index_id)
            idx_cols_cursor = cls.get_connection(connection).cursor()
            for column_name, order in idx_cols_cursor.execute(idx_cols_query):
                column = find_object_with_name(table.columns, column_name)
                if column:
                    index_column = grt.classes.db_IndexColumn()
                    index_column.name = index_name + '.' + column_name
                    index_column.referencedColumn = column
                    index_column.descend = 1 if order and order.upper() == 'D' else 0
                    index.columns.append(index_column)
            table.addIndex(index)

            if index.isPrimary:
                table.primaryKey = index
        return 0
Example #11
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key(s) for the given table."""

        schema = table.owner


        if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
            grt.send_error('Migration: reverseEngineerTablePK: Reverse engineering of table %s was attempted but the table has no columns attribute' % table.name)
            return 1
        
        # Primary keys and indices come together in the SYSIDX system view, so we'll rev eng them at once:
        query = """SELECT st.table_id, si.index_id, si.index_name, si.index_category, si."unique"
FROM SYSIDX si
JOIN SYSTAB st ON si.table_id=st.table_id
JOIN SYSUSER su ON st.creator=su.user_id
WHERE st.table_name='%s' AND su.user_name='%s'
ORDER BY si.index_id""" % (table.name, schema.name)
        idx_cursor = cls.get_connection(connection).cursor()
        for table_id, index_id, index_name, index_category, index_unique in idx_cursor.execute(query):
            index = grt.classes.db_Index()
            index.name = index_name
            index.isPrimary = 1 if index_category == 1 else 0
            index.unique = 1 if index_unique in (1, 2) else 0
            if index_category == 1:
                index.indexType = 'PRIMARY'
            elif index_category == 2:
                continue  # This is a foreign key, will be handled when reverse engineering them
            elif index_category == 3:  # Can be a regular index or a unique constraint
                if index_unique == 2:
                    index.indexType = 'UNIQUE'
                else:
                    index.indexType = 'INDEX'
            else:
                index.indexType = 'FULLTEXT'
            
    #        index.hasFilter = False  # TODO: Find out if there's a way to determine this

            # Get the columns for the index:
            idx_cols_query = """SELECT sc.column_name, sic."order"
FROM SYSIDXCOL sic
JOIN SYSTAB st ON sic.table_id=st.table_id
JOIN SYSTABCOL sc ON (sc.column_id = sic.column_id AND sc.table_id = sic.table_id)
WHERE st.table_id=%s AND sic.index_id=%s
ORDER BY sic.sequence""" % (table_id, index_id)
            idx_cols_cursor = cls.get_connection(connection).cursor()
            for column_name, order in idx_cols_cursor.execute(idx_cols_query):
                column = find_object_with_name(table.columns, column_name)
                if column:
                    index_column = grt.classes.db_IndexColumn()
                    index_column.name = index_name + '.' + column_name
                    index_column.referencedColumn = column
                    index_column.descend = 1 if order and order.upper() == 'D' else 0
                    index.columns.append(index_column)
            table.addIndex(index)

            if index.isPrimary:
                table.primaryKey = index
        return 0
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key(s) for the given table."""

        schema = table.owner
        catalog = schema.owner

        query = """SELECT tc.CONSTRAINT_NAME, kcu.COLUMN_NAME
    FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
      JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS kcu
        ON kcu.CONSTRAINT_SCHEMA = tc.CONSTRAINT_SCHEMA
       AND kcu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
       AND kcu.TABLE_SCHEMA = tc.TABLE_SCHEMA
       AND kcu.TABLE_NAME = tc.TABLE_NAME
    WHERE tc.CONSTRAINT_TYPE='PRIMARY KEY' AND tc.TABLE_CATALOG = '%s' AND tc.TABLE_SCHEMA = '%s' AND tc.TABLE_NAME = '%s'
    ORDER BY tc.CONSTRAINT_NAME, kcu.ORDINAL_POSITION""" % (
            catalog.name,
            schema.name,
            table.name,
        )

        if (
            len(table.columns) == 0
        ):  # Table must have columns reverse engineered before we can rev eng its primary key(s)
            grt.send_error(
                "%s reverseEngineerTablePK: Reverse engineer of table %s was attempted but the table has "
                "no columns attribute" % (cls.getTargetDBMSName(), table.name)
            )
            return 1

        fk_rows = cls.execute_query(connection, query).fetchall()
        if fk_rows:
            index = grt.classes.db_Index()
            index.name = fk_rows[0][0]
            index.isPrimary = 1
            index.unique = 1
            index.indexType = "PRIMARY"

            for _, pk_col in fk_rows:
                table_column = find_object_with_name(table.columns, pk_col)
                if not table_column:
                    grt.send_warning(
                        '%s reverseEngineerTablePK: Could not find column "%s" in table "%s" referenced '
                        'by primary key constraint "%s". The primary key will not be added.'
                        % (cls.getTargetDBMSName(), pk_col, table.name, index.name)
                    )
                    return 0

                index_column = grt.classes.db_IndexColumn()
                index_column.name = index.name + "." + pk_col
                index_column.referencedColumn = table_column

                index.columns.append(index_column)

            table.primaryKey = index
            table.addIndex(index)
        return 0
    def reverseEngineerTableIndices(cls, connection, table):
        schema = table.owner
        
        if len(table.columns) == 0:
            grt.send_error('%s: reverseEngineerTableIndices' % cls.getTargetDBMSName(), 
                'Reverse engineer of table %s.%s was attempted but the table has no columns attribute' % (schema.name, table.name) )
            return 1    # Table must have columns reverse engineered before we can rev eng its indices

        all_indices_query = """SELECT c2.relname, i.indisunique::int, i.indisclustered::int, i.indnatts, i.indkey
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_namespace n, pg_catalog.pg_index i
WHERE c.oid = i.indrelid AND i.indexrelid = c2.oid AND c.relnamespace = n.oid AND 
n.nspname = '%s' AND c.relname = '%s' AND i.indisprimary = False 
ORDER BY c2.relname""" % (schema.name, table.name)

        index_columns_query = """SELECT a.attname
FROM unnest(ARRAY%r) attrid
JOIN pg_catalog.pg_attribute a ON attrid=a.attnum
JOIN pg_catalog.pg_class c ON c.oid = a.attrelid
JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = '%s' AND c.relname = '%s'"""

        index_rows = cls.execute_query(connection, all_indices_query).fetchall()
        for index_name, is_unique, is_clustered, column_count, column_refs in index_rows:
            index = grt.classes.db_Index()
            index.name = index_name
            index.isPrimary = 0
            index.unique = is_unique
            index.indexType = ('UNIQUE' if is_unique else 'INDEX')
            #index.clustered = is_clustered

            # Get the columns for the index:
            cols = [ int(col) for col in column_refs.split() ]
            if column_count != len(cols):
                grt.send_warning('%s: reverseEngineerTableIndices' % cls.getTargetDBMSName(), 
                    'Reverse engineer of index %s.%s was attempted but its declared column count (indnatts) '
                    'differs from the number of column references in indkey. Skipping index!' % (schema.name, index_name) )
                continue

            for (column_name, ) in cls.execute_query(connection, index_columns_query % (cols, schema.name, table.name)):
                column = find_object_with_name(table.columns, column_name)
                if column:
                    index_column = grt.classes.db_IndexColumn()
                    index_column.name = index_name + '.' + column_name
                    #index_column.descend = is_descending_key
                    index_column.referencedColumn = column
                    index.columns.append(index_column)
                else:
                    grt.send_warning('%s: reverseEngineerTableIndices' % cls.getTargetDBMSName(), 
                        'Reverse engineer of index %s.%s was attempted but the referenced column %s '
                        'could not be found on table %s. Skipping index!' % (schema.name, index_name, column_name, table.name) )
                    continue

            table.addIndex(index)
        return 0
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key for the given table."""

        schema = table.owner
        catalog = schema.owner


        if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key
            grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
            return 1
        
        # Find the index name associated with the PK:
        pk_index_rows = cls.get_connection(connection).cursor().primaryKeys(catalog=catalog.name, schema=schema.name, table=table.name).fetchall()
        pk_index_name = pk_index_rows[0].pk_name if pk_index_rows else ''

        indices_dict = {}  # Map the indices names to their respective columns:
        for row in cls.get_connection(connection).cursor().statistics(catalog=catalog.name, schema=schema.name, table=table.name):
            if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
                continue
            indices_dict.setdefault(row.index_name, []).append(row)

        if pk_index_name not in indices_dict:  # The primary key is not listed in cursor().statistics
            from collections import namedtuple
            Row = namedtuple('IndexRow', ['table_cat', 'table_schem', 'table_name', 'non_unique', 'index_qualifier',
                                          'index_name', 'type', 'ordinal_position', 'column_name', 'asc_or_desc',
                                          'cardinality', 'pages', 'filter_condition'])
            for pk_index_row in pk_index_rows:
                row = Row(None, schema.name, table.name, 0, None, pk_index_name, 1, 1, pk_index_row.column_name, 'A', None, None, None)
                indices_dict.setdefault(pk_index_name, []).append(row)
        
        for index_name, row_list in list(indices_dict.items()):
            index = grt.classes.db_Index()
            index.name = index_name
            index.isPrimary = 1 if index_name == pk_index_name else 0
            index.unique = not row_list[0].non_unique
            index.indexType = 'UNIQUE' if index.unique else 'INDEX'
    #        index.hasFilter = False  # TODO: Find out if there's a way to determine this

            # Get the columns for the index:
            for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
                column = find_object_with_name(table.columns, row.column_name)
                if column:
                    index_column = grt.classes.db_IndexColumn()
                    index_column.name = index_name + '.' + row.column_name
                    index_column.referencedColumn = column
                    index.columns.append(index_column)
            table.addIndex(index)

            if index.isPrimary:
                table.primaryKey = index

        return 0
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key(s) for the given table."""

        schema = table.owner
        catalog = schema.owner


        if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
            grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
            return 1
        
        # Find the index name associated with the PK:
        pk_index_rows = cls.get_connection(connection).cursor().primaryKeys(catalog=catalog.name, schema=schema.name, table=table.name).fetchall()
        pk_index_name = pk_index_rows[0].pk_name if pk_index_rows else ''

        indices_dict = {}  # Map the indices names to their respective columns:
        for row in cls.get_connection(connection).cursor().statistics(catalog=catalog.name, schema=schema.name, table=table.name):
            if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
                continue
            indices_dict.setdefault(row.index_name, []).append(row)

        if pk_index_name not in indices_dict:  # The primary key is not listed in cursor().statistics
            from collections import namedtuple
            Row = namedtuple('IndexRow', ['table_cat', 'table_schem', 'table_name', 'non_unique', 'index_qualifier',
                                          'index_name', 'type', 'ordinal_position', 'column_name', 'asc_or_desc',
                                          'cardinality', 'pages', 'filter_condition'])
            for pk_index_row in pk_index_rows:
                row = Row(None, schema.name, table.name, 0, None, pk_index_name, 1, 1, pk_index_row.column_name, 'A', None, None, None)
                indices_dict.setdefault(pk_index_name, []).append(row)
        
        for index_name, row_list in indices_dict.iteritems():
            index = grt.classes.db_Index()
            index.name = index_name
            index.isPrimary = 1 if index_name == pk_index_name else 0
            index.unique = not row_list[0].non_unique
            index.indexType = 'UNIQUE' if index.unique else 'INDEX'
    #        index.hasFilter = False  # TODO: Find out if there's a way to determine this

            # Get the columns for the index:
            for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
                column = find_object_with_name(table.columns, row.column_name)
                if column:
                    index_column = grt.classes.db_IndexColumn()
                    index_column.name = index_name + '.' + row.column_name
                    index_column.referencedColumn = column
                    index.columns.append(index_column)
            table.addIndex(index)

            if index.isPrimary:
                table.primaryKey = index

        return 0
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key(s) for the given table."""

        schema = table.owner
        catalog = schema.owner

        query = """SELECT tc.CONSTRAINT_NAME, kcu.COLUMN_NAME
    FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
      JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS kcu
        ON kcu.CONSTRAINT_SCHEMA = tc.CONSTRAINT_SCHEMA
       AND kcu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
       AND kcu.TABLE_SCHEMA = tc.TABLE_SCHEMA
       AND kcu.TABLE_NAME = tc.TABLE_NAME
    WHERE tc.CONSTRAINT_TYPE='PRIMARY KEY' AND tc.TABLE_CATALOG = '%s' AND tc.TABLE_SCHEMA = '%s' AND tc.TABLE_NAME = '%s'
    ORDER BY tc.CONSTRAINT_NAME, kcu.ORDINAL_POSITION""" % (
            catalog.name, schema.name, table.name)

        if len(
                table.columns
        ) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
            grt.send_error(
                '%s reverseEngineerTablePK: Reverse engineer of table %s was attempted but the table has '
                'no columns attribute' % (cls.getTargetDBMSName(), table.name))
            return 1

        fk_rows = cls.execute_query(connection, query).fetchall()
        if fk_rows:
            index = grt.classes.db_Index()
            index.name = fk_rows[0][0]
            index.isPrimary = 1
            index.unique = 1
            index.indexType = 'PRIMARY'

            for _, pk_col in fk_rows:
                table_column = find_object_with_name(table.columns, pk_col)
                if not table_column:
                    grt.send_warning(
                        '%s reverseEngineerTablePK: Could not find column "%s" in table "%s" referenced '
                        'by primary key constraint "%s". The primary key will not be added.'
                        % (cls.getTargetDBMSName(), pk_col, table.name,
                           index.name))
                    return 0

                index_column = grt.classes.db_IndexColumn()
                index_column.name = index.name + '.' + pk_col
                index_column.referencedColumn = table_column

                index.columns.append(index_column)

            table.primaryKey = index
            table.addIndex(index)
        return 0
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key(s) for the given table."""

        if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
            grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
            return 1
        
        pk_index_name = 'PrimaryKey'

        indices_dict = {}  # Map the indices names to their respective columns:
        for row in cls.get_connection(connection).cursor().statistics(table=table.name):
            if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
                continue
            indices_dict.setdefault(row.index_name, []).append(row)

        for index_name, row_list in indices_dict.iteritems():
            index = grt.classes.db_Index()
            index.name = index_name
            index.isPrimary = 1 if index_name == pk_index_name else 0
            index.unique = not row_list[0].non_unique
            index.indexType = 'UNIQUE' if index.unique else 'INDEX'
    #        index.hasFilter = False  # TODO: Find out if there's a way to determine this

            skip = False
            # Get the columns for the index:
            for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
                column = find_object_with_name(table.columns, row.column_name)
                if column:
                    # skip indexes on LONGCHAR columns
                    if column.simpleType.name in ["LONGCHAR"]:
                        grt.send_warning("Migration: reverseEngineerTable: Skipping index %s.%s on a %s column\n" % (table.name, column.name, column.simpleType.name)) 
                        skip = True
                    else:
                        index_column = grt.classes.db_IndexColumn()
                        index_column.name = index_name + '.' + row.column_name
                        index_column.referencedColumn = column
                        index.columns.append(index_column)
                        if not column.isNotNull and index.isPrimary:
                            column.isNotNull = 1
                            grt.send_warning("Migration: reverseEngineerTablePK: column %s.%s was changed to NOT NULL because it's a Primary Key column\n" % (column.owner.name, column.name))
                else:
                    grt.send_warning("Migration: reverseEngineerTablePK: could not find column %s, belonging to key %s. Key will be skipped\n" % (row.column_name, index_name))
                    skip = True
            if not skip:
                table.addIndex(index)
                if index.isPrimary:
                    table.primaryKey = index

        return 0
Example #19
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key for the given table."""

        if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key
            grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
            return 1
        
        pk_index_name = 'PrimaryKey'

        indices_dict = {}  # Map the indices names to their respective columns:
        for row in cls.get_connection(connection).cursor().statistics(table=table.name):
            if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
                continue
            indices_dict.setdefault(row.index_name, []).append(row)

        for index_name, row_list in list(indices_dict.items()):
            index = grt.classes.db_Index()
            index.name = index_name
            index.isPrimary = 1 if index_name == pk_index_name else 0
            index.unique = not row_list[0].non_unique
            index.indexType = 'UNIQUE' if index.unique else 'INDEX'
    #        index.hasFilter = False  # TODO: Find out if there's a way to determine this

            skip = False
            # Get the columns for the index:
            for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
                column = find_object_with_name(table.columns, row.column_name)
                if column:
                    # skip indexes on LONGCHAR columns
                    if column.simpleType.name in ["LONGCHAR"]:
                        grt.send_warning("Migration: reverseEngineerTable: Skipping index %s.%s on a %s column\n" % (table.name, column.name, column.simpleType.name)) 
                        skip = True
                    else:
                        index_column = grt.classes.db_IndexColumn()
                        index_column.name = index_name + '.' + row.column_name
                        index_column.referencedColumn = column
                        index.columns.append(index_column)
                        if not column.isNotNull and index.isPrimary:
                            column.isNotNull = 1
                            grt.send_warning("Migration: reverseEngineerTablePK: column %s.%s was changed to NOT NULL because it's a Primary Key column\n" % (column.owner.name, column.name))
                else:
                    grt.send_warning("Migration: reverseEngineerTablePK: could not find column %s, belonging to key %s. Key will be skipped\n" % (row.column_name, index_name))
                    skip = True
            if not skip:
                table.addIndex(index)
                if index.isPrimary:
                    table.primaryKey = index

        return 0
Example #20
def reverseEngineerTriggers(connection, schema):
    execute_query(connection, 'USE %s' % schema.owner.name)  # catalog

    tables_with_triggers_query = """SELECT name, deltrig, instrig, updtrig
FROM sysobjects
WHERE uid = USER_ID(?) AND type='U'
AND(deltrig != 0 OR instrig != 0 OR updtrig != 0)"""

    trigger_details_query = """SELECT so.name AS trigger_name, sc.id AS trigger_id, sc.text AS trigger_definition
FROM syscomments sc JOIN sysobjects so ON sc.id=so.id
WHERE sc.id IN (%s)
ORDER BY so.name, sc.colid"""

    triggers = {}
    for row in execute_query(connection, tables_with_triggers_query, schema.name):
        if row[1] != 0:
            triggers.setdefault(row[1], [row[0], ''])[1] += ';DELETE'
        if row[2] != 0:
            triggers.setdefault(row[2], [row[0], ''])[1] += ';INSERT'
        if row[3] != 0:
            triggers.setdefault(row[3], [row[0], ''])[1] += ';UPDATE'

    step = 1.0 / (len(getTriggerNames(connection, schema.owner.name, schema.name)) + 1e-10)
    all_triggers = execute_query(connection, trigger_details_query % ', '.join(str(trig_id) for trig_id in triggers)) if triggers else None
    trigger_name2id = {}
    def register_trigger_name(row):
        trigger_name2id[row[0]] = row[1]
    if all_triggers:
        for idx, trigger_name, trigger_definition in join_multiline_content('trigger_name', 'trigger_definition',
                                                                            all_triggers, register_trigger_name):
            grt.send_progress(idx * step, 'Reverse engineering trigger %s.%s' % (schema.name, trigger_name))
            trigger = grt.classes.db_sybase_Trigger()
            trigger.name = trigger_name or ''
            trigger.sqlDefinition = trigger_definition
            trigger.timing = 'AFTER'  # All Sybase ASE triggers are fired after the data is changed
#            trigger.orientation = 'ROW'  # TODO: This needs extra analysis
            trigger.enabled = 1  # TODO: Figure out how to tell the actual value
            trigger_table, trigger_events = triggers[trigger_name2id[trigger_name]]
            trigger.event = trigger_events.strip(';')  # It would take values as 'INSERT;UPDATE'
            trigger.owner = find_object_with_name(schema.tables, trigger_table)

            if trigger.owner:
                trigger.owner.triggers.append(trigger)
            else:
                grt.send_warning('Sybase reverseEngineerTriggers', 'Parent table not found for trigger "%s"' % trigger.name)

    grt.send_progress(1.0, 'Finished reverse engineering of triggers for the %s schema.' % schema.name)
    return 0
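join_multiline_content is another helper that is not shown here. Since syscomments stores long definitions split across rows ordered by colid, the helper presumably stitches the chunks back together per object name; a hypothetical sketch under that assumption:

import operator
from itertools import groupby

def join_multiline_content(name_col, text_col, cursor, per_row_callback=None):
    # Hypothetical: concatenate the text_col chunks of consecutive rows sharing the
    # same name_col value (the query orders by name and chunk id), yielding
    # (index, name, full_text). The callback lets callers capture extra columns.
    columns = [d[0] for d in cursor.description]
    name_idx, text_idx = columns.index(name_col), columns.index(text_col)
    rows = cursor.fetchall()
    if per_row_callback:
        for row in rows:
            per_row_callback(row)
    for idx, (name, chunk) in enumerate(groupby(rows, key=operator.itemgetter(name_idx))):
        yield idx, name, ''.join(row[text_idx] for row in chunk)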
Example #22
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key(s) for the given table."""

        schema = table.owner
        catalog = schema.owner


        if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
            grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
            return 1
        
        # Find the index name associated with the PK:
        pk_index_row = cls.get_connection(connection).cursor().primaryKeys(catalog=catalog.name, schema=schema.name, table=table.name).fetchone()
        pk_index_name = pk_index_row.pk_name if pk_index_row else ''

        indices_dict = {}  # Map the indices names to their respective columns:
        for row in cls.get_connection(connection).cursor().statistics(catalog=catalog.name, schema=schema.name, table=table.name):
            if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
                continue
            indices_dict.setdefault(row.index_name, []).append(row)
        
        for index_name, row_list in indices_dict.iteritems():
            index = grt.classes.db_Index()
            index.name = index_name
            index.isPrimary = 1 if index_name == pk_index_name else 0
            index.unique = not row_list[0].non_unique
            index.indexType = 'UNIQUE' if index.unique else 'INDEX'
    #        index.hasFilter = False  # TODO: Find out if there's a way to determine this

            # Get the columns for the index:
            for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
                column = find_object_with_name(table.columns, row.column_name)
                if column:
                    index_column = grt.classes.db_IndexColumn()
                    index_column.name = index_name + '.' + row.column_name
                    index_column.referencedColumn = column
                    index.columns.append(index_column)
            table.addIndex(index)

            if index.isPrimary:
                table.primaryKey = index

        return 0
def reverseEngineerTablePK(connection, table):
    """Reverse engineers the primary key(s) for the given table."""

    schema = table.owner
    catalog = schema.owner
    execute_query(connection, 'USE %s' % catalog.name)
    query ="""SELECT sc.name
FROM sysobjects so JOIN syskeys sk ON so.id=sk.id
     JOIN syscolumns sc ON sc.id=sk.id AND sc.colid IN (sk.key1, sk.key2, sk.key3, sk.key4, sk.key5, sk.key6, sk.key7, sk.key8)
WHERE so.uid=USER_ID(?) AND sk.id=OBJECT_ID(?) AND sk.type=1"""

    if len(table.columns) == 0:
        grt.send_error('Sybase reverseEngineerTablePK', "Reverse engineer of table's %s.%s primary key was attempted but the table has no columns attribute" % (schema.name, table.name) )
        return 1    # Table must have columns reverse engineered before we can rev eng its primary key(s)

    pk_col_names = [ row[0] for row in execute_query(connection, query, schema.name, table.name) ]

    for pk_column in pk_col_names:
        column = find_object_with_name(table.columns, pk_column)
        if column:
            table.addPrimaryKeyColumn(column)
    return 0
    def migrateColumnDefaultValue(self, state, default_value, source_column, target_column):
        source_table = source_column.owner
        source_schema = source_table.owner

        target_catalog = target_column.owner.owner.owner
        target_default_value = default_value

        def raise_on_no_match(re_str, target):
            if re.match(re_str, target) is None:
                raise ValueError('"%s" does not match the regular expression "%s"' % (target, re_str))
            return True

        value_validators = [
            (["SMALLINT", "INT", "BIGINT"], int),
            (["NUMERIC", "DECIMAL", "FLOAT", "REAL", "DOUBLE PRECISION"], float),
            (["CHAR", "VARCHAR", "NCHAR", "NVARCHAR", "BLOB", "CLOB", "XML"], lambda _: True),
            (["BIT", "BIT VARYING"], lambda val: raise_on_no_match(r"[Bb]?'?[10]+'?", val)),  # E.g. B'101001'
            (["DATE"], lambda val: raise_on_no_match(r"(\d{4}|\d{2})-\d{1,2}-\d{1,2}", val)),
            (["TIME"], lambda val: raise_on_no_match(r"(\d{1,2} )?\d{1,2}:\d{0,2}:\d{0,2}", val)),
            (
                ["TIMESTAMP"],
                lambda val: raise_on_no_match(
                    r"((\d{4}|\d{2})-\d{1,2}-\d{1,2}( (\d{1,2} )?\d{1,2}:\d{0,2}:\d{0,2})?|NULL|NOW\(\))", val.upper()
                ),
            ),
            (["BOOLEAN"], lambda val: raise_on_no_match(r"(TRUE|FALSE|NULL)", val.upper())),
        ]

        seq_regex = re.compile(r"nextval[(]'(.*?)'::regclass[)]")

        source_datatype = None
        if source_column.simpleType:
            source_datatype = source_column.simpleType.name
            if default_value:
                match = seq_regex.match(default_value)
                if match and target_column.simpleType and target_column.simpleType.group.name == "numeric":
                    seq = find_object_with_name(source_schema.sequences, match.group(1))
                    if seq:
                        pk_cols = (
                            set(column.referencedColumn.name for column in source_table.primaryKey.columns)
                            if source_table.primaryKey
                            else set()
                        )
                        if source_column.name in pk_cols:  # AUTO_INCREMENT can only be used on columns in a key
                            try:
                                start = int(seq.startValue)
                            except ValueError:
                                pass
                            else:
                                if start > 1:
                                    target_column.owner.nextAutoInc = seq.startValue
                            target_column.autoIncrement = 1
                        return ""

                if target_column.simpleType.group.name == "numeric":
                    default_value = default_value.strip("' ")

        if default_value:
            if "::" in default_value:
                default_value = default_value.split("::", 1)[0]

            target_default_value = default_value

            for value_validator in value_validators:
                if source_datatype in value_validator[0]:
                    try:
                        value_validator[1](default_value)
                    except Exception:
                        target_default_value = ""
                        state.addMigrationLogEntry(
                            1,
                            source_column,
                            target_column,
                            "Default value %s is not supported. Removed!" % default_value,
                        )
                    else:
                        target_datatype = target_column.simpleType and target_column.simpleType.name or ""
                        if source_datatype == "BOOLEAN":
                            val = default_value.upper()
                            if val == "TRUE":
                                target_default_value = "1"
                            elif val == "FALSE":
                                target_default_value = "0"
                        elif source_datatype == "TIMESTAMP" and default_value.upper() == "NOW()":
                            if (
                                target_catalog.version.majorNumber,
                                target_catalog.version.minorNumber,
                                target_catalog.version.releaseNumber,
                            ) < (5, 6, 5):
                                if target_datatype == "TIMESTAMP":
                                    target_default_value = (
                                        "CURRENT_TIMESTAMP"
                                    )  # now() => CURRENT_TIMESTAMP for TIMESTAMP columns in server v<5.6.5
                                else:
                                    target_default_value = ""
                                    state.addMigrationLogEntry(
                                        1,
                                        source_column,
                                        target_column,
                                        'Default value now() is not supported for a MySQL column of type "%s". Removed!'
                                        % target_datatype,
                                    )
                            else:  # Server version from 5.6.5 and newer
                                target_default_value = (
                                    "CURRENT_TIMESTAMP"
                                )  # CURRENT_TIMESTAMP freely allowed for DATETIME & TIMESTAMP columns

        return target_default_value
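
The validator table above can be exercised standalone; a sketch with two of the validators, reusing the same raise_on_no_match helper (validate() is a hypothetical driver, not part of the original code):

import re

def raise_on_no_match(re_str, target):
    if re.match(re_str, target) is None:
        raise ValueError('"%s" does not match "%s"' % (target, re_str))
    return True

value_validators = [
    (['SMALLINT', 'INT', 'BIGINT'], int),
    (['DATE'], lambda val: raise_on_no_match(r"(\d{4}|\d{2})-\d{1,2}-\d{1,2}", val)),
]

def validate(datatype, default_value):
    for types, validator in value_validators:
        if datatype in types:
            try:
                validator(default_value)
            except Exception:
                return False  # the migration would drop this default and log a message
            return True
    return True  # no validator registered: value passes through unchanged

print(validate('INT', '42'))           # True
print(validate('INT', 'abc'))          # False -> default would be removed
print(validate('DATE', '2020-01-31'))  # True
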
def reverseEngineerTableFKs(connection, table):
    """Reverse engineers the foreign keys for the given table."""

    schema = table.owner
    catalog = schema.owner

    execute_query(connection, 'USE %s' % catalog.name)

    query = """SELECT so.name, USER_NAME(so.uid),
COL_NAME(sk.id, key1),
COL_NAME(sk.id, key2),
COL_NAME(sk.id, key3),
COL_NAME(sk.id, key4),
COL_NAME(sk.id, key5),
COL_NAME(sk.id, key6),
COL_NAME(sk.id, key7),
COL_NAME(sk.id, key8),
COL_NAME(sk.depid, depkey1),
COL_NAME(sk.depid, depkey2),
COL_NAME(sk.depid, depkey3),
COL_NAME(sk.depid, depkey4),
COL_NAME(sk.depid, depkey5),
COL_NAME(sk.depid, depkey6),
COL_NAME(sk.depid, depkey7),
COL_NAME(sk.depid, depkey8)
FROM syskeys sk JOIN sysobjects so ON sk.depid = so.id
WHERE sk.type = 2 AND sk.id = OBJECT_ID('%s.%s')""" % (schema.name, table.name)

    if len(table.columns) == 0:
        grt.send_error('Sybase reverseEngineerTableFKs', 'Reverse engineering of foreign keys for table %s.%s was attempted but the table has no columns attribute' % (schema.name, table.name) )
        return 1    # Table must have columns reverse engineered before we can rev eng its foreign keys

    table.foreignKeys.remove_all()
    for row in execute_query(connection, query):
        fk_columns = [col_name for col_name in row[2:10] if col_name]
        fk_ref_columns = [col_name for col_name in row[10:] if col_name]
        foreign_key = grt.classes.db_sybase_ForeignKey()
        foreign_key.name = '%s_%s_%s_fk' % (schema.name, table.name, '_'.join(fk_columns))
        foreign_key.owner = table
        foreign_key.deleteRule = foreign_key.updateRule = 'RESTRICT'
        foreign_key.modelOnly = 0
        referenced_schema = find_object_with_name(catalog.schemata, row[1])
        if not referenced_schema:
            grt.send_error('Sybase reverseEngineerTableFKs', 'Could not find schema "%s" in catalog "%s"' %
                           (row[1], catalog.name) )  # row[1] is the referenced schema that was not found
            return 1

        foreign_key.referencedTable = find_object_with_name(referenced_schema.tables, row[0])
        if not foreign_key.referencedTable:
            grt.send_error('Sybase reverseEngineerTableFKs', 'Could not find referenced table "%s" in schema "%s"' %
                           (row[0], schema.name) )
            return 1

        for column_name, referenced_column_name in zip(fk_columns, fk_ref_columns):
            column = find_object_with_name(table.columns, column_name)
            if not column:
                grt.send_error('Sybase reverseEngineerTableFKs', 'Could not find column "%s" in table "%s.%s"' %
                               (column_name, schema.name, table.name) )
                return 1
            referenced_column = find_object_with_name(foreign_key.referencedTable.columns, referenced_column_name)
            if not referenced_column:
                grt.send_error('Sybase reverseEngineerTableFKs', 'Could not find column "%s" in table "%s.%s"' %
                               (referenced_column_name, referenced_schema.name, foreign_key.referencedTable.name) )
                return 1
            foreign_key.columns.append(column)
            foreign_key.referencedColumns.append(referenced_column)

        table.foreignKeys.append(foreign_key)
    return 0
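
The column pairing above relies on positional slots: row[2:10] holds the referencing key1...key8 column names and row[10:] the referenced depkey1...depkey8 names, zipped pairwise. With a hypothetical result row:

# Hypothetical syskeys FK row: referenced table name, owner, key1..key8, depkey1..depkey8.
row = ('customers', 'dbo',
       'customer_id', 'region_id', None, None, None, None, None, None,
       'id', 'region', None, None, None, None, None, None)
fk_columns = [c for c in row[2:10] if c]
fk_ref_columns = [c for c in row[10:] if c]
for col, ref_col in zip(fk_columns, fk_ref_columns):
    print('%s -> %s.%s' % (col, row[0], ref_col))
# customer_id -> customers.id
# region_id -> customers.region
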
def reverseEngineerTableIndices(connection, table):
    """Reverse engineers the indices for the given table."""

    schema = table.owner
    catalog = schema.owner
    
    if len(table.columns) == 0:
        grt.send_error('Sybase reverseEngineerTableIndices', 'Reverse engineering of table %s.%s was attempted but the table has no columns attribute' % (schema.name, table.name) )
        return 1    # Table must have columns reverse engineered before we can rev eng its indices

    execute_query(connection, 'USE %s' % catalog.name)

    query = """SELECT INDEX_NAME = A.name,
IS_CLUSTERED = CASE
                WHEN ((A.status&16) = 16 OR (A.status2&512) = 512) THEN 1
                ELSE 0
               END,
IS_PRIMARY = CASE
                WHEN ((A.status&0x800) = 0x800) THEN 1
                ELSE 0
             END,
IS_UNIQUE = CASE
                WHEN ((A.status&2) = 2) THEN 1
                ELSE 0
            END,
IGNORE_DUP = CASE
                WHEN ((A.status&4) = 4) THEN 1
                ELSE 0
             END,
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 1),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 2),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 3),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 4),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 5),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 6),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 7),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 8),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 9),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 10),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 11),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 12),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 13),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 14),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 15),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 16),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 17),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 18),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 19),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 20),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 21),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 22),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 23),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 24),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 25),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 26),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 27),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 28),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 29),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 30),
INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 31)
FROM sysindexes A,  sysobjects B
WHERE A.indid > 0 AND A.indid < 255 AND A.status2 & 2 != 2 AND
B.id = A.id AND B.type = 'U' AND
USER_NAME(B.uid) = ? AND B.name=? ORDER BY 1, 2, 3"""

    for index_row in execute_query(connection, query, schema.name, table.name):
        index = grt.classes.db_sybase_Index()
        index.name = index_row[0]
        index.clustered = index_row[1]
        index.isPrimary = index_row[2]
        index.unique = index_row[3]
        index.indexType = 'UNIQUE' if index.unique else 'INDEX'
        index.ignoreDuplicateRows = index_row[4]
        table.addIndex(index)

        # Get the columns for the index:
        index_column_names = [colname for colname in index_row[5:] if colname is not None]
        for column_name in index_column_names:
            column = find_object_with_name(table.columns, column_name)
            if column:
                index_column = grt.classes.db_sybase_IndexColumn()
                index_column.name = index.name + '.' + column_name
                index_column.referencedColumn = column
                index.columns.append(index_column)
    return 0
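
The CASE expressions in the query decode sysindexes status bitmasks; the same tests expressed in Python, using the flag values from the SQL above:

def decode_index_status(status, status2):
    # Flag values as tested in the query; meanings per Sybase sysindexes.
    return {
        'clustered': bool(status & 16 or status2 & 512),
        'primary': bool(status & 0x800),
        'unique': bool(status & 2),
        'ignore_dup': bool(status & 4),
    }

print(decode_index_status(0x800 | 2, 0))
# {'clustered': False, 'primary': True, 'unique': True, 'ignore_dup': False}
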
def reverseEngineerUserDatatypes(connection, catalog):
    base_types = dict( (
            (34,'IMAGE'),
            (35,'TEXT'),
            (36,'EXTENDED TYPE'),
            (37,'TIMESTAMP'),
            (38,'INTN'),
            (39,'VARCHAR'),
            (45,'BINARY'),
            (47,'CHAR'),
            (48,'TINYINT'),
            (49,'DATE'),
            (50,'BIT'),
            (51,'TIME'),
            (52,'SMALLINT'),
            (55,'DECIMAL'),
            (56,'INT'),
            (58,'SMALLDATETIME'),
            (59,'REAL'),
            (60,'MONEY'),
            (61,'DATETIME'),
            (62,'FLOAT'),
            (63,'NUMERIC'),
            (65,'USMALLINT'),
            (66,'UINT'),
            (67,'UBIGINT'),
            (68,'UINTN'),
            (106,'DECIMALN'),
            (108,'NUMERICN'),
            (109,'FLOATN'),
            (110,'MONEYN'),
            (111,'DATETIMN'),
            (122,'SMALLMONEY'),
            (123,'DATEN'),
            (135,'UNICHAR'),
            (147,'TIMEN'),
            (155,'UNIVARCHAR'),
            (169,'TEXT_LOCATOR'),
            (170,'IMAGE_LOCATOR'),
            (171,'UNITEXT_LOCATOR'),
            (174,'UNITEXT'),
            (187,'BIGDATETIMEN'),
            (188,'BIGTIMEN'),
            (189,'BIGDATETIME'),
            (190,'BIGTIME'),
            (191,'BIGINT'),
        ))

    query = """SELECT name, length, prec, scale, allownulls, type
FROM systypes
WHERE accessrule != NULL"""

    execute_query(connection, 'USE %s' % quoteIdentifier(catalog.name))
    sybase_rdbms_instance = get_sybase_rdbms_instance()
    catalog.userDatatypes.remove_all()
    for name, length, precision, scale, is_nullable, base_type in execute_query(connection, query):
        datatype = grt.classes.db_sybase_UserDatatype()
        datatype.name = name.upper()
        datatype.characterMaximumLength = length
        datatype.numericPrecision = precision
        datatype.numericScale = scale
        datatype.isNullable = is_nullable
        simple_type = find_object_with_name(sybase_rdbms_instance.simpleDatatypes, base_types.get(base_type, ''))  # .get() avoids a KeyError on unknown base type ids
        if simple_type:
            datatype.actualType = simple_type
        else:
            grt.send_warning('Sybase reverseEngineerUserDatatypes', 'Could not find base type "%s" for user defined type "%s"' % (base_type, name) )
        catalog.userDatatypes.append(datatype)
    return 0
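
Unknown type ids fall through to the warning branch; a tiny sketch of the lookup with the .get() fallback used above (the sample dict is a hypothetical subset of base_types):

base_types = {56: 'INT', 39: 'VARCHAR'}
for type_id in (56, 999):
    name = base_types.get(type_id, '')
    print(type_id, name or '<unknown base type>')
# 56 INT
# 999 <unknown base type>
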
    def reverseEngineerTableFKs(cls, connection, table):
        """Reverse engineers the foreign keys for the given table."""

        catalog = table.owner.owner
        schema = table.owner

        query = """SELECT kcu1.COLUMN_NAME,
           rc.CONSTRAINT_NAME, kcu2.TABLE_SCHEMA, kcu2.TABLE_NAME,
           kcu2.COLUMN_NAME, rc.UPDATE_RULE, rc.DELETE_RULE
    FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS rc
         JOIN INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
            ON rc.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
         JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE kcu1
            ON  kcu1.CONSTRAINT_CATALOG = rc.CONSTRAINT_CATALOG
            AND kcu1.CONSTRAINT_SCHEMA  = rc.CONSTRAINT_SCHEMA
            AND kcu1.CONSTRAINT_NAME    = rc.CONSTRAINT_NAME
         JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE kcu2
            ON  kcu2.CONSTRAINT_CATALOG = rc.UNIQUE_CONSTRAINT_CATALOG
            AND kcu2.CONSTRAINT_SCHEMA  = rc.UNIQUE_CONSTRAINT_SCHEMA
            AND kcu2.CONSTRAINT_NAME    = rc.UNIQUE_CONSTRAINT_NAME
    WHERE tc.CONSTRAINT_TYPE = 'FOREIGN KEY' AND kcu1.ORDINAL_POSITION = kcu2.ORDINAL_POSITION
          AND kcu1.TABLE_CATALOG = ?
          AND kcu1.TABLE_SCHEMA = ?
          AND kcu1.TABLE_NAME = ?
    ORDER BY kcu1.CONSTRAINT_NAME, kcu1.ORDINAL_POSITION"""

        if len(table.columns) == 0:
            grt.send_error('%s reverseEngineerTableFKs: Reverse engineering of table '
                           '%s was attempted but the table has no columns attribute' % (cls.getTargetDBMSName(), table.name))
            return 1    # Table must have columns reverse engineered before we can rev eng its foreign keys

        cursor = cls.execute_query(connection, query, catalog.name, schema.name, table.name)
        current_fk = None
        table.foreignKeys.remove_all()
        for col_name, fk_name, ref_schema, ref_table, ref_col, upd_rule, del_rule in cursor:
            if not current_fk or fk_name != current_fk.name:
                if current_fk:
                    table.foreignKeys.append(current_fk)
                foreign_key = grt.classes.db_ForeignKey()
                foreign_key.name = fk_name
                foreign_key.owner = table
                foreign_key.deleteRule = del_rule.upper()
                foreign_key.updateRule = upd_rule.upper()
                foreign_key.modelOnly = 0
                referenced_schema = find_object_with_name(catalog.schemata, ref_schema)
                if not referenced_schema:
                    grt.send_warning('%s reverseEngineerTableFKs: Could not find referenced schema "%s" '
                                     'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), ref_schema, fk_name))
                    current_fk = None  # drop the remaining rows of this unresolvable constraint
                    continue
                referenced_table = find_object_with_name(referenced_schema.tables, ref_table)
                if not referenced_table:
                    grt.send_warning('%s reverseEngineerTableFKs: Could not find referenced table "%s.%s" '
                                     'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), ref_schema, ref_table, fk_name))
                    current_fk = None  # drop the remaining rows of this unresolvable constraint
                    continue
                if len(referenced_table.columns) == 0:
                    grt.send_error('%s reverseEngineerTableFKs: Reverse engineering of table '
                                   '%s was attempted but the table has no columns attribute' % (cls.getTargetDBMSName(), referenced_table.name))
                    return 1    # Table must have columns reverse engineered before we can rev eng its foreign keys

                foreign_key.referencedTable = referenced_table
                current_fk = foreign_key

            if not current_fk:  # row belongs to a constraint that could not be resolved above
                continue

            column = find_object_with_name(table.columns, col_name)
            if not column:
                grt.send_warning('%s reverseEngineerTableFKs: Could not find column "%s.%s.%s" '
                                 'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), schema.name, table.name, col_name, fk_name))
                continue
            current_fk.columns.append(column)

            referenced_column = find_object_with_name(current_fk.referencedTable.columns, ref_col)
            if not referenced_column:
                grt.send_warning('%s reverseEngineerTableFKs: Could not find referenced column "%s.%s.%s" '
                                 'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), ref_schema, ref_table, ref_col, fk_name))
                continue
            current_fk.referencedColumns.append(referenced_column)

        # Store the last fk:
        if current_fk:
            table.foreignKeys.append(current_fk)

        return 0
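
The loop above is a manual group-by on kcu1.CONSTRAINT_NAME, relying on the ORDER BY to deliver each constraint's rows contiguously in ordinal order. The same grouping, sketched with itertools.groupby over hypothetical result rows:

from itertools import groupby
from operator import itemgetter

# (column, fk_name, ref_schema, ref_table, ref_column, update_rule, delete_rule)
rows = [
    ('customer_id', 'fk_orders_customers', 'dbo', 'customers', 'id', 'NO ACTION', 'CASCADE'),
    ('region_id',   'fk_orders_customers', 'dbo', 'customers', 'region', 'NO ACTION', 'CASCADE'),
    ('product_id',  'fk_orders_products',  'dbo', 'products',  'id', 'NO ACTION', 'NO ACTION'),
]

for fk_name, fk_rows in groupby(rows, key=itemgetter(1)):
    pairs = [(r[0], r[4]) for r in fk_rows]  # (referencing column, referenced column)
    print(fk_name, pairs)
# fk_orders_customers [('customer_id', 'id'), ('region_id', 'region')]
# fk_orders_products [('product_id', 'id')]
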
    def reverseEngineer(cls, connection, catalog_name, schemata_list, context):
        grt.send_progress(0, "Reverse engineering catalog information")
        cls.check_interruption()
        catalog = cls.reverseEngineerCatalog(connection, catalog_name)

        # calculate total workload 1st
        grt.send_progress(0.1, 'Preparing...')
        table_count_per_schema = {}
        view_count_per_schema = {}
        routine_count_per_schema = {}
        trigger_count_per_schema = {}
        total_count_per_schema = {}

        get_tables = context.get("reverseEngineerTables", True)
        get_triggers = context.get("reverseEngineerTriggers", True)
        get_views = context.get("reverseEngineerViews", True)
        get_routines = context.get("reverseEngineerRoutines", True)

        # 10% of the progress is for preparation
        total = 1e-10  # keep total non-zero to avoid a ZeroDivisionError
        i = 0.0
        accumulated_progress = 0.1
        for schema_name in schemata_list:
            cls.check_interruption()
            table_count_per_schema[schema_name] = len(cls.getTableNames(connection, catalog_name, schema_name)) if get_tables else 0
            view_count_per_schema[schema_name] = len(cls.getViewNames(connection, catalog_name, schema_name)) if get_views else 0
            cls.check_interruption()
            routine_count_per_schema[schema_name] = len(cls.getProcedureNames(connection, catalog_name, schema_name)) + len(cls.getFunctionNames(connection, catalog_name, schema_name)) if get_routines else 0
            trigger_count_per_schema[schema_name] = len(cls.getTriggerNames(connection, catalog_name, schema_name)) if get_triggers else 0

            total_count_per_schema[schema_name] = (table_count_per_schema[schema_name] + view_count_per_schema[schema_name] +
                                                   routine_count_per_schema[schema_name] + trigger_count_per_schema[schema_name] + 1e-10)
            total += total_count_per_schema[schema_name]

            grt.send_progress(accumulated_progress + 0.1 * (i / (len(schemata_list) + 1e-10) ), "Gathered stats for %s" % schema_name)
            i += 1.0

        # Now take 60% in the first pass of reverse engineering:
        accumulated_progress = 0.2
        for schema_name in schemata_list:
            schema_progress_share = 0.6 * (total_count_per_schema.get(schema_name, 0.0) / total)
            schema = find_object_with_name(catalog.schemata, schema_name) 

            if schema:
                # Reverse engineer tables:
                step_progress_share = schema_progress_share * (table_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_tables:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering tables from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    # Remove previous first-pass marks that may exist if the user goes back and attempts rev eng again:
                    progress_flags = cls._connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
                    progress_flags.discard('%s_tables_first_pass' % schema_name)
                    cls.reverseEngineerTables(connection, schema)
                    grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'First pass of table reverse engineering for schema %s completed!' % schema_name)
        
                # Reverse engineer views:
                step_progress_share = schema_progress_share * (view_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_views:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering views from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    cls.reverseEngineerViews(connection, schema)
                    grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Reverse engineering of views for schema %s completed!' % schema_name)
        
                # Reverse engineer routines:
                step_progress_share = schema_progress_share * (routine_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_routines:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering routines from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    grt.begin_progress_step(0.0, 0.5)
                    cls.reverseEngineerProcedures(connection, schema)
                    cls.check_interruption()
                    grt.end_progress_step()
                    grt.begin_progress_step(0.5, 1.0)
                    cls.reverseEngineerFunctions(connection, schema)
                    grt.end_progress_step()
                    grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Reverse engineering of routines for schema %s completed!' % schema_name)
        
                # Reverse engineer triggers:
                step_progress_share = schema_progress_share * (trigger_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_triggers:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering triggers from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    cls.reverseEngineerTriggers(connection, schema)
                    grt.end_progress_step()
        
                accumulated_progress = 0.8
                grt.send_progress(accumulated_progress, 'Reverse engineering of triggers for schema %s completed!' % schema_name)
            else:  # No schema with the given name was found
                grt.send_warning('The schema %s was not found in the catalog %s. Skipping it.' % (schema_name, catalog_name) )
                
        # Now the second pass for reverse engineering tables:
        if get_tables:
            total_tables = sum(table_count_per_schema[schema.name] for schema in catalog.schemata if schema.name in schemata_list)
            for schema in catalog.schemata:
                if schema.name not in schemata_list:
                    continue
                cls.check_interruption()
                step_progress_share = 0.2 * (table_count_per_schema[schema.name] / (total_tables + 1e-10))
                grt.send_info('Reverse engineering foreign keys for tables in schema %s' % schema.name)
                grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                cls.reverseEngineerTables(connection, schema)
                grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Second pass of table reverse engineering for schema %s completed!' % schema.name)
            

        grt.send_progress(1.0, 'Reverse engineering completed!')
        return catalog
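
The progress arithmetic above hands out fixed slices: 10% preparation, 10% stats gathering, 60% for the first pass split per schema by object count, and 20% for the foreign-key pass split by table count. A sketch of the per-schema split with hypothetical counts:

# Hypothetical object counts per schema (tables + views + routines + triggers).
total_count_per_schema = {'sales': 120.0, 'hr': 30.0}
total = sum(total_count_per_schema.values()) + 1e-10  # epsilon avoids division by zero

accumulated = 0.2  # 20% already spent on preparation and stats
for schema_name, count in total_count_per_schema.items():
    share = 0.6 * (count / total)  # this schema's slice of the 60% first pass
    accumulated += share
    print('%s: +%.3f -> %.3f' % (schema_name, share, accumulated))
# sales: +0.480 -> 0.680
# hr: +0.120 -> 0.800
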