Example #1
    def page_activated(self, advancing):
        if advancing:
            if self.main.plan.state.objectCreationParams.get("CreateInDB", True):
                self._copy_db.set_active(True)
            else:
                self._copy_db.set_active(False)

            self.refresh_table_list()
            for k in self.main.plan.state.dataBulkTransferParams.keys():
                del self.main.plan.state.dataBulkTransferParams[k]


            if sys.platform == "win32":
                filename = mforms.Utilities.get_special_folder(mforms.Desktop)+"\\copy_migrated_tables.cmd"
            else:
                filename = mforms.Utilities.get_special_folder(mforms.Desktop)+"/copy_migrated_tables.sh"
            self.CopyScript_entry.set_value(filename)
            self.CopyScript_check_duplicate = True

            source_os = self.main.plan.migrationSource.get_os()
            if not source_os:
                self.BulkCopyScript_radiobutton.set_enabled(False)
                bulk_copy_filename = ''
                grt.send_warning('Cannot get operating system of source server.')
            elif source_os == "windows":
                bulk_copy_filename = os.path.join(mforms.Utilities.get_special_folder(mforms.Desktop), 'bulk_copy_tables.cmd')
            else:
                bulk_copy_filename = os.path.join(mforms.Utilities.get_special_folder(mforms.Desktop), 'bulk_copy_tables.sh')
            self.BulkCopyScript_entry.set_value(bulk_copy_filename)
            self.BulkCopyScript_check_duplicate = True


        WizardPage.page_activated(self, advancing)
Example #2
    def page_activated(self, advancing):
        if advancing:
            if self.main.plan.state.objectCreationParams.get("CreateInDB", True):
                self._copy_db.set_active(True)
            else:
                self._copy_db.set_active(False)

            self.refresh_table_list()
            for k in self.main.plan.state.dataBulkTransferParams.keys():
                del self.main.plan.state.dataBulkTransferParams[k]

            if sys.platform == "win32":
                filename = mforms.Utilities.get_special_folder(mforms.Desktop) + "\\copy_migrated_tables.cmd"
            else:
                filename = mforms.Utilities.get_special_folder(mforms.Desktop) + "/copy_migrated_tables.sh"
            self.copy_script_entry.set_value(filename)
            self.copy_script_check_duplicate = True

            source_os = self.main.plan.migrationSource.get_os()
            if not source_os:
                self.bulk_copy_script_radiobutton.set_enabled(False)
                bulk_copy_filename = ""
                grt.send_warning("Cannot get operating system of source server.")
            elif source_os == "windows":
                bulk_copy_filename = os.path.join(
                    mforms.Utilities.get_special_folder(mforms.Desktop), "bulk_copy_tables.cmd"
                )
            else:
                bulk_copy_filename = os.path.join(
                    mforms.Utilities.get_special_folder(mforms.Desktop), "bulk_copy_tables.sh"
                )
            self.bulk_copy_script_entry.set_value(bulk_copy_filename)
            self.bulk_copy_script_check_duplicate = True

        WizardPage.page_activated(self, advancing)
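A note on the loop that clears dataBulkTransferParams in both variants above: deleting keys while iterating over .keys() is safe on Python 2, where keys() returns a list, but raises "RuntimeError: dictionary changed size during iteration" on Python 3. A port-safe sketch, with a plain dict standing in for the GRT state node:

params = {"LiveDataCopy": 1, "TriggerSchema": "src"}  # stand-in for state.dataBulkTransferParams

# Snapshot the keys so the dict may shrink while we iterate (works on Python 2 and 3):
for k in list(params.keys()):
    del params[k]

assert params == {}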
Example #3
def execute_script(connection, script, log):
    connection = get_connection(connection)

    ranges = grt.modules.MysqlSqlFacade.getSqlStatementRanges(script)
    for start, length in ranges:
        if grt.query_status():
            raise grt.UserInterrupt()
        statement = script[start:start+length]
        try:
            grt.send_info("Execute statement", statement)
            grt.log_debug3("DbMySQLFE", "Execute %s\n" % statement)
            connection.execute(statement)
        except db_utils.QueryError, exc:
            if log:
                entry = grt.classes.GrtLogEntry()
                entry.owner = log
                entry.name = str(exc)
                entry.entryType = 2
                log.entries.append(entry)
            grt.send_warning("%s" % exc)
            grt.log_error("DbMySQLFE", "Exception executing '%s': %s\n" % (statement, exc))
            return False
        except Exception, exc:
            if log:
                entry = grt.classes.GrtLogEntry()
                entry.owner = log
                entry.name = "Exception: " + str(exc)
                entry.entryType = 2
                log.entries.append(entry)
            grt.send_warning("Exception caught: %s" % exc)
            grt.log_error("DbMySQLFE", "Exception executing '%s': %s\n" % (statement, exc))
            return False
    return True  # assumed: report success once all statements executed
Example #4
def execute_script(connection, script, log):
    connection = get_connection(connection)

    ranges = grt.modules.MysqlSqlFacade.getSqlStatementRanges(script)
    for start, length in ranges:
        if grt.query_status():
            raise grt.UserInterrupt()
        statement = script[start:start + length]
        try:
            grt.send_info("Execute statement", statement)
            grt.log_debug3("DbMySQLFE", "Execute %s\n" % statement)
            connection.execute(statement)
        except db_utils.QueryError, exc:
            if log:
                entry = grt.classes.GrtLogEntry()
                entry.owner = log
                entry.name = str(exc)
                entry.entryType = 2
                log.entries.append(entry)
            grt.send_warning("%s" % exc)
            grt.log_error("DbMySQLFE",
                          "Exception executing '%s': %s\n" % (statement, exc))
            return False
        except Exception, exc:
            if log:
                entry = grt.classes.GrtLogEntry()
                entry.owner = log
                entry.name = "Exception: " + str(exc)
                entry.entryType = 2
                log.entries.append(entry)
            grt.send_warning("Exception caught: %s" % exc)
            grt.log_error("DbMySQLFE",
                          "Exception executing '%s': %s\n" % (statement, exc))
            return False
    return True  # assumed: report success once all statements executed
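The split-execute-log pattern of Examples #3 and #4 can be sketched outside Workbench; here sqlite3 stands in for the MySQL connection, a naive split(';') stands in for MysqlSqlFacade.getSqlStatementRanges(), and a plain list stands in for the GRT log (all names below are illustrative):

import sqlite3

def run_statements(conn, script, log):
    # Naive statement splitter; the real code uses server-aware statement ranges.
    for statement in filter(None, (s.strip() for s in script.split(";"))):
        try:
            conn.execute(statement)
        except sqlite3.Error as exc:
            log.append("Error executing %r: %s" % (statement, exc))
            return False
    return True

log = []
ok = run_statements(sqlite3.connect(":memory:"),
                    "CREATE TABLE t (id INTEGER); INSERT INTO t VALUES (1)", log)
print(ok, log)  # True []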
Example #5
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key(s) for the given table."""

        schema = table.owner
        catalog = schema.owner

        query = """SELECT tc.CONSTRAINT_NAME, kcu.COLUMN_NAME
    FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
      JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS kcu
        ON kcu.CONSTRAINT_SCHEMA = tc.CONSTRAINT_SCHEMA
       AND kcu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
       AND kcu.TABLE_SCHEMA = tc.TABLE_SCHEMA
       AND kcu.TABLE_NAME = tc.TABLE_NAME
    WHERE tc.CONSTRAINT_TYPE='PRIMARY KEY' AND tc.TABLE_CATALOG = '%s' AND tc.TABLE_SCHEMA = '%s' AND tc.TABLE_NAME = '%s'
    ORDER BY tc.CONSTRAINT_NAME, kcu.ORDINAL_POSITION""" % (
            catalog.name,
            schema.name,
            table.name,
        )

        if (
            len(table.columns) == 0
        ):  # Table must have columns reverse engineered before we can rev eng its primary key(s)
            grt.send_error(
                "%s reverseEngineerTablePK: Reverse engineer of table %s was attempted but the table has "
                "no columns attribute" % (cls.getTargetDBMSName(), table.name)
            )
            return 1

        fk_rows = cls.execute_query(connection, query).fetchall()
        if fk_rows:
            index = grt.classes.db_Index()
            index.name = fk_rows[0][0]
            index.isPrimary = 1
            index.unique = 1
            index.indexType = "PRIMARY"

            for _, pk_col in fk_rows:
                table_column = find_object_with_name(table.columns, pk_col)
                if not table_column:
                    grt.send_warning(
                        '%s reverseEngineerTablePK: Could not find column "%s" in table "%s" referenced '
                        'by primary key constraint "%s". The primary key will not be added.'
                        % (cls.getTargetDBMSName(), pk_col, table.name, index.name)
                    )
                    return 0

                index_column = grt.classes.db_IndexColumn()
                index_column.name = index.name + "." + pk_col
                index_column.referencedColumn = table_column

                index.columns.append(index_column)

            table.primaryKey = index
            table.addIndex(index)
        return 0
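The query above interpolates the catalog, schema, and table names straight into the SQL string. Assuming cls.execute_query forwards extra arguments as query parameters, as the module-level execute_query used in Examples #18 and #24 does, a parameterized variant would avoid quoting problems:

        query = """SELECT tc.CONSTRAINT_NAME, kcu.COLUMN_NAME
    FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
      JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS kcu
        ON kcu.CONSTRAINT_SCHEMA = tc.CONSTRAINT_SCHEMA
       AND kcu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
       AND kcu.TABLE_SCHEMA = tc.TABLE_SCHEMA
       AND kcu.TABLE_NAME = tc.TABLE_NAME
    WHERE tc.CONSTRAINT_TYPE = 'PRIMARY KEY'
      AND tc.TABLE_CATALOG = ? AND tc.TABLE_SCHEMA = ? AND tc.TABLE_NAME = ?
    ORDER BY tc.CONSTRAINT_NAME, kcu.ORDINAL_POSITION"""
        pk_rows = cls.execute_query(connection, query, catalog.name, schema.name, table.name).fetchall()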
Example #6
    def reverseEngineerTableIndices(cls, connection, table):
        schema = table.owner
        
        if len(table.columns) == 0:
            grt.send_error('%s: reverseEngineerTableIndices' % cls.getTargetDBMSName(),
                'Reverse engineer of table %s.%s was attempted but the table has no columns attribute' % (schema.name, table.name))
            return 1    # Table must have columns reverse engineered before we can rev eng its indices

        all_indices_query = """SELECT c2.relname, i.indisunique::int, i.indisclustered::int, i.indnatts, i.indkey
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_namespace n, pg_catalog.pg_index i
WHERE c.oid = i.indrelid AND i.indexrelid = c2.oid AND c.relnamespace = n.oid AND 
n.nspname = '%s' AND c.relname = '%s' AND i.indisprimary = False 
ORDER BY c2.relname""" % (schema.name, table.name)

        index_columns_query = """SELECT a.attname
FROM unnest(ARRAY%r) attrid
JOIN pg_catalog.pg_attribute a ON attrid=a.attnum
JOIN pg_catalog.pg_class c ON c.oid = a.attrelid
JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = '%s' AND c.relname = '%s'"""

        index_rows = cls.execute_query(connection, all_indices_query).fetchall()
        for index_name, is_unique, is_clustered, column_count, column_refs in index_rows:
            index = grt.classes.db_Index()
            index.name = index_name
            index.isPrimary = 0
            index.unique = is_unique
            index.indexType = ('UNIQUE' if is_unique else 'INDEX')
            #index.clustered = is_clustered

            # Get the columns for the index:
            cols = [ int(col) for col in column_refs.split() ]
            if column_count != len(cols):
                grt.send_warning('%s: reverseEngineerTableIndices' % cls.getTargetDBMSName(),
                    'Reverse engineer of index %s.%s was attempted but the declared column count (indnatts) '
                    'differs from the number of columns parsed from indkey. Skipping index!' % (schema.name, index_name))
                continue

            for (column_name, ) in cls.execute_query(connection, index_columns_query % (cols, schema.name, table.name)):
                column = find_object_with_name(table.columns, column_name)
                if column:
                    index_column = grt.classes.db_IndexColumn()
                    index_column.name = index_name + '.' + column_name
                    #index_column.descend = is_descending_key
                    index_column.referencedColumn = column
                    index.columns.append(index_column)
                else:
                    grt.send_warning('%s: reverseEngineerTableIndices' % cls.getTargetDBMSName(), 
                        'Reverse engineer of index %s.%s was attempted but the referenced column %s '
                        'could not be found on table %s. Skipping index!' % (schema.name, index_name, column_name, table.name) )
                    continue

            table.addIndex(index)
        return 0
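One caveat in the index-column lookup above: joining against unnest(ARRAY[...]) does not guarantee that rows come back in indkey order, so multi-column indexes may be assembled with their columns shuffled. On PostgreSQL 9.4+ the key order can be pinned down with WITH ORDINALITY (a sketch, using the same % formatting as above):

        index_columns_query = """SELECT a.attname
FROM unnest(ARRAY%r) WITH ORDINALITY AS k(attrid, ord)
JOIN pg_catalog.pg_attribute a ON k.attrid = a.attnum
JOIN pg_catalog.pg_class c ON c.oid = a.attrelid
JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
WHERE n.nspname = '%s' AND c.relname = '%s'
ORDER BY k.ord"""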
Example #7
    def reverseEngineerTableColumns(cls, connection, table):
        schema = table.owner
        catalog = schema.owner

        query = """SELECT COLUMN_NAME, COLUMN_DEFAULT,
        IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH,
        NUMERIC_PRECISION, NUMERIC_SCALE, DATETIME_PRECISION,
        CHARACTER_SET_NAME, COLLATION_NAME
    FROM INFORMATION_SCHEMA.COLUMNS
    WHERE TABLE_CATALOG='%s' AND TABLE_SCHEMA='%s' AND TABLE_NAME='%s'
    ORDER BY ORDINAL_POSITION""" % (
            catalog.name,
            schema.name,
            table.name,
        )

        table_columns = cls.execute_query(connection, query)
        for (
            column_name,
            column_default,
            is_nullable,
            type_name,
            char_max_length,
            precision,
            scale,
            datetime_precision,
            charset,
            collation,
        ) in table_columns:
            column = grt.classes.db_Column()
            column.name = column_name
            column.isNotNull = is_nullable == "NO"
            column.length = char_max_length
            column.precision = precision if precision is not None else -1
            column.scale = scale if scale is not None else -1
            column.defaultValue = column_default if column_default is not None else ""

            datatype = cls.getColumnDatatype(connection, table, column, type_name)
            if isinstance(datatype, grt.classes.db_SimpleDatatype):
                column.simpleType = datatype
            elif isinstance(datatype, grt.classes.db_UserDatatype):
                column.userType = datatype
            else:
                column.simpleType = cls.getColumnDatatype(connection, table, column, "VARCHAR")
                column.length = 255
                msg = (
                    'Column datatype "%s" for column "%s" in table "%s.%s" is unknown, reverse engineering as VARCHAR(255)'
                    % (type_name, column.name, schema.name, table.name)
                )
                grt.send_warning("%s reverseEngineerTableColumns: " % cls.getTargetDBMSName() + msg)

            table.addColumn(column)

        return 0
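Note that CHARACTER_MAXIMUM_LENGTH is NULL for non-character types, so column.length can end up as None in the loop above; the normalization already applied to precision and scale could be used for the length as well (a one-line sketch):

            column.length = char_max_length if char_max_length is not None else -1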
Example #9
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key(s) for the given table."""

        schema = table.owner
        catalog = schema.owner

        query = """SELECT tc.CONSTRAINT_NAME, kcu.COLUMN_NAME
    FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
      JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS kcu
        ON kcu.CONSTRAINT_SCHEMA = tc.CONSTRAINT_SCHEMA
       AND kcu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
       AND kcu.TABLE_SCHEMA = tc.TABLE_SCHEMA
       AND kcu.TABLE_NAME = tc.TABLE_NAME
    WHERE tc.CONSTRAINT_TYPE='PRIMARY KEY' AND tc.TABLE_CATALOG = '%s' AND tc.TABLE_SCHEMA = '%s' AND tc.TABLE_NAME = '%s'
    ORDER BY tc.CONSTRAINT_NAME, kcu.ORDINAL_POSITION""" % (
            catalog.name, schema.name, table.name)

        if len(
                table.columns
        ) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
            grt.send_error(
                '%s reverseEngineerTablePK: Reverse engineer of table %s was attempted but the table has '
                'no columns attribute' % (cls.getTargetDBMSName(), table.name))
            return 1

        fk_rows = cls.execute_query(connection, query).fetchall()
        if fk_rows:
            index = grt.classes.db_Index()
            index.name = fk_rows[0][0]
            index.isPrimary = 1
            index.unique = 1
            index.indexType = 'PRIMARY'

            for _, pk_col in fk_rows:
                table_column = find_object_with_name(table.columns, pk_col)
                if not table_column:
                    grt.send_warning(
                        '%s reverseEngineerTablePK: Could not find column "%s" in table "%s" referenced '
                        'by primary key constraint "%s". The primary key will not be added.'
                        % (cls.getTargetDBMSName(), pk_col, table.name,
                           index.name))
                    return 0

                index_column = grt.classes.db_IndexColumn()
                index_column.name = index.name + '.' + pk_col
                index_column.referencedColumn = table_column

                index.columns.append(index_column)

            table.primaryKey = index
            table.addIndex(index)
        return 0
Example #10
    def reverseEngineerTableColumns(cls, connection, table):
        query = """SELECT UPPER(sd.domain_name), sc.column_name, sc.nulls, sc.width, sc.scale, sc."default"
FROM SYSTABCOL sc JOIN SYSDOMAIN sd ON sc.domain_id=sd.domain_id
JOIN SYSTAB st ON sc.table_id=st.table_id
JOIN SYSUSER su ON st.creator=su.user_id
WHERE st.table_name='%s' AND su.user_name='%s'
ORDER BY sc.column_id""" % (
            table.name,
            table.owner.name,
        )
        for datatype, col_name, nullable, width, scale, default_value in cls.execute_query(connection, query):
            column = grt.classes.db_Column()
            column.name = col_name or ""
            column.isNotNull = nullable in ["N", "n"]
            column.collationName = ""  # TODO: find a way to get the column's collation

            if datatype.startswith("UNSIGNED "):
                datatype = datatype[9:]
                column.flags.append("UNSIGNED")

            is_simple_datatype, datatype_object = cls.find_datatype_object(table.owner.owner, datatype)

            if not datatype_object:
                is_simple_datatype, datatype_object = cls.find_datatype_object(table.owner.owner, "VARCHAR")
                width = 255
                msg = 'Column datatype "%s" for column "%s" in table "%s.%s" reverse engineered as VARCHAR(255)' % (
                    datatype,
                    column.name,
                    table.owner.name,
                    table.name,
                )
                grt.send_warning("SQL Anywhere reverseEngineerTableColumns", msg)

            if is_simple_datatype:
                column.simpleType = datatype_object
            else:
                column.userType = datatype_object

            column.defaultValue = str(default_value) if default_value is not None else ""

            group = datatype_object.group.name if is_simple_datatype else datatype_object.actualType.group.name

            width = int(width) if width is not None else -1
            if group.upper() == "NUMERIC":
                column.length = -1
                column.precision = width
                column.scale = scale
            else:
                column.length = width
                column.precision = column.scale = -1

            table.addColumn(column)
Example #11
    def reverseEngineerTableColumns(cls, connection, table):
        schema = table.owner
        catalog = schema.owner

        simple_datatypes_list = [
            datatype.name.upper() for datatype in catalog.simpleDatatypes
        ]
        user_datatypes_list = [
            datatype.name.upper() for datatype in catalog.userDatatypes
        ]

        odbc_datatypes = dict(
            (dtype.data_type, dtype.type_name)
            for dtype in cls.get_connection(connection).cursor().getTypeInfo())

        table_columns = cls.get_connection(connection).cursor().columns(
            table=table.name)
        for column_info in table_columns:
            column = grt.classes.db_Column()
            column.name = column_info[3]  # column_name
            column.isNotNull = column_info[17] != 'YES'  # is_nullable
            column.length = column_info[6]  # column_size
            column.scale = column_info[8]  # decimal_digits
            column.precision = column_info[6]  # column_size

            datatype = None
            try:
                type_name = odbc_datatypes[column_info[4]].upper()  # data_type
                datatype = simple_datatypes_list.index(type_name)
            except (KeyError, ValueError):
                try:
                    user_datatype = catalog.userDatatypes[
                        user_datatypes_list.index(type_name)]
                except (ValueError, TypeError, NameError):
                    user_datatype = None
                    datatype = simple_datatypes_list.index('VARCHAR')
                    column.length = 255
                    msg = 'Column datatype "%s" for column "%s" in table "%s.%s" reverse engineered as VARCHAR(255)' % (
                        type_name, column.name, schema.name, table.name)
                    grt.send_warning('%s reverseEngineerTableColumns: ' %
                                     cls.getTargetDBMSName() + msg)
                else:
                    datatype = None
                    column.userType = user_datatype

            if isinstance(datatype, int):
                column.simpleType = catalog.simpleDatatypes[datatype]

            table.addColumn(column)

        return 0
Example #12
    def doFetchSchemaNames(self, only_these_catalogs=[]):
        """Fetch list of schema names in catalog.schema format and stores them in the migration.sourceSchemataNames node"""

        grt.send_progress(0.0, "Checking connection...")
        self.connect()
        if self.rdbms.doesSupportCatalogs:
            grt.send_progress(0.1, "Fetching catalog names...")
            self.state.sourceSchemataNames.remove_all()
            catalog_names = self.getCatalogNames()
            if only_these_catalogs:
                inexistent_catalogs = set(only_these_catalogs).difference(
                    catalog_names)
                if inexistent_catalogs:
                    grt.send_warning(
                        'The following catalogs were not found: ' +
                        ', '.join(list(inexistent_catalogs)))
                catalog_names = list(
                    set(only_these_catalogs).difference(
                        inexistent_catalogs)) or self.getCatalogNames()
            self._catalog_names = catalog_names
            grt.send_progress(0.1, "Fetching schema names...")
            i = 0.0
            accumulated_progress = 0.1
            step_progress_share = 1.0 / (len(catalog_names) + 1e-10)
            for catalog in catalog_names:
                grt.send_progress(accumulated_progress,
                                  'Fetching schema names from %s...' % catalog)
                schema_names = self.getSchemaNames(catalog)
                for schema in schema_names:
                    self.state.sourceSchemataNames.append("%s.%s" %
                                                          (catalog, schema))
                accumulated_progress += 0.9 * step_progress_share
        else:  # The rdbms doesn't support catalogs
            grt.send_progress(0.1, "Fetching schema names...")
            schema_names = self.getSchemaNames('')
            if only_these_catalogs:  # Here only_these_catalogs would rather mean only these schemata
                inexistent_schemata = set(only_these_catalogs).difference(
                    schema_names)
                if inexistent_schemata:
                    grt.send_warning(
                        'The following schemata were not found: ' +
                        ', '.join(list(inexistent_schemata)))
                schema_names = list(
                    set(only_these_catalogs).difference(
                        inexistent_schemata)) or self.getSchemaNames('')
            self._catalog_names = []
            self.state.sourceSchemataNames.remove_all()
            for schema in schema_names:
                self.state.sourceSchemataNames.append('def.%s' % schema)
        grt.send_progress(1.0, "Finished")
Example #13
    def reverseEngineerTableColumns(cls, connection, table):
        query = """SELECT UPPER(sd.domain_name), sc.column_name, sc.nulls, sc.width, sc.scale, sc."default"
FROM SYSTABCOL sc JOIN SYSDOMAIN sd ON sc.domain_id=sd.domain_id
JOIN SYSTAB st ON sc.table_id=st.table_id
JOIN SYSUSER su ON st.creator=su.user_id
WHERE st.table_name='%s' AND su.user_name='%s'
ORDER BY sc.column_id""" % (table.name, table.owner.name)
        for datatype, col_name, nullable, width, scale, default_value in cls.execute_query(
                connection, query):
            column = grt.classes.db_Column()
            column.name = col_name or ''
            column.isNotNull = nullable in ['N', 'n']
            column.collationName = ''  # TODO: find a way to get the column's collation

            if datatype.startswith('UNSIGNED '):
                datatype = datatype[9:]
                column.flags.append('UNSIGNED')

            is_simple_datatype, datatype_object = cls.find_datatype_object(
                table.owner.owner, datatype)

            if not datatype_object:
                is_simple_datatype, datatype_object = cls.find_datatype_object(
                    table.owner.owner, 'VARCHAR')
                width = 255
                msg = 'Column datatype "%s" for column "%s" in table "%s.%s" reverse engineered as VARCHAR(255)' % (
                    datatype, column.name, table.owner.name, table.name)
                grt.send_warning('SQL Anywhere reverseEngineerTableColumns',
                                 msg)

            if is_simple_datatype:
                column.simpleType = datatype_object
            else:
                column.userType = datatype_object

            column.defaultValue = str(
                default_value) if default_value is not None else ''

            group = datatype_object.group.name if is_simple_datatype else datatype_object.actualType.group.name

            width = int(width) if width is not None else -1
            if group.upper() == 'NUMERIC':
                column.length = -1
                column.precision = width
                column.scale = scale
            else:
                column.length = width
                column.precision = column.scale = -1

            table.addColumn(column)
Example #14
    def secondary_default_value_validation(self, state, source_column, target_column):
        # Only 1 CURRENT_TIMESTAMP column can exist and it has to be the 1st TIMESTAMP column in the table
        if target_column.simpleType:
            if target_column.simpleType.name == 'TIMESTAMP' and target_column.defaultValue == 'CURRENT_TIMESTAMP':
                for column in target_column.owner.columns:
                    if column == target_column:
                        break
                    if column.simpleType and column.simpleType.name == 'TIMESTAMP':
                        # column.defaultValue == 'CURRENT_TIMESTAMP':
                        state.addMigrationLogEntry(1, source_column, target_column,
                            'DEFAULT CURRENT_TIMESTAMP can only be used in the first TIMESTAMP column of the table. '
                            'Default value removed.')
                        target_column.defaultValue = ''
        else:
            grt.send_warning('Could not migrate datatype of column %s in table %s.%s' % (target_column.name, target_column.owner.owner.name, target_column.owner.name))
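For reference, the server rule this validation guards against: before MySQL 5.6.5 only one TIMESTAMP column per table could default to CURRENT_TIMESTAMP, and it had to be the first one; later servers lifted the restriction.

# On MySQL < 5.6.5 the second CURRENT_TIMESTAMP default below is rejected with
# error 1293 (ER_TOO_MUCH_AUTO_TIMESTAMP_COLS):
#   CREATE TABLE t (a TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
#                   b TIMESTAMP DEFAULT CURRENT_TIMESTAMP);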
Example #16
        def process_fk(catalog, table, fk_name, fk_rows):
            foreign_key = grt.classes.db_ForeignKey()
            if fk_name in cls._connections[connection.__id__]['fk_names']:
                while True:
                    suffix = '_%06d' % random.randint(0, 999999)
                    if fk_name + suffix not in cls._connections[connection.__id__]['fk_names']:
                        break
                fk_name += suffix
            foreign_key.name = fk_name
            foreign_key.owner = table
            foreign_key.deleteRule = get_delete_action(fk_rows[0].grbit)
            foreign_key.updateRule = get_update_action(fk_rows[0].grbit)
            foreign_key.modelOnly = 0
            
            # Find the referenced table:
            foreign_key.referencedTable = find_object_with_name(catalog.schemata[0].tables, fk_rows[0].szReferencedObject)
            if not foreign_key.referencedTable:
                grt.send_error('Migration: reverseEngineerTableFKs: Table "%s" not found in schemata "%s"' % (fk_rows[0].szReferencedObject, catalog.schemata[0].name) )
                return 1
            
            for fk_row in fk_rows:
                column = find_object_with_name(table.columns, fk_row.szColumn)
                if not column:
                    grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.szColumn, table.name) )
                    continue

                ref_column = find_object_with_name(foreign_key.referencedTable.columns, fk_row.szReferencedColumn)
                if not ref_column:
                    grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.szReferencedColumn, foreign_key.referencedTable.name) )
                    continue
                
                foreign_key.columns.append(column)
                foreign_key.referencedColumns.append(ref_column)

            # Find and delete indexes that are identical to FKs
            for index in reversed(table.indices):
                if table.primaryKey != index and len(index.columns) == len(foreign_key.columns):
                    match = True
                    for i, col in enumerate(index.columns):
                        if foreign_key.columns[i] != col.referencedColumn:
                            match = False
                            break
                    if match:
                        grt.send_warning("Migration: reverseEngineerTable: Skipping duplicate index %s from table %s\n" % (col.name, table.name))
                        table.indices.remove(index)

            cls._connections[connection.__id__]['fk_names'][foreign_key.name] = table 
            table.foreignKeys.append(foreign_key)
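The random _%06d suffix above only serves to disambiguate foreign-key names that collide across tables; a deterministic counter achieves the same without retry loops, sketched here as a standalone helper:

import itertools

def unique_fk_name(name, taken):
    """Return name, or the first of name_2, name_3, ... not present in taken."""
    if name not in taken:
        return name
    for n in itertools.count(2):
        candidate = '%s_%d' % (name, n)
        if candidate not in taken:
            return candidate

print(unique_fk_name('fk_orders_customers', {'fk_orders_customers'}))  # fk_orders_customers_2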
Example #18
def reverseEngineerTriggers(connection, schema):
    execute_query(connection, 'USE %s' % schema.owner.name)  # catalog

    tables_with_triggers_query = """SELECT name, deltrig, instrig, updtrig
FROM sysobjects
WHERE uid = USER_ID(?) AND type='U'
AND(deltrig != 0 OR instrig != 0 OR updtrig != 0)"""

    trigger_details_query = """SELECT so.name AS trigger_name, sc.id AS trigger_id, sc.text AS trigger_definition
FROM syscomments sc JOIN sysobjects so ON sc.id=so.id
WHERE sc.id IN (%s)
ORDER BY so.name, sc.colid"""

    triggers = {}
    for row in execute_query(connection, tables_with_triggers_query, schema.name):
        if row[1] != 0:
            triggers.setdefault(row[1], [row[0], ''])[1] += ';DELETE'
        if row[2] != 0:
            triggers.setdefault(row[2], [row[0], ''])[1] += ';INSERT'
        if row[3] != 0:
            triggers.setdefault(row[3], [row[0], ''])[1] += ';UPDATE'

    step = 1.0 / (len(getTriggerNames(connection, schema.owner.name, schema.name)) + 1e-10)
    all_triggers = execute_query(connection, trigger_details_query % ', '.join(str(trig_id) for trig_id in triggers)) if triggers else None
    trigger_name2id = {}
    def register_trigger_name(row):
        trigger_name2id[row[0]] = row[1]
    if all_triggers:
        for idx, trigger_name, trigger_definition in join_multiline_content('trigger_name', 'trigger_definition',
                                                                            all_triggers, register_trigger_name):
            grt.send_progress(idx * step, 'Reverse engineering trigger %s.%s' % (schema.name, trigger_name))
            trigger = grt.classes.db_sybase_Trigger()
            trigger.name = trigger_name or ''
            trigger.sqlDefinition = trigger_definition
            trigger.timing = 'AFTER'  # All Sybase ASE triggers are fired after the data is changed
#            trigger.orientation = 'ROW'  # TODO: This needs extra analysis
            trigger.enabled = 1  # TODO: Figure out how to tell the actual value
            trigger_table, trigger_events = triggers[trigger_name2id[trigger_name]]
            trigger.event = trigger_events.strip(';')  # It would take values as 'INSERT;UPDATE'
            trigger.owner = find_object_with_name(schema.tables, trigger_table)

            if trigger.owner:
                trigger.owner.triggers.append(trigger)
            else:
                grt.send_warning('Sybase reverseEngineerTriggers', 'Parent table not found for trigger "%s"' % trigger.name)

    grt.send_progress(1.0, 'Finished reverse engineering of triggers for the %s schema.' % schema.name)
    return 0
Example #19
    def doFetchSchemaNames(self, only_these_catalogs=[]):
        """Fetches the list of schema names in catalog.schema format and stores them in the migration.sourceSchemataNames node"""

        grt.send_progress(0.0, "Checking connection...")
        self.connect()
        if self.rdbms.doesSupportCatalogs > 0:
            grt.send_progress(0.1, "Fetching catalog names...")
            self.state.sourceSchemataNames.remove_all()
            catalog_names = self.getCatalogNames()
            if only_these_catalogs:
                inexistent_catalogs = set(only_these_catalogs).difference(catalog_names)
                if inexistent_catalogs:
                    grt.send_warning('The following catalogs were not found: ' + ', '.join(list(inexistent_catalogs)))
                catalog_names = list(set(only_these_catalogs).difference(inexistent_catalogs)) or self.getCatalogNames()
            self._catalog_names = catalog_names
            grt.send_progress(0.1, "Fetching schema names...")
            accumulated_progress = 0.1
            step_progress_share = 1.0 / (len(catalog_names) + 1e-10)
            for catalog in catalog_names:
                if not catalog:
                    continue
                grt.send_progress(accumulated_progress, 'Fetching schema names from %s...' % catalog)
                schema_names = self.getSchemaNames(catalog)
                for schema in schema_names:
                    if not schema:
                        continue
                    self.state.sourceSchemataNames.append("%s.%s" % (self._db_module.quoteIdentifier(catalog), self._db_module.quoteIdentifier(schema)))
                accumulated_progress += 0.9 * step_progress_share
        elif self.rdbms.doesSupportCatalogs == 0:  # The rdbms doesn't support catalogs
            grt.send_progress(0.1, "Fetching schema names...")
            schema_names = self.getSchemaNames('')
            if only_these_catalogs:  # Here only_these_catalogs would rather mean only these schemata
                inexistent_schemata = set(only_these_catalogs).difference(schema_names)
                if inexistent_schemata:
                    grt.send_warning('The following schemas were not found: ' + ', '.join(list(inexistent_schemata)))
                schema_names = list(set(only_these_catalogs).difference(inexistent_schemata)) or self.getSchemaNames('')
            self._catalog_names = []
            self.state.sourceSchemataNames.remove_all()
            for schema in schema_names:
                self.state.sourceSchemataNames.append('%s.%s' % (self._db_module.quoteIdentifier('def'), self._db_module.quoteIdentifier(schema)))
        else:  # no schema either
            self._catalog_names = []
            self.state.sourceSchemataNames.remove_all()
            for schema in self.getSchemaNames(''):
                self.state.sourceSchemataNames.append(self._db_module.quoteIdentifier(schema))
        grt.send_progress(1.0, "Finished")
Example #20
    def reverseEngineerTableColumns(cls, connection, table):
        schema = table.owner
        catalog = schema.owner

        query = """SELECT COLUMN_NAME, COLUMN_DEFAULT,
        IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH,
        NUMERIC_PRECISION, NUMERIC_SCALE, DATETIME_PRECISION,
        CHARACTER_SET_NAME, COLLATION_NAME
    FROM INFORMATION_SCHEMA.COLUMNS
    WHERE TABLE_CATALOG='%s' AND TABLE_SCHEMA='%s' AND TABLE_NAME='%s'
    ORDER BY ORDINAL_POSITION""" % (catalog.name, schema.name, table.name)

        table_columns = cls.execute_query(connection, query)
        for (column_name, column_default, is_nullable, type_name,
             char_max_length, precision, scale, datetime_precision, charset,
             collation) in table_columns:
            column = grt.classes.db_Column()
            column.name = column_name
            column.isNotNull = is_nullable == 'NO'
            column.length = char_max_length
            column.precision = precision if precision is not None else -1
            column.scale = scale if scale is not None else -1
            column.defaultValue = column_default if column_default is not None else ''

            datatype = cls.getColumnDatatype(connection, table, column,
                                             type_name)
            if isinstance(datatype, grt.classes.db_SimpleDatatype):
                column.simpleType = datatype
            elif isinstance(datatype, grt.classes.db_UserDatatype):
                column.userType = datatype
            else:
                column.simpleType = cls.getColumnDatatype(
                    connection, table, column, 'VARCHAR')
                column.length = 255
                msg = 'Column datatype "%s" for column "%s" in table "%s.%s" is unknown, reverse engineering as VARCHAR(255)' % (
                    type_name, column.name, schema.name, table.name)
                grt.send_warning('%s reverseEngineerTableColumns: ' %
                                 cls.getTargetDBMSName() + msg)

            table.addColumn(column)

        return 0
Example #21
    def reverseEngineerTableColumns(cls, connection, table):
        schema = table.owner
        catalog = schema.owner

        simple_datatypes_list = [ datatype.name.upper() for datatype in catalog.simpleDatatypes ]
        user_datatypes_list   = [ datatype.name.upper() for datatype in catalog.userDatatypes ]

        odbc_datatypes = dict( (dtype.data_type, dtype.type_name) for dtype in cls.get_connection(connection).cursor().getTypeInfo() )

        table_columns = cls.get_connection(connection).cursor().columns(catalog=catalog.name, schema=schema.name, table=table.name)
        for column_info in table_columns:
            column = grt.classes.db_Column()
            column.name = column_info[3]  # column_name
            column.isNotNull = column_info[17] != 'YES'  # is_nullable ('YES' means the column is nullable)
            column.length = column_info[6]  # column_size
            column.scale = column_info[8]  # decimal_digits
            column.precision = column_info[6]  # column_size

            datatype = None
            try:
                type_name = odbc_datatypes[column_info[4]].upper()  # data_type
                datatype = simple_datatypes_list.index(type_name)
            except (KeyError, ValueError):
                try:
                    user_datatype = catalog.userDatatypes[user_datatypes_list.index(type_name)]
                except (ValueError, TypeError, NameError):
                    user_datatype = None
                    datatype = simple_datatypes_list.index('VARCHAR')
                    column.length = 255
                    msg = 'Column datatype "%s" for column "%s" in table "%s.%s" reverse engineered as VARCHAR(255)' % (type_name, column.name, schema.name, table.name)
                    grt.send_warning('%s reverseEngineerTableColumns: ' % cls.getTargetDBMSName() + msg)
                else:
                    datatype = None
                    column.userType = user_datatype

            if isinstance(datatype, int):
                column.simpleType = catalog.simpleDatatypes[datatype]

            table.addColumn(column)

        return 0
Example #22
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key for the given table."""

        if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key
            grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
            return 1
        
        pk_index_name = 'PrimaryKey'

        indices_dict = {}  # Map the indices names to their respective columns:
        for row in cls.get_connection(connection).cursor().statistics(table=table.name):
            if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
                continue
            indices_dict.setdefault(row.index_name, []).append(row)

        for index_name, row_list in list(indices_dict.items()):
            index = grt.classes.db_Index()
            index.name = index_name
            index.isPrimary = 1 if index_name == pk_index_name else 0
            index.unique = not row_list[0].non_unique
            index.indexType = 'UNIQUE' if index.unique else 'INDEX'
    #        index.hasFilter = False  # TODO: Find out if there's a way to determine this

            skip = False
            # Get the columns for the index:
            for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
                column = find_object_with_name(table.columns, row.column_name)
                if column:
                    # skip indexes on LONGCHAR columns
                    if column.simpleType.name in ["LONGCHAR"]:
                        grt.send_warning("Migration: reverseEngineerTable: Skipping index %s.%s on a %s column\n" % (table.name, column.name, column.simpleType.name)) 
                        skip = True
                    else:
                        index_column = grt.classes.db_IndexColumn()
                        index_column.name = index_name + '.' + row.column_name
                        index_column.referencedColumn = column
                        index.columns.append(index_column)
                        if not column.isNotNull and index.isPrimary:
                            column.isNotNull = 1
                            grt.send_warning("Migration: reverseEngineerTablePK: column %s.%s was changed to NOT NULL because it's a Primary Key column\n" % (column.owner.name, column.name))
                else:
                    grt.send_warning("Migration: reverseEngineerTablePK: could not find column %s, belonging to key %s. Key will be skipped\n" % (row.column_name, index_name))
                    skip = True
            if not skip:
                table.addIndex(index)
                if index.isPrimary:
                    table.primaryKey = index

        return 0
Example #23
    def reverseEngineerTablePK(cls, connection, table):
        """Reverse engineers the primary key(s) for the given table."""

        if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
            grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
            return 1
        
        pk_index_name = 'PrimaryKey'

        indices_dict = {}  # Map the indices names to their respective columns:
        for row in cls.get_connection(connection).cursor().statistics(table=table.name):
            if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
                continue
            indices_dict.setdefault(row.index_name, []).append(row)

        for index_name, row_list in indices_dict.iteritems():
            index = grt.classes.db_Index()
            index.name = index_name
            index.isPrimary = 1 if index_name == pk_index_name else 0
            index.unique = not row_list[0].non_unique
            index.indexType = 'UNIQUE' if index.unique else 'INDEX'
    #        index.hasFilter = False  # TODO: Find out if there's a way to determine this

            skip = False
            # Get the columns for the index:
            for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
                column = find_object_with_name(table.columns, row.column_name)
                if column:
                    # skip indexes on LONGCHAR columns
                    if column.simpleType.name in ["LONGCHAR"]:
                        grt.send_warning("Migration: reverseEngineerTable: Skipping index %s.%s on a %s column\n" % (table.name, column.name, column.simpleType.name)) 
                        skip = True
                    else:
                        index_column = grt.classes.db_IndexColumn()
                        index_column.name = index_name + '.' + row.column_name
                        index_column.referencedColumn = column
                        index.columns.append(index_column)
                        if not column.isNotNull and index.isPrimary:
                            column.isNotNull = 1
                            grt.send_warning("Migration: reverseEngineerTablePK: column %s.%s was changed to NOT NULL because it's a Primary Key column\n" % (column.owner.name, column.name))
                else:
                    grt.send_warning("Migration: reverseEngineerTablePK: could not find column %s, belonging to key %s. Key will be skipped\n" % (row.column_name, index_name))
                    skip = True
            if not skip:
                table.addIndex(index)
                if index.isPrimary:
                    table.primaryKey = index

        return 0
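Note the only substantive difference from the preceding variant: indices_dict.iteritems() is Python-2-only, while list(indices_dict.items()) as used in Example #22 works on both Python 2 and 3.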
Example #24
def reverseEngineerTableColumns(connection, table):
    schema = table.owner
    catalog = schema.owner
    execute_query(connection, 'USE %s' % quoteIdentifier(catalog.name))
    query = """SELECT  ISNULL(C.name, '') AS COLUMN_NAME, T.name AS DATA_TYPE,
		C.length AS CHARACTER_MAXIMUM_LENGTH, C.prec AS NUMERIC_PRECISION,
		C.scale AS NUMERIC_SCALE, CONVERT(BIT, (C.status & 0x08)) AS IS_NULLABLE,
        CONVERT(BIT, (C.status & 0x80)) AS IS_IDENTITY_COLUMN, K.text AS COLUMN_DEFAULT
		FROM syscolumns C, systypes T, sysobjects A, syscomments K
	    WHERE USER_NAME(A.uid) = ? AND
        A.id = C.id AND C.id = OBJECT_ID(?) AND
        C.usertype *= T.usertype AND
        C.cdefault *= K.id
        ORDER BY C.colid"""
    
    rows = execute_query(connection, query, schema.name, table.name)

    sybase_rdbms_instance = get_sybase_rdbms_instance()
    sybase_simple_datatypes_list = [ datatype.name for datatype in sybase_rdbms_instance.simpleDatatypes ]
    user_datatypes_list = [ datatype.name for datatype in catalog.userDatatypes ]

    col_names = [ col_description[0] for col_description in rows.description ]
    for row in rows:
        row_values = dict( nameval for nameval in zip(col_names, row) )
        column = grt.classes.db_sybase_Column()
        column.name = row_values['COLUMN_NAME'] or ''
        column.isNotNull = not row_values['IS_NULLABLE']
        column.collationName = row_values.get('COLLATION_NAME', '')  # TODO: find a way to get the column's collation
        column.length = row_values['CHARACTER_MAXIMUM_LENGTH'] or 0
        column.precision = row_values['NUMERIC_PRECISION'] if row_values['NUMERIC_PRECISION'] is not None else -1
        column.scale = row_values['NUMERIC_SCALE'] if row_values['NUMERIC_SCALE'] is not None else -1
        column.comment = row_values.get('COLUMN_COMMENT', '')  # TODO: find a way to get the column's comments
        column.identity = row_values['IS_IDENTITY_COLUMN'] or 0
        user_datatype = None
        try:
            datatype = sybase_simple_datatypes_list.index( row_values['DATA_TYPE'].upper() )
        except ValueError:
            try:
                user_datatype = catalog.userDatatypes[user_datatypes_list.index( row_values['DATA_TYPE'].upper() )]
            except (ValueError, TypeError):
                user_datatype = None
                datatype = sybase_simple_datatypes_list.index('VARCHAR')
                column.length = 255
                msg = 'Column datatype "%s" for column "%s" in table "%s.%s" reverse engineered as VARCHAR(255)' % (row_values['DATA_TYPE'].upper(), column.name, schema.name, table.name)
                grt.send_warning('Sybase reverseEngineerTableColumns', msg)
            else:
                datatype = None
                column.userType = user_datatype

        if datatype is not None:
            column.simpleType = sybase_rdbms_instance.simpleDatatypes[datatype]

        default_value = row_values['COLUMN_DEFAULT']
        
        if default_value is not None and default_value.startswith('DEFAULT '):
            column.defaultValue = default_value[8:]

        table.addColumn(column)

        # TODO: charset name

    return 0
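The *= operators in the WHERE clause above are legacy Transact-SQL outer-join syntax (left outer joins against systypes and syscomments). With ANSI join syntax the same query could be written as below; a sketch, not tested against ASE:

    query = """SELECT ISNULL(C.name, '') AS COLUMN_NAME, T.name AS DATA_TYPE,
    C.length AS CHARACTER_MAXIMUM_LENGTH, C.prec AS NUMERIC_PRECISION,
    C.scale AS NUMERIC_SCALE, CONVERT(BIT, (C.status & 0x08)) AS IS_NULLABLE,
    CONVERT(BIT, (C.status & 0x80)) AS IS_IDENTITY_COLUMN, K.text AS COLUMN_DEFAULT
FROM syscolumns C
JOIN sysobjects A ON A.id = C.id
LEFT JOIN systypes T ON C.usertype = T.usertype
LEFT JOIN syscomments K ON C.cdefault = K.id
WHERE USER_NAME(A.uid) = ? AND C.id = OBJECT_ID(?)
ORDER BY C.colid"""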
Example #25
def reverseEngineerUserDatatypes(connection, catalog):
    base_types = dict( (
            (34,'IMAGE'),
            (35,'TEXT'),
            (36,'EXTENDED TYPE'),
            (37,'TIMESTAMP'),
            (38,'INTN'),
            (39,'VARCHAR'),
            (45,'BINARY'),
            (47,'CHAR'),
            (48,'TINYINT'),
            (49,'DATE'),
            (50,'BIT'),
            (51,'TIME'),
            (52,'SMALLINT'),
            (55,'DECIMAL'),
            (56,'INT'),
            (58,'SMALLDATETIME'),
            (59,'REAL'),
            (60,'MONEY'),
            (61,'DATETIME'),
            (62,'FLOAT'),
            (63,'NUMERIC'),
            (65,'USMALLINT'),
            (66,'UINT'),
            (67,'UBIGINT'),
            (68,'UINTN'),
            (106,'DECIMALN'),
            (108,'NUMERICN'),
            (109,'FLOATN'),
            (110,'MONEYN'),
            (111,'DATETIMN'),
            (122,'SMALLMONEY'),
            (123,'DATEN'),
            (135,'UNICHAR'),
            (147,'TIMEN'),
            (155,'UNIVARCHAR'),
            (169,'TEXT_LOCATOR'),
            (170,'IMAGE_LOCATOR'),
            (171,'UNITEXT_LOCATOR'),
            (174,'UNITEXT'),
            (187,'BIGDATETIMEN'),
            (188,'BIGTIMEN'),
            (189,'BIGDATETIME'),
            (190,'BIGTIME'),
            (191,'BIGINT'),
        ))

    query = """SELECT name, length, prec, scale, allownulls, type
FROM systypes
WHERE accessrule != NULL"""

    execute_query(connection, 'USE %s' % quoteIdentifier(catalog.name))
    sybase_rdbms_instance = get_sybase_rdbms_instance()
    catalog.userDatatypes.remove_all()
    for name, length, precision, scale, is_nullable, base_type in execute_query(connection, query):
        datatype = grt.classes.db_sybase_UserDatatype()
        datatype.name = name.upper()
        datatype.characterMaximumLength = length
        datatype.numericPrecision = precision
        datatype.numericScale = scale
        datatype.isNullable = is_nullable
        simple_type = find_object_with_name(sybase_rdbms_instance.simpleDatatypes, base_types[base_type])
        if simple_type:
            datatype.actualType = simple_type
        else:
            grt.send_warning('Sybase reverseEngineerUserDatatypes', 'Could not find base type "%s" for user-defined type "%s"' % (base_type, name))
        catalog.userDatatypes.append(datatype)
    return 0
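
# --- Illustrative sketch (not part of the example above) ---------------------
# Looking up systypes.type in the base_types mapping with dict.get() instead
# of base_types[base_type] avoids a KeyError for type codes missing from the
# table; as written, the function above would raise before ever reaching its
# warning branch when it meets an unknown code.
def base_type_name(base_types, type_code):
    name = base_types.get(type_code)
    if name is None:
        return 'UNKNOWN(%d)' % type_code  # report instead of crashing
    return name
# ------------------------------------------------------------------------------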
def reverseEngineer(connection, catalog_name, schemata_list, context):
    catalog = grt.classes.db_mysql_Catalog()
    catalog.name = catalog_name
    catalog.simpleDatatypes.remove_all()
    catalog.simpleDatatypes.extend(connection.driver.owner.simpleDatatypes)
    
    table_names_per_schema = {}
    routine_names_per_schema = {}
    trigger_names_per_schema = {}
    
    def filter_warnings(mtype, text, detail):
        # filter out parser warnings about stub creation/reuse from the message stream, since
        # they're harmless
        if mtype == "WARNING" and (" stub " in text or "Stub " in text):
            grt.send_info(text)
            return True
        return False
    
    version = getServerVersion(connection)
    
    get_tables = context.get("reverseEngineerTables", True)
    get_triggers = context.get("reverseEngineerTriggers", True) and (version.majorNumber, version.minorNumber, version.releaseNumber) >= (5, 1, 21)
    get_views = context.get("reverseEngineerViews", True)
    get_routines = context.get("reverseEngineerRoutines", True)
    
    # calculate total workload 1st
    
    # 10% of the progress is for preparation
    
    grt.send_progress(0, "Preparing...")
    total = 0
    i = 0.0
    for schema_name in schemata_list:
        check_interruption()
        if get_tables and get_views:
            table_names = getAllTableNames(connection, catalog_name, schema_name)
        elif get_tables:
            table_names = getTableNames(connection, catalog_name, schema_name)
        elif get_views:
            table_names = getViewNames(connection, catalog_name, schema_name)
        else:
            table_names = []
        total += len(table_names)
        table_names_per_schema[schema_name] = table_names
        check_interruption()
        if get_routines:
            procedure_names = getProcedureNames(connection, catalog_name, schema_name)
            check_interruption()
            function_names = getFunctionNames(connection, catalog_name, schema_name)
            check_interruption()
            total += len(procedure_names)
            total += len(function_names)
            routine_names_per_schema[schema_name] = procedure_names, function_names
        else:
            routine_names_per_schema[schema_name] = [], []
        if get_triggers:
            trigger_names = getTriggerNames(connection, catalog_name, schema_name)
            total += len(trigger_names)
        else:
            trigger_names = []
        trigger_names_per_schema[schema_name] = trigger_names
        
        grt.send_progress(0.1 * (i/len(schemata_list)), "Preparing...")
        i += 1.0

    def wrap_sql(sql, schema):
        return "USE `%s`;\n%s"%(escape_sql_identifier(schema), sql)

    def wrap_routine_sql(sql):
        return "DELIMITER $$\n"+sql

    i = 0.0
    for schema_name in schemata_list:
        schema = grt.classes.db_mysql_Schema()
        schema.owner = catalog
        schema.name = schema_name
        catalog.schemata.append(schema)
        context = grt.modules.MySQLParserServices.createParserContext(catalog.characterSets, getServerVersion(connection), getServerMode(connection), 1)
        options = {}

        if get_tables or get_views:
            grt.send_info("Reverse engineering tables from %s" % schema_name)
            for table_name in table_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving table %s.%s..." % (schema_name, table_name))
                result = execute_query(connection, "SHOW CREATE TABLE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(table_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, table_name))
                if result and result.nextRow():
                    sql = result.stringByIndex(2)
                    grt.push_message_handler(filter_warnings)
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(context, catalog, wrap_sql(sql, schema_name), options)
                    grt.end_progress_step()
                    grt.pop_message_handler()
                    i += 0.5
                else:
                    raise Exception("Could not fetch table information for %s.%s" % (schema_name, table_name))

        if get_triggers:
            grt.send_info("Reverse engineering triggers from %s" % schema_name)
            for trigger_name in trigger_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving trigger %s.%s..." % (schema_name, trigger_name))
                result = execute_query(connection, "SHOW CREATE TRIGGER `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(trigger_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, trigger_name))
                if result and result.nextRow():
                    sql = result.stringByName("SQL Original Statement")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(context, catalog, wrap_sql(wrap_routine_sql(sql), schema_name), options)
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch trigger information for %s.%s" % (schema_name, trigger_name))
        
        if get_routines:
            grt.send_info("Reverse engineering stored procedures from %s" % schema_name)
            procedure_names, function_names = routine_names_per_schema[schema_name]
            for name in procedure_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving stored procedure %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE PROCEDURE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Procedure")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(context, catalog, wrap_sql(wrap_routine_sql(sql), schema_name), options)
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch procedure information for %s.%s" % (schema_name, name))

            grt.send_info("Reverse engineering functions from %s" % schema_name)
            for name in function_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving function %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE FUNCTION `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Function")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(context, catalog, wrap_sql(wrap_routine_sql(sql), schema_name), options)
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch function information for %s.%s" % (schema_name, name))

    grt.send_progress(1.0, "Reverse engineered %i objects" % total)
    
    # check for any stub tables left
    empty_schemas = []
    for schema in catalog.schemata:
        schema_has_stub_tables = False
        for table in reversed(schema.tables):
            if table.isStub:
                grt.send_warning("Table %s was referenced from another table, but was not reverse engineered" % table.name)
                schema.tables.remove(table)
                schema_has_stub_tables = True
        if not schema.tables and not schema.views and not schema.routines and schema_has_stub_tables:
            empty_schemas.append(schema)
    for schema in empty_schemas:
        catalog.schemata.remove(schema)

    return catalog
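
# --- Illustrative sketch (not part of the example above) ---------------------
# The push_message_handler / pop_message_handler pair above leaks the handler
# if parseSQLIntoCatalogSql raises. A context manager keeps the pair balanced;
# only the grt calls already used above are assumed, the helper name is made up.
from contextlib import contextmanager

@contextmanager
def message_filter(handler):
    grt.push_message_handler(handler)
    try:
        yield
    finally:
        grt.pop_message_handler()

# usage:
#   with message_filter(filter_warnings):
#       grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(...)
# ------------------------------------------------------------------------------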
    def reverseEngineerTableFKs(cls, connection, table):
        """Reverse engineers the foreign keys for the given table."""

        catalog = table.owner.owner
        schema = table.owner

        query = """SELECT kcu1.COLUMN_NAME,
           rc.CONSTRAINT_NAME, kcu2.TABLE_SCHEMA, kcu2.TABLE_NAME,
           kcu2.COLUMN_NAME, rc.UPDATE_RULE, rc.DELETE_RULE
    FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS rc
         JOIN INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
            ON rc.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
         JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE kcu1
            ON  kcu1.CONSTRAINT_CATALOG = rc.CONSTRAINT_CATALOG
            AND kcu1.CONSTRAINT_SCHEMA  = rc.CONSTRAINT_SCHEMA
            AND kcu1.CONSTRAINT_NAME    = rc.CONSTRAINT_NAME
         JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE kcu2
            ON  kcu2.CONSTRAINT_CATALOG = rc.UNIQUE_CONSTRAINT_CATALOG
            AND kcu2.CONSTRAINT_SCHEMA  = rc.UNIQUE_CONSTRAINT_SCHEMA
            AND kcu2.CONSTRAINT_NAME    = rc.UNIQUE_CONSTRAINT_NAME
    WHERE tc.CONSTRAINT_TYPE = 'FOREIGN KEY' AND kcu1.ORDINAL_POSITION = kcu2.ORDINAL_POSITION
          AND kcu1.TABLE_CATALOG = ?
          AND kcu1.TABLE_SCHEMA = ?
          AND kcu1.TABLE_NAME = ?
    ORDER BY kcu1.CONSTRAINT_NAME, kcu1.ORDINAL_POSITION"""

        if len(table.columns) == 0:
            grt.send_error('%s reverseEngineerTableFKs: Reverse engineering of table '
                           '%s was attempted but the table has no columns attribute' % (cls.getTargetDBMSName(), table.name))
            return 1    # Table must have columns reverse engineered before we can rev eng its foreign keys

        cursor = cls.execute_query(connection, query, catalog.name, schema.name, table.name)
        current_fk = None
        table.foreignKeys.remove_all()
        for col_name, fk_name, ref_schema, ref_table, ref_col, upd_rule, del_rule in cursor:
            if not current_fk or fk_name != current_fk.name:
                if current_fk:
                    table.foreignKeys.append(current_fk)
                foreign_key = grt.classes.db_ForeignKey()
                foreign_key.name = fk_name
                foreign_key.owner = table
                foreign_key.deleteRule = del_rule.upper()
                foreign_key.updateRule = upd_rule.upper()
                foreign_key.modelOnly = 0
                referenced_schema = find_object_with_name(catalog.schemata, ref_schema)
                if not referenced_schema:
                    grt.send_warning('%s reverseEngineerTableFKs: Could not find referenced schema "%s" '
                                     'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), ref_schema, fk_name))
                    continue
                referenced_table = find_object_with_name(referenced_schema.tables, ref_table)
                if not referenced_table:
                    grt.send_warning('%s reverseEngineerTableFKs: Could not find referenced table "%s.%s" '
                                     'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), ref_schema, ref_table, fk_name))
                    continue
                if len(referenced_table.columns) == 0:
                    grt.send_error('%s reverseEngineerTableFKs: Reverse engineering of table '
                                   '%s was attempted but the table has no columns attribute' % (cls.getTargetDBMSName(), referenced_table.name))
                    return 1    # Table must have columns reverse engineered before we can rev eng its foreign keys

                foreign_key.referencedTable = referenced_table
                current_fk = foreign_key

            column = find_object_with_name(table.columns, col_name)
            if not column:
                grt.send_warning('%s reverseEngineerTableFKs: Could not find column "%s.%s.%s" '
                                 'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), schema.name, table.name, col_name, fk_name))
                continue
            current_fk.columns.append(column)

            referenced_column = find_object_with_name(current_fk.referencedTable.columns, ref_col)
            if not referenced_column:
                grt.send_warning('%s reverseEngineerTableFKs: Could not find referenced column "%s.%s.%s" '
                                 'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), ref_schema, ref_table, ref_col, fk_name))
                continue
            current_fk.referencedColumns.append(referenced_column)

        # Store the last fk:
        if current_fk:
            table.foreignKeys.append(current_fk)

        return 0
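
# --- Illustrative sketch (not part of the example above) ---------------------
# The query in reverseEngineerTableFKs orders rows by constraint name, so
# consecutive rows with the same fk_name describe one foreign key; the manual
# current_fk bookkeeping above is equivalent to grouping with itertools.
from itertools import groupby
from operator import itemgetter

def group_fk_rows(rows):
    """rows: (col_name, fk_name, ...) tuples already sorted by fk_name."""
    for fk_name, fk_rows in groupby(rows, key=itemgetter(1)):
        yield fk_name, list(fk_rows)
# ------------------------------------------------------------------------------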
    def _merge_schemata(self, prefix=''):
        catalog = self.main.plan.migrationSource.catalog
        schema = catalog.schemata[0]
        # preserve the original name of the catalog
        schema.oldName = schema.name

        module_db = self.main.plan.migrationSource.module_db()

        # otypes is something like ['tables', 'views', 'routines']:
        otypes = [ suptype[0] for suptype in self.main.plan.migrationSource.supportedObjectTypes ]

        # Update names for the objects of this first schema:
        if prefix:
            actual_prefix = (schema.name if prefix == 'schema_name' else schema.__id__) + '_'
            for otype in otypes:
                for obj in getattr(schema, otype):
                    # this will be used later during datacopy to refer to the original object to copy from
                    obj.oldName = module_db.quoteIdentifier(schema.oldName)+"."+module_db.quoteIdentifier(obj.name)
                    oname = obj.name
                    obj.name = actual_prefix + obj.name
                    grt.send_info("Object %s was renamed to %s" % (oname, obj.name))
        else:
            for otype in otypes:
                for obj in getattr(schema, otype):
                    # this will be used later during datacopy to refer to the original object to copy from
                    obj.oldName = module_db.quoteIdentifier(schema.name)+"."+module_db.quoteIdentifier(obj.name)

        schema.name = catalog.name
        if not prefix:
            known_names = dict( (otype, set(obj.name for obj in getattr(schema, otype))) for otype in otypes)

        for other_schema in list(catalog.schemata)[1:]:
            if other_schema.defaultCharacterSetName != schema.defaultCharacterSetName:
                grt.send_warning('While merging schema %s into %s: Default charset for schemata differs (%s vs %s). Setting default charset to %s' % (other_schema.name, schema.name, other_schema.defaultCharacterSetName, schema.defaultCharacterSetName, schema.defaultCharacterSetName))
                self.main.plan.state.addMigrationLogEntry(0, other_schema, None,
                      'While merging schema %s into %s: Default charset for schemata differs (%s vs %s). Setting default charset to %s' % (other_schema.name, schema.name, other_schema.defaultCharacterSetName, schema.defaultCharacterSetName, schema.defaultCharacterSetName))

            if other_schema.defaultCollationName != schema.defaultCollationName:
                grt.send_warning('While merging schema %s into %s: Default collation for schemata differs (%s vs %s). Setting default collation to %s' % (other_schema.name, schema.name, other_schema.defaultCollationName, schema.defaultCollationName, schema.defaultCollationName))
                self.main.plan.state.addMigrationLogEntry(0, other_schema, None,
                      'While merging schema %s into %s: Default collation for schemata differs (%s vs %s). Setting default collation to %s' % (other_schema.name, schema.name, other_schema.defaultCollationName, schema.defaultCollationName, schema.defaultCollationName))

            for otype in otypes:
                other_objects = getattr(other_schema, otype)
                if not prefix:
                    repeated_object_names = known_names[otype].intersection(obj.name for obj in other_objects)
                    if repeated_object_names:
                        objects_dict = dict( (obj.name, obj) for obj in other_objects )
                        for repeated_object_name in repeated_object_names:
                            objects_dict[repeated_object_name].name += '_' + other_schema.name
                            grt.send_warning('The name of the %(otype)s "%(oname)s" conflicts with other %(otype)s names: renamed to "%(onewname)s"' % { 'otype':otype[:-1],
                                                                  'oname':repeated_object_name,
                                                                  'onewname':objects_dict[repeated_object_name].name })
        
                            self.main.plan.state.addMigrationLogEntry(0, other_schema, None,
                                  'The name of the %(otype)s "%(oname)s" conflicts with other %(otype)s names: renamed to "%(onewname)s"' % { 'otype':otype[:-1],
                                                                                                                                              'oname':repeated_object_name,
                                                                                                                                              'onewname':objects_dict[repeated_object_name].name }
                                                                      )
                        known_names[otype].update(obj.name for obj in other_objects)
                else:
                    actual_prefix = (other_schema.name if prefix == 'schema_name' else other_schema.__id__) + '_'

                getattr(schema, otype).extend(other_objects)
                for obj in other_objects:
                    # this will be used later during datacopy to refer to the original object to copy from
                    obj.oldName = module_db.quoteIdentifier(obj.owner.name)+"."+module_db.quoteIdentifier(obj.name)
                    
                    obj.owner = schema
                    if prefix:
                        oname = obj.name
                        obj.name = actual_prefix + obj.name
                        grt.send_info("Object %s was renamed to %s" % (oname, obj.name))

        # Keep only the merged schema:
        catalog.schemata.remove_all()
        catalog.schemata.append(schema)
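
# --- Illustrative sketch (not part of the example above) ---------------------
# Collision handling in _merge_schemata, reduced to its core: when schemata
# are merged without a prefix, a clashing object name gets the source
# schema's name appended. Names below are illustrative only.
def dedup_name(name, taken, source_schema_name):
    """Return a name that does not collide with those already taken."""
    if name in taken:
        name = name + '_' + source_schema_name
    taken.add(name)
    return name
# ------------------------------------------------------------------------------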
Example #29
0
    def _merge_schemata(self, prefix=''):
        catalog = self.main.plan.migrationSource.catalog
        schema = catalog.schemata[0]
        # preserve the original name of the catalog
        schema.oldName = schema.name

        module_db = self.main.plan.migrationSource.module_db()

        # otypes is something like ['tables', 'views', 'routines']:
        otypes = [
            suptype[0]
            for suptype in self.main.plan.migrationSource.supportedObjectTypes
        ]

        # Update names for the objects of this first schema:
        if prefix:
            actual_prefix = (schema.name if prefix == 'schema_name' else
                             schema.__id__) + '_'
            for otype in otypes:
                for obj in getattr(schema, otype):
                    # this will be used later during datacopy to refer to the original object to copy from
                    obj.oldName = module_db.quoteIdentifier(
                        schema.oldName) + "." + module_db.quoteIdentifier(
                            obj.name)
                    oname = obj.name
                    obj.name = actual_prefix + obj.name
                    grt.send_info("Object %s was renamed to %s" %
                                  (oname, obj.name))
        else:
            for otype in otypes:
                for obj in getattr(schema, otype):
                    # this will be used later during datacopy to refer to the original object to copy from
                    obj.oldName = module_db.quoteIdentifier(
                        schema.name) + "." + module_db.quoteIdentifier(
                            obj.name)

        schema.name = catalog.name
        if not prefix:
            known_names = dict(
                (otype, set(obj.name for obj in getattr(schema, otype)))
                for otype in otypes)

        for other_schema in list(catalog.schemata)[1:]:
            if other_schema.defaultCharacterSetName != schema.defaultCharacterSetName:
                grt.send_warning(
                    'While merging schema %s into %s: Default charset for schemas differs (%s vs %s). Setting default charset to %s'
                    % (other_schema.name, schema.name,
                       other_schema.defaultCharacterSetName,
                       schema.defaultCharacterSetName,
                       schema.defaultCharacterSetName))
                self.main.plan.state.addMigrationLogEntry(
                    0, other_schema, None,
                    'While merging schema %s into %s: Default charset for schemas differs (%s vs %s). Setting default charset to %s'
                    % (other_schema.name, schema.name,
                       other_schema.defaultCharacterSetName,
                       schema.defaultCharacterSetName,
                       schema.defaultCharacterSetName))

            if other_schema.defaultCollationName != schema.defaultCollationName:
                grt.send_warning(
                    'While merging schema %s into %s: Default collation for schemas differs (%s vs %s). Setting default collation to %s'
                    %
                    (other_schema.name, schema.name,
                     other_schema.defaultCollationName,
                     schema.defaultCollationName, schema.defaultCollationName))
                self.main.plan.state.addMigrationLogEntry(
                    0, other_schema, None,
                    'While merging schema %s into %s: Default collation for schemas differs (%s vs %s). Setting default collation to %s'
                    %
                    (other_schema.name, schema.name,
                     other_schema.defaultCollationName,
                     schema.defaultCollationName, schema.defaultCollationName))

            for otype in otypes:
                other_objects = getattr(other_schema, otype)
                if not prefix:
                    repeated_object_names = known_names[otype].intersection(
                        obj.name for obj in other_objects)
                    if repeated_object_names:
                        objects_dict = dict(
                            (obj.name, obj) for obj in other_objects)
                        for repeated_object_name in repeated_object_names:
                            objects_dict[
                                repeated_object_name].name += '_' + other_schema.name
                            grt.send_warning(
                                'The name of the %(otype)s "%(oname)s" conflicts with other %(otype)s names: renamed to "%(onewname)s"'
                                % {
                                    'otype':
                                    otype[:-1],
                                    'oname':
                                    repeated_object_name,
                                    'onewname':
                                    objects_dict[repeated_object_name].name
                                })

                            self.main.plan.state.addMigrationLogEntry(
                                0, other_schema, None,
                                'The name of the %(otype)s "%(oname)s" conflicts with other %(otype)s names: renamed to "%(onewname)s"'
                                % {
                                    'otype':
                                    otype[:-1],
                                    'oname':
                                    repeated_object_name,
                                    'onewname':
                                    objects_dict[repeated_object_name].name
                                })
                        known_names[otype].update(obj.name for obj in other_objects)
                else:
                    actual_prefix = (other_schema.name if prefix
                                     == 'schema_name' else other_schema.__id__) + '_'

                getattr(schema, otype).extend(other_objects)
                for obj in other_objects:
                    # this will be used later during datacopy to refer to the original object to copy from
                    obj.oldName = module_db.quoteIdentifier(
                        obj.owner.name) + "." + module_db.quoteIdentifier(
                            obj.name)

                    obj.owner = schema
                    if prefix:
                        oname = obj.name
                        obj.name = actual_prefix + obj.name
                        grt.send_info("Object %s was renamed to %s" %
                                      (oname, obj.name))

        # Keep only the merged schema:
        catalog.schemata.remove_all()
        catalog.schemata.append(schema)
    def reverseEngineer(cls, connection, catalog_name, schemata_list, context):
        grt.send_progress(0, "Reverse engineering catalog information")
        cls.check_interruption()
        catalog = cls.reverseEngineerCatalog(connection, catalog_name)

        # calculate total workload 1st
        grt.send_progress(0.1, 'Preparing...')
        table_count_per_schema = {}
        view_count_per_schema = {}
        routine_count_per_schema = {}
        trigger_count_per_schema = {}
        total_count_per_schema = {}

        get_tables = context.get("reverseEngineerTables", True)
        get_triggers = context.get("reverseEngineerTriggers", True)
        get_views = context.get("reverseEngineerViews", True)
        get_routines = context.get("reverseEngineerRoutines", True)

        # 10% of the progress is for preparation
        total = 1e-10  # total should not be zero, to avoid ZeroDivisionError
        i = 0.0
        accumulated_progress = 0.1
        for schema_name in schemata_list:
            cls.check_interruption()
            table_count_per_schema[schema_name] = len(cls.getTableNames(connection, catalog_name, schema_name)) if get_tables else 0
            view_count_per_schema[schema_name] = len(cls.getViewNames(connection, catalog_name, schema_name)) if get_views else 0
            cls.check_interruption()
            routine_count_per_schema[schema_name] = len(cls.getProcedureNames(connection, catalog_name, schema_name)) + len(cls.getFunctionNames(connection, catalog_name, schema_name)) if get_routines else 0
            trigger_count_per_schema[schema_name] = len(cls.getTriggerNames(connection, catalog_name, schema_name)) if get_triggers else 0

            total_count_per_schema[schema_name] = (table_count_per_schema[schema_name] + view_count_per_schema[schema_name] +
                                                   routine_count_per_schema[schema_name] + trigger_count_per_schema[schema_name] + 1e-10)
            total += total_count_per_schema[schema_name]

            grt.send_progress(accumulated_progress + 0.1 * (i / (len(schemata_list) + 1e-10) ), "Gathered stats for %s" % schema_name)
            i += 1.0

        # Now take 60% in the first pass of reverse engineering:
        accumulated_progress = 0.2
        for schema_name in schemata_list:
            schema_progress_share = 0.6 * (total_count_per_schema.get(schema_name, 0.0) / total)
            schema = find_object_with_name(catalog.schemata, schema_name) 

            if schema:
                # Reverse engineer tables:
                step_progress_share = schema_progress_share * (table_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_tables:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering tables from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    # Remove previous first-pass marks that may exist if the user goes back and attempts rev eng again:
                    progress_flags = cls._connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
                    progress_flags.discard('%s_tables_first_pass' % schema_name)
                    cls.reverseEngineerTables(connection, schema)
                    grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'First pass of table reverse engineering for schema %s completed!' % schema_name)
        
                # Reverse engineer views:
                step_progress_share = schema_progress_share * (view_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_views:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering views from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    cls.reverseEngineerViews(connection, schema)
                    grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Reverse engineering of views for schema %s completed!' % schema_name)
        
                # Reverse engineer routines:
                step_progress_share = schema_progress_share * (routine_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_routines:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering routines from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    grt.begin_progress_step(0.0, 0.5)
                    cls.reverseEngineerProcedures(connection, schema)
                    cls.check_interruption()
                    grt.end_progress_step()
                    grt.begin_progress_step(0.5, 1.0)
                    cls.reverseEngineerFunctions(connection, schema)
                    grt.end_progress_step()
                    grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Reverse engineering of routines for schema %s completed!' % schema_name)
        
                # Reverse engineer triggers:
                step_progress_share = schema_progress_share * (trigger_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_triggers:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering triggers from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    cls.reverseEngineerTriggers(connection, schema)
                    grt.end_progress_step()
        
                accumulated_progress = 0.8
                grt.send_progress(accumulated_progress, 'Reverse engineering of triggers for schema %s completed!' % schema_name)
            else:  # No schema with the given name was found
                grt.send_warning('The schema %s was not found in the catalog %s. Skipping it.' % (schema_name, catalog_name) )
                
        # Now the second pass for reverse engineering tables:
        if get_tables:
            total_tables = sum(table_count_per_schema[schema.name] for schema in catalog.schemata if schema.name in schemata_list)
            for schema in catalog.schemata:
                if schema.name not in schemata_list:
                    continue
                cls.check_interruption()
                step_progress_share = 0.2 * (table_count_per_schema[schema.name] / (total_tables + 1e-10))
                grt.send_info('Reverse engineering foreign keys for tables in schema %s' % schema.name)
                grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                cls.reverseEngineerTables(connection, schema)
                grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Second pass of table reverse engineering for schema %s completed!' % schema.name)
            

        grt.send_progress(1.0, 'Reverse engineering completed!')
        return catalog
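
# --- Illustrative sketch (not part of the example above) ---------------------
# Progress accounting used by the class-based reverseEngineer above: 10%
# preparation, 60% first pass split across schemas by object count, 20% for
# the foreign-key second pass; the 1e-10 terms guard against division by zero.
def schema_share(counts, schema_name, pass_fraction=0.6):
    """Fraction of the progress bar one schema gets within a pass."""
    total = sum(counts.values()) + 1e-10
    return pass_fraction * (counts.get(schema_name, 0.0) / total)

# e.g. schema_share({'sales': 40, 'hr': 10}, 'sales') -> ~0.48
# ------------------------------------------------------------------------------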
def reverseEngineerTableColumns(connection, table):
    schema = table.owner
    catalog = schema.owner
    execute_query(connection, 'USE %s' % quoteIdentifier(catalog.name))
    query = """SELECT  ISNULL(C.name, '') AS COLUMN_NAME, T.name AS DATA_TYPE,
        C.length AS CHARACTER_MAXIMUM_LENGTH, C.prec AS NUMERIC_PRECISION,
        C.scale AS NUMERIC_SCALE, CONVERT(BIT, (C.status & 0x08)) AS IS_NULLABLE,
        CONVERT(BIT, (C.status & 0x80)) AS IS_IDENTITY_COLUMN, K.text AS COLUMN_DEFAULT
        FROM syscolumns C, systypes T, sysobjects A, syscomments K
        WHERE USER_NAME(A.uid) = ? AND
        A.id = C.id AND C.id = OBJECT_ID(?) AND
        C.usertype *= T.usertype AND
        C.cdefault *= K.id
        ORDER BY C.colid"""

    rows = execute_query(connection, query, schema.name, table.name)

    sybase_rdbms_instance = get_sybase_rdbms_instance()
    sybase_simple_datatypes_list = [
        datatype.name for datatype in sybase_rdbms_instance.simpleDatatypes
    ]
    user_datatypes_list = [datatype.name for datatype in catalog.userDatatypes]

    col_names = [col_description[0] for col_description in rows.description]
    for row in rows:
        row_values = dict(nameval for nameval in zip(col_names, row))
        column = grt.classes.db_sybase_Column()
        column.name = row_values['COLUMN_NAME'] or ''
        column.isNotNull = not row_values['IS_NULLABLE']
        column.collationName = row_values.get(
            'COLLATION_NAME',
            '')  # TODO: find a way to get the column's collation
        column.length = row_values['CHARACTER_MAXIMUM_LENGTH'] or 0
        column.precision = row_values['NUMERIC_PRECISION'] if row_values[
            'NUMERIC_PRECISION'] is not None else -1
        column.scale = row_values['NUMERIC_SCALE'] if row_values[
            'NUMERIC_SCALE'] is not None else -1
        column.comment = row_values.get(
            'COLUMN_COMMENT',
            '')  # TODO: find a way to get the column's comments
        column.identity = row_values['IS_IDENTITY_COLUMN'] or 0
        user_datatype = None
        try:
            datatype = sybase_simple_datatypes_list.index(
                row_values['DATA_TYPE'].upper())
        except ValueError:
            try:
                user_datatype = catalog.userDatatypes[
                    user_datatypes_list.index(row_values['DATA_TYPE'].upper())]
            except (ValueError, TypeError):
                user_datatype = None
                datatype = sybase_simple_datatypes_list.index('VARCHAR')
                column.length = 255
                msg = 'Column datatype "%s" for column "%s" in table "%s.%s" reverse engineered as VARCHAR(255)' % (
                    row_values['DATA_TYPE'].upper(), column.name, schema.name,
                    table.name)
                grt.send_warning('Sybase reverseEngineerTableColumns', msg)
            else:
                datatype = None
                column.userType = user_datatype

        if datatype is not None:
            column.simpleType = sybase_rdbms_instance.simpleDatatypes[datatype]

        default_value = row_values['COLUMN_DEFAULT']

        if default_value is not None and default_value.startswith('DEFAULT '):
            column.defaultValue = default_value[8:]

        table.addColumn(column)

        # TODO: charset name

    return 0
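
# --- Illustrative sketch (not part of the example above) ---------------------
# The COLUMN_DEFAULT text fetched from syscomments is the literal constraint
# source, e.g. "DEFAULT getdate()", which is why the function above keeps only
# what follows the 8-character "DEFAULT " prefix.
def strip_default_prefix(text):
    prefix = 'DEFAULT '
    if text is not None and text.startswith(prefix):
        return text[len(prefix):]
    return text
# ------------------------------------------------------------------------------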
Example #32
0
def reverseEngineer(connection, catalog_name, schemata_list, context):
    catalog = grt.classes.db_mysql_Catalog()
    catalog.name = catalog_name
    catalog.simpleDatatypes.remove_all()
    catalog.simpleDatatypes.extend(connection.driver.owner.simpleDatatypes)
    
    table_names_per_schema = {}
    routine_names_per_schema = {}
    trigger_names_per_schema = {}
    
    def filter_warnings(mtype, text, detail):
        # filter out parser warnings about stub creation/reuse from the message stream, since
        # they're harmless
        if mtype == "WARNING" and (" stub " in text or "Stub " in text):
            grt.send_info(text)
            return True
        return False
    
    version = getServerVersion(connection)
    
    get_tables = context.get("reverseEngineerTables", True)
    get_triggers = context.get("reverseEngineerTriggers", True) and (version.majorNumber, version.minorNumber, version.releaseNumber) >= (5, 1, 21)
    get_views = context.get("reverseEngineerViews", True)
    get_routines = context.get("reverseEngineerRoutines", True)
    
    # calculate total workload 1st
    
    # 10% of the progress is for preparation
    
    grt.send_progress(0, "Preparing...")
    total = 0
    i = 0.0
    for schema_name in schemata_list:
        check_interruption()
        if get_tables and get_views:
            table_names = getAllTableNames(connection, catalog_name, schema_name)
        elif get_tables:
            table_names = getTableNames(connection, catalog_name, schema_name)
        elif get_views:
            table_names = getViewNames(connection, catalog_name, schema_name)
        else:
            table_names = []
        total += len(table_names)
        table_names_per_schema[schema_name] = table_names
        check_interruption()
        if get_routines:
            procedure_names = getProcedureNames(connection, catalog_name, schema_name)
            check_interruption()
            function_names = getFunctionNames(connection, catalog_name, schema_name)
            check_interruption()
            total += len(procedure_names)
            total += len(function_names)
            routine_names_per_schema[schema_name] = procedure_names, function_names
        else:
            routine_names_per_schema[schema_name] = [], []
        if get_triggers:
            trigger_names = getTriggerNames(connection, catalog_name, schema_name)
            total += len(trigger_names)
        else:
            trigger_names = []
        trigger_names_per_schema[schema_name] = trigger_names
        
        grt.send_progress(0.1 * (i/len(schemata_list)), "Preparing...")
        i += 1.0

    def wrap_sql(sql, schema):
        return "USE `%s`;\n%s"%(escape_sql_identifier(schema), sql)

    def wrap_routine_sql(sql):
        return "DELIMITER $$\n"+sql

    i = 0.0
    for schema_name in schemata_list:
        schema = grt.classes.db_mysql_Schema()
        schema.owner = catalog
        schema.name = schema_name
        catalog.schemata.append(schema)

        if get_tables or get_views:
            grt.send_info("Reverse engineering tables from %s" % schema_name)
            for table_name in table_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving table %s.%s..." % (schema_name, table_name))
                result = execute_query(connection, "SHOW CREATE TABLE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(table_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, table_name))
                if result and result.nextRow():
                    sql = result.stringByIndex(2)
                    grt.push_message_handler(filter_warnings)
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(sql, schema_name))
                    grt.end_progress_step()
                    grt.pop_message_handler()
                    i += 0.5
                else:
                    raise Exception("Could not fetch table information for %s.%s" % (schema_name, table_name))

        if get_triggers:
            grt.send_info("Reverse engineering triggers from %s" % schema_name)
            for trigger_name in trigger_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving trigger %s.%s..." % (schema_name, trigger_name))
                result = execute_query(connection, "SHOW CREATE TRIGGER `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(trigger_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, trigger_name))
                if result and result.nextRow():
                    sql = result.stringByName("SQL Original Statement")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(wrap_routine_sql(sql), schema_name))
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch trigger information for %s.%s" % (schema_name, trigger_name))
        
        if get_routines:
            grt.send_info("Reverse engineering stored procedures from %s" % schema_name)
            procedure_names, function_names = routine_names_per_schema[schema_name]
            for name in procedure_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving stored procedure %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE PROCEDURE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Procedure")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(wrap_routine_sql(sql), schema_name))
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch procedure information for %s.%s" % (schema_name, name))

            grt.send_info("Reverse engineering functions from %s" % schema_name)
            for name in function_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving function %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE FUNCTION `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Function")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(wrap_routine_sql(sql), schema_name))
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch function information for %s.%s" % (schema_name, name))

    grt.send_progress(1.0, "Reverse engineered %i objects" % total)
    
    # check for any stub tables left
    empty_schemas = []
    for schema in catalog.schemata:
        schema_has_stub_tables = False
        for table in reversed(schema.tables):
            if table.isStub:
                grt.send_warning("Table %s was referenced from another table, but was not reverse engineered" % table.name)
                schema.tables.remove(table)
                schema_has_stub_tables = True
        if not schema.tables and not schema.views and not schema.routines and schema_has_stub_tables:
            empty_schemas.append(schema)
    for schema in empty_schemas:
        catalog.schemata.remove(schema)

    return catalog
    def reverseEngineer(cls, connection, catalog_name, schemata_list, context):
        grt.send_progress(0, "Reverse engineering catalog information")
        cls.check_interruption()
        catalog = cls.reverseEngineerCatalog(connection, catalog_name)

        # calculate total workload 1st
        grt.send_progress(0.1, 'Preparing...')
        table_count_per_schema = {}
        view_count_per_schema = {}
        routine_count_per_schema = {}
        trigger_count_per_schema = {}
        total_count_per_schema = {}

        get_tables = context.get("reverseEngineerTables", True)
        get_triggers = context.get("reverseEngineerTriggers", True)
        get_views = context.get("reverseEngineerViews", True)
        get_routines = context.get("reverseEngineerRoutines", True)

        # 10% of the progress is for preparation
        total = 1e-10  # total should not be zero, to avoid ZeroDivisionError
        i = 0.0
        accumulated_progress = 0.1
        for schema_name in schemata_list:
            cls.check_interruption()
            table_count_per_schema[schema_name] = len(cls.getTableNames(connection, catalog_name, schema_name)) if get_tables else 0
            view_count_per_schema[schema_name] = len(cls.getViewNames(connection, catalog_name, schema_name)) if get_views else 0
            cls.check_interruption()
            routine_count_per_schema[schema_name] = len(cls.getProcedureNames(connection, catalog_name, schema_name)) + len(cls.getFunctionNames(connection, catalog_name, schema_name)) if get_routines else 0
            trigger_count_per_schema[schema_name] = len(cls.getTriggerNames(connection, catalog_name, schema_name)) if get_triggers else 0

            total_count_per_schema[schema_name] = (table_count_per_schema[schema_name] + view_count_per_schema[schema_name] +
                                                   routine_count_per_schema[schema_name] + trigger_count_per_schema[schema_name] + 1e-10)
            total += total_count_per_schema[schema_name]

            grt.send_progress(accumulated_progress + 0.1 * (i / (len(schemata_list) + 1e-10) ), "Gathered stats for %s" % schema_name)
            i += 1.0

        # Now take 60% in the first pass of reverse engineering:
        accumulated_progress = 0.2
        for schema_name in schemata_list:
            schema_progress_share = 0.6 * (total_count_per_schema.get(schema_name, 0.0) / total)
            schema = find_object_with_name(catalog.schemata, schema_name) 

            if schema:
                # Reverse engineer tables:
                step_progress_share = schema_progress_share * (table_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_tables:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering tables from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    # Remove previous first-pass marks that may exist if the user goes back and attempts rev eng again:
                    progress_flags = cls._connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
                    progress_flags.discard('%s_tables_first_pass' % schema_name)
                    cls.reverseEngineerTables(connection, schema)
                    grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'First pass of table reverse engineering for schema %s completed!' % schema_name)
        
                # Reverse engineer views:
                step_progress_share = schema_progress_share * (view_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_views:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering views from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    cls.reverseEngineerViews(connection, schema)
                    grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Reverse engineering of views for schema %s completed!' % schema_name)
        
                # Reverse engineer routines:
                step_progress_share = schema_progress_share * (routine_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_routines:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering routines from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    grt.begin_progress_step(0.0, 0.5)
                    cls.reverseEngineerProcedures(connection, schema)
                    cls.check_interruption()
                    grt.end_progress_step()
                    grt.begin_progress_step(0.5, 1.0)
                    cls.reverseEngineerFunctions(connection, schema)
                    grt.end_progress_step()
                    grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Reverse engineering of routines for schema %s completed!' % schema_name)
        
                # Reverse engineer triggers:
                step_progress_share = schema_progress_share * (trigger_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_triggers:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering triggers from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    cls.reverseEngineerTriggers(connection, schema)
                    grt.end_progress_step()
        
                accumulated_progress = 0.8
                grt.send_progress(accumulated_progress, 'Reverse engineering of triggers for schema %s completed!' % schema_name)
            else:  # No schema with the given name was found
                grt.send_warning('The schema %s was not found in the catalog %s. Skipping it.' % (schema_name, catalog_name) )
                
        # Now the second pass for reverse engineering tables:
        if get_tables:
            total_tables = sum(table_count_per_schema[schema.name] for schema in catalog.schemata if schema.name in schemata_list)
            for schema in catalog.schemata:
                if schema.name not in schemata_list:
                    continue
                cls.check_interruption()
                step_progress_share = 0.2 * (table_count_per_schema[schema.name] / (total_tables + 1e-10))
                grt.send_info('Reverse engineering foreign keys for tables in schema %s' % schema.name)
                grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                cls.reverseEngineerTables(connection, schema)
                grt.end_progress_step()
        
                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Second pass of table reverse engineering for schema %s completed!' % schema.name)
            

        grt.send_progress(1.0, 'Reverse engineering completed!')
        return catalog
def reverseEngineerUserDatatypes(connection, catalog):
    base_types = dict((
        (34, 'IMAGE'),
        (35, 'TEXT'),
        (36, 'EXTENDED TYPE'),
        (37, 'TIMESTAMP'),
        (38, 'INTN'),
        (39, 'VARCHAR'),
        (45, 'BINARY'),
        (47, 'CHAR'),
        (48, 'TINYINT'),
        (49, 'DATE'),
        (50, 'BIT'),
        (51, 'TIME'),
        (52, 'SMALLINT'),
        (55, 'DECIMAL'),
        (56, 'INT'),
        (58, 'SMALLDATETIME'),
        (59, 'REAL'),
        (60, 'MONEY'),
        (61, 'DATETIME'),
        (62, 'FLOAT'),
        (63, 'NUMERIC'),
        (65, 'USMALLINT'),
        (66, 'UINT'),
        (67, 'UBIGINT'),
        (68, 'UINTN'),
        (106, 'DECIMALN'),
        (108, 'NUMERICN'),
        (109, 'FLOATN'),
        (110, 'MONEYN'),
        (111, 'DATETIMN'),
        (122, 'SMALLMONEY'),
        (123, 'DATEN'),
        (135, 'UNICHAR'),
        (147, 'TIMEN'),
        (155, 'UNIVARCHAR'),
        (169, 'TEXT_LOCATOR'),
        (170, 'IMAGE_LOCATOR'),
        (171, 'UNITEXT_LOCATOR'),
        (174, 'UNITEXT'),
        (187, 'BIGDATETIMEN'),
        (188, 'BIGTIMEN'),
        (189, 'BIGDATETIME'),
        (190, 'BIGTIME'),
        (191, 'BIGINT'),
    ))

    query = """SELECT name, length, prec, scale, allownulls, type
FROM systypes
WHERE accessrule != NULL"""

    execute_query(connection, 'USE %s' % quoteIdentifier(catalog.name))
    sybase_rdbms_instance = get_sybase_rdbms_instance()
    catalog.userDatatypes.remove_all()
    for name, length, precision, scale, is_nullable, base_type in execute_query(
            connection, query):
        datatype = grt.classes.db_sybase_UserDatatype()
        datatype.name = name.upper()
        datatype.characterMaximumLength = length
        datatype.numericPrecision = precision
        datatype.numericScale = scale
        datatype.isNullable = is_nullable
        simple_type = find_object_with_name(
            sybase_rdbms_instance.simpleDatatypes, base_types[base_type])
        if simple_type:
            datatype.actualType = simple_type
        else:
            grt.send_warning(
                'Sybase reverseEngineerUserDatatypes',
                'Could not find base type "%s" for user-defined type "%s"' %
                (base_type, name))
        catalog.userDatatypes.append(datatype)
    return 0
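
# --- Illustrative sketch (not part of the example above) ---------------------
# Expected behavior of the find_object_with_name helper used throughout these
# examples, inferred from its call sites: a linear scan over a GRT object
# list comparing each element's .name attribute.
def find_object_with_name(object_list, name):
    for obj in object_list:
        if obj.name == name:
            return obj
    return None
# ------------------------------------------------------------------------------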