def task_test_target(self):
     grt.send_progress(-1, "Connecting to target...")
     attempt = 0
     if self.main.plan.migrationTarget.connection.hostIdentifier == self.main.plan.migrationSource.connection.hostIdentifier:
         if self.main.plan.migrationTarget.connection.parameterValues[
                 'userName'] == self.main.plan.migrationSource.connection.parameterValues[
                     'userName']:
             self.main.plan.migrationTarget.password = self.main.plan.migrationSource.password
     force_password = False
     while True:
         try:
             if not self.main.plan.migrationTarget.checkConnection():
                 raise Exception("Could not connect to target RDBMS")
             if self.main.plan.migrationTarget.password is None:  # no password and succeeded, assume blank
                 self.main.plan.migrationTarget.password = ""
             self.main.plan.migrationTarget.checkVersion()
             break
         except (DBLoginError, SystemError), e:
             if attempt > 0:
                 if isinstance(e, DBLoginError) and not force_password:
                     force_password = True
                 else:
                     raise e
             attempt += 1
             self.main.plan.migrationTarget.password = request_password(
                 self.main.plan.migrationTarget.connection)
 def task_connect(self):
     grt.send_progress(-1, "Connecting to source...")
     force_password = False
     attempt = 0
     while True:
         try:
             if not self.main.plan.migrationSource.connect():
                 raise Exception("Could not connect to source RDBMS")
             if self.main.plan.migrationSource.password is None: # no password and succeeded, assume blank
                 self.main.plan.migrationSource.password = ""
             self.main.plan.migrationSource.checkVersion()
             break
         except (DBLoginError, SystemError), e:
             if attempt == 0:
                 if "[Driver Manager]" in e.message and "image not found" in e.message:
                     show_missing_driver_error(e)
                     return
             if attempt > 0:
                 if isinstance(e, DBLoginError) and not force_password:
                     force_password = True
                 else:
                     #if mforms.Utilities.show_error("Connect to Source RDBMS", str(e), "Retry", "Cancel", "") != mforms.ResultOk:
                     raise e
             attempt += 1
             self.main.plan.migrationSource.password = request_password(self.main.plan.migrationSource.connection, force_password)
 def task_connect(self):
     """Connect to the source RDBMS, retrying with a password prompt on login errors."""
     grt.send_progress(-1, "Connecting to source...")
     force_password = False
     attempt = 0
     while True:
         try:
             if not self.main.plan.migrationSource.connect():
                 raise Exception("Could not connect to source RDBMS")
             if self.main.plan.migrationSource.password is None: # no password and succeeded, assume blank
                 self.main.plan.migrationSource.password = ""
             self.main.plan.migrationSource.checkVersion()
             break
         except (DBLoginError, SystemError), e:
             if attempt == 0:
                 # A missing ODBC driver binary is unrecoverable: report it and bail out.
                 if "[Driver Manager]" in e.message and "image not found" in e.message:
                     show_missing_driver_error(e)
                     return
             if attempt > 0:
                 # After the first retry, only a DBLoginError gets one more
                 # attempt (with a forced prompt); anything else propagates.
                 if isinstance(e, DBLoginError) and not force_password:
                     force_password = True
                 else:
                     #if mforms.Utilities.show_error("Connect to Source RDBMS", str(e), "Retry", "Cancel", "") != mforms.ResultOk:
                     raise e
             attempt += 1
             self.main.plan.migrationSource.password = request_password(self.main.plan.migrationSource.connection, force_password)
# ---- 示例#4 (Example #4) ----
    def reverseEngineerTables(cls, connection, schema):
        """Reverse engineer the tables of `schema` in two passes.

        First call: create the table objects with their columns, primary keys
        and indices.  Second call (flagged per schema): foreign keys, which
        require all table names and columns to already exist.
        Returns 0 on success.
        """
        # Since there are some reverse engineering stages that requires all table names and table columns
        # in the database to be set, these should be done after a first pass that rev engs their requirements
        # BUGFIX: the default container must be a set() -- progress_flags.add()
        # below would raise AttributeError on a list (sibling implementations
        # in this file already use set()).
        progress_flags = cls._connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
        is_first_pass = not ('%s_tables_first_pass' % schema.name) in progress_flags

        if is_first_pass:
            catalog = schema.owner
            schema.tables.remove_all()
            table_names = cls.getTableNames(connection, catalog.name, schema.name)
            total = len(table_names) + 1e-10  # avoid division by zero on empty schemas
            i = 0.0
            for table_name in table_names:
                grt.send_progress(i / total, 'Retrieving table %s.%s...' % (schema.name, table_name))
                table = grt.classes.db_Table()
                table.name = table_name
                schema.tables.append(table)
                table.owner = schema

                cls.reverseEngineerTableColumns(connection, table)
                cls.reverseEngineerTablePK(connection, table)
                cls.reverseEngineerTableIndices(connection, table)

                i += 1.0
            progress_flags.add('%s_tables_first_pass' % schema.name)
        else:  # Second pass
            i = 0.0
            total = len(schema.tables) + 1e-10
            for table in schema.tables:
                cls.reverseEngineerTableFKs(connection, table)
                grt.send_progress(i / total, 'Reverse engineering of foreign keys in table %s.%s completed' % (schema.name, table.name))
                i += 1.0

        return 0
def reverseEngineerProcedures(connection, schema):
    """Populate schema.routines with the stored procedures of the given Sybase schema."""
    execute_query(connection, 'USE %s' % schema.owner.name)  # switch to the schema's catalog

    query = """SELECT so.name AS procedure_name, sc.text as procedure_definition
FROM sysobjects so INNER JOIN syscomments sc ON so.id=sc.id
WHERE so.uid = USER_ID(?) AND so.type = 'P'
ORDER BY so.name, sc.colid"""

    procedure_count = len(getProcedureNames(connection, schema.owner.name, schema.name))
    step = 1.0 / (procedure_count + 1e-10)  # 1e-10 guards against division by zero
    result = execute_query(connection, query, schema.name)
    if result:
        # join_multiline_content reassembles definitions split across
        # syscomments rows and yields (index, name, full definition).
        for idx, name, definition in join_multiline_content('procedure_name', 'procedure_definition', result):
            grt.send_progress(idx * step, 'Reverse engineering procedure %s.%s' % (schema.name, name))
            routine = grt.classes.db_sybase_Routine()
            routine.owner = schema
            routine.name = name or ''
            routine.routineType = 'PROCEDURE'
            routine.sqlDefinition = definition
            schema.routines.append(routine)
    grt.send_progress(1.0, 'Finished reverse engineering of procedures for the %s schema.' % schema.name)
    return 0
# ---- 示例#6 (Example #6) ----
    def _validate_existing_schemata(self):
        """Check the target server for schemas/tables that already exist,
        ask the user whether to continue, and enable/disable the object
        creation task accordingly."""
        grt.send_progress(
            0.0, "Validating for existing schemas on target MySQL Server...")
        # Schemas selected for migration, uppercased for case-insensitive comparison.
        schema_set = set(
            schema.name.upper()
            for schema in self.main.plan.migrationTarget.catalog.schemata)
        # Schemas already present on the target server.
        target_schema_set = set(
            schema.upper() for schema in grt.modules.DbMySQLFE.getSchemaNames(
                self.main.plan.migrationTarget.connection))

        existing_schemas = list(schema_set.intersection(target_schema_set))

        continue_migration = True
        if len(existing_schemas) > 0:
            if self.main.plan.state.objectCreationParams.get(
                    "KeepSchemata", False):
                # Keeping schemas: collect the clashing tables per schema so the
                # user knows which ones will NOT be recreated.
                message = ''
                for schema in self.main.plan.migrationTarget.catalog.schemata:
                    if not schema.name.upper() in existing_schemas:
                        continue
                    target_schema_tables = set(
                        table_name.upper()
                        for table_name in grt.modules.DbMySQLFE.getTableNames(
                            self.main.plan.migrationTarget.connection,
                            schema.name))
                    existing_tables = [
                        table.name for table in schema.tables
                        if table.name.upper() in target_schema_tables
                    ]
                    if existing_tables:
                        message += 'In schema ' + schema.name + ':\n    '
                        message += ', '.join(existing_tables) + '\n'

                if message:
                    if mforms.Utilities.show_message(
                            "Existing Tables",
                            "The following tables already exist in their "
                            "target schemas:\n%sThey won't be recreated. Delete those "
                            "tables before continuing if you want them to be recreated. "
                            "Do you want to continue?" % message, "Yes", "No",
                            "") == mforms.ResultCancel:
                        continue_migration = False

            # Not keeping schemas: warn that the existing ones will be dropped.
            elif mforms.Utilities.show_message(
                    "Existing Schemas", "The %s %s " %
                ('schema' if len(existing_schemas) == 1 else 'schemas',
                 ", ".join(existing_schemas)) +
                    "will be dropped in the target MySQL Server and all the existing data will be"
                    + " lost. Do you want to continue?", "Yes", "No",
                    "") == mforms.ResultCancel:
                continue_migration = False

        self._db_task3.set_enabled(continue_migration)
def reverseEngineerTriggers(connection, schema):
    """Reverse engineer the triggers of a Sybase schema and attach each one
    to its parent table in schema.tables."""
    execute_query(connection, 'USE %s' % schema.owner.name)  # catalog

    # User tables ('U') that have at least one delete/insert/update trigger.
    tables_with_triggers_query = """SELECT name, deltrig, instrig, updtrig
FROM sysobjects
WHERE uid = USER_ID(?) AND type='U'
AND(deltrig != 0 OR instrig != 0 OR updtrig != 0)"""

    # Trigger source; long definitions span several syscomments rows
    # (ordered by colid) and are reassembled by join_multiline_content.
    trigger_details_query = """SELECT so.name AS trigger_name, sc.id AS trigger_id, sc.text AS trigger_definition
FROM syscomments sc JOIN sysobjects so ON sc.id=so.id
WHERE sc.id IN (%s)
ORDER BY so.name, sc.colid"""

    # Maps trigger id -> [parent table name, ';'-separated event list].
    triggers = {}
    for row in execute_query(connection, tables_with_triggers_query, schema.name):
        if row[1] != 0:
            triggers.setdefault(row[1], [row[0], ''])[1] += ';DELETE'
        if row[2] != 0:
            triggers.setdefault(row[2], [row[0], ''])[1] += ';INSERT'
        if row[3] != 0:
            triggers.setdefault(row[3], [row[0], ''])[1] += ';UPDATE'

    step = 1.0 / (len(getTriggerNames(connection, schema.owner.name, schema.name)) + 1e-10)  # avoid div-by-zero
    all_triggers = execute_query(connection, trigger_details_query % ', '.join(str(trig_id) for trig_id in triggers)) if triggers else None
    # Filled by the callback below as join_multiline_content consumes rows.
    trigger_name2id = {}
    def register_trigger_name(row):
        trigger_name2id[row[0]] = row[1]
    if all_triggers:
        for idx, trigger_name, trigger_definition in join_multiline_content('trigger_name', 'trigger_definition',
                                                                            all_triggers, register_trigger_name):
            grt.send_progress(idx * step, 'Reverse engineering trigger %s.%s' % (schema.name, trigger_name))
            trigger = grt.classes.db_sybase_Trigger()
            trigger.name = trigger_name or ''
            trigger.sqlDefinition = trigger_definition
            trigger.timing = 'AFTER'  # All Sybase ASE triggers are fired after the data is changed
#            trigger.orientation = 'ROW'  # TODO: This needs extra analysis
            trigger.enabled = 1  # TODO: Figure out how to tell the actual value
            trigger_table, trigger_events = triggers[trigger_name2id[trigger_name]]
            trigger.event = trigger_events.strip(';')  # It would take values as 'INSERT;UPDATE'
            trigger.owner = find_object_with_name(schema.tables, trigger_table)

            if trigger.owner:
                trigger.owner.triggers.append(trigger)
            else:
                grt.send_warning('Sybase reverseEngineerTriggers', 'Parent table not found for trigger "%s"' % trigger.name)

    grt.send_progress(1.0, 'Finished reverse engineering of triggers for the %s schema.' % schema.name)
    return 0
# ---- 示例#8 (Example #8) ----
def reverseEngineerTriggers(connection, schema):
    """Attach the triggers of the given Sybase schema to their parent tables."""
    execute_query(connection, 'USE %s' % schema.owner.name)  # switch to the schema's catalog

    tables_with_triggers_query = """SELECT name, deltrig, instrig, updtrig
FROM sysobjects
WHERE uid = USER_ID(?) AND type='U'
AND(deltrig != 0 OR instrig != 0 OR updtrig != 0)"""

    trigger_details_query = """SELECT so.name AS trigger_name, sc.id AS trigger_id, sc.text AS trigger_definition
FROM syscomments sc JOIN sysobjects so ON sc.id=so.id
WHERE sc.id IN (%s)
ORDER BY so.name, sc.colid"""

    # Map trigger id -> [parent table name, ';'-joined event list].
    triggers = {}
    for row in execute_query(connection, tables_with_triggers_query, schema.name):
        for column, event in ((1, ';DELETE'), (2, ';INSERT'), (3, ';UPDATE')):
            if row[column] != 0:
                triggers.setdefault(row[column], [row[0], ''])[1] += event

    step = 1.0 / (len(getTriggerNames(connection, schema.owner.name, schema.name)) + 1e-10)
    if triggers:
        id_list = ', '.join(str(trig_id) for trig_id in triggers)
        all_triggers = execute_query(connection, trigger_details_query % id_list)
    else:
        all_triggers = None
    # Filled by the callback as join_multiline_content consumes rows.
    trigger_name2id = {}
    def register_trigger_name(row):
        trigger_name2id[row[0]] = row[1]
    if all_triggers:
        for idx, trigger_name, trigger_definition in join_multiline_content('trigger_name', 'trigger_definition',
                                                                            all_triggers, register_trigger_name):
            grt.send_progress(idx * step, 'Reverse engineering trigger %s.%s' % (schema.name, trigger_name))
            trigger = grt.classes.db_sybase_Trigger()
            trigger.name = trigger_name or ''
            trigger.sqlDefinition = trigger_definition
            trigger.timing = 'AFTER'  # All Sybase ASE triggers are fired after the data is changed
            trigger.enabled = 1  # TODO: Figure out how to tell the actual value
            table_name, events = triggers[trigger_name2id[trigger_name]]
            trigger.event = events.strip(';')  # e.g. 'INSERT;UPDATE'
            trigger.owner = find_object_with_name(schema.tables, table_name)

            if trigger.owner:
                trigger.owner.triggers.append(trigger)
            else:
                grt.send_warning('Sybase reverseEngineerTriggers', 'Parent table not found for trigger "%s"' % trigger.name)

    grt.send_progress(1.0, 'Finished reverse engineering of triggers for the %s schema.' % schema.name)
    return 0
# ---- 示例#9 (Example #9) ----
    def migrateSchema(self, state, sourceSchema, targetCatalog):
        """Migrate one source schema to a new MySQL schema object.

        Creates the target schema, migrates charset/collation, name and
        comment, then migrates the schema contents (tables, views, routines)
        as a nested progress step.  Returns the new db_mysql_Schema.
        """
        targetSchema = grt.classes.db_mysql_Schema()
        targetSchema.owner = targetCatalog
        log = state.addMigrationLogEntry(0, sourceSchema, targetSchema, "")
        targetSchema.defaultCharacterSetName, targetSchema.defaultCollationName = self.migrateCharsetCollation(state, sourceSchema.defaultCharacterSetName, sourceSchema.defaultCollationName, sourceSchema, targetSchema)
        targetSchema.name = self.migrateIdentifier(sourceSchema.name, log)
        targetSchema.oldName = sourceSchema.name
        targetSchema.comment = sourceSchema.comment

        grt.send_progress(0.2, 'Migrating schema contents for schema %s' % sourceSchema.name)
        # Contents migration accounts for the remaining 80% of this step's progress.
        # (Removed a dead `if True:` wrapper around this section.)
        grt.begin_progress_step(0.2, 1.0)
        self.migrateSchemaContents(state, targetSchema, sourceSchema)
        grt.end_progress_step()

        return targetSchema
    def _validate_existing_schemata(self):
        """Warn when schemas to be migrated already exist on the target server;
        disable the DB creation task if the user cancels."""
        grt.send_progress(0.0, "Validating for existing schemas on target MySQL Server...")
        migrated = set(schema.name.upper() for schema in self.main.plan.migrationTarget.catalog.schemata)
        on_server = set(schema.upper() for schema in grt.modules.DbMySQLFE.getSchemaNames(self.main.plan.migrationTarget.connection))

        existing_schemas = list(migrated.intersection(on_server))

        continue_migration = True
        if len(existing_schemas) > 0:
            noun = 'schema' if len(existing_schemas) == 1 else 'schemata'
            warning = ("The %s %s " % (noun, ", ".join(existing_schemas)) +
                       "will be dropped in the target MySQL Server and all the existing data will be" +
                       " lost. Do you want to continue?")
            if mforms.Utilities.show_message("Existing Schemas", warning, "Yes", "No", "") == mforms.ResultCancel:
                continue_migration = False

        self._db_task3.set_enabled(continue_migration)
# ---- 示例#11 (Example #11) ----
def reverseEngineerTables(connection, schema):
    """Reverse engineer the tables of a Sybase schema in two passes.

    First call: table objects with columns, primary keys and indices.
    Second call (flagged per schema): foreign keys, which require all
    table names and columns to already exist.
    Returns 0.
    """
    # Since there are some reverse engineering stages that requires all table names and table columns
    # in the database to be set, these should be done after a first pass that rev engs their requirements
    progress_flags = _connections[connection.__id__].setdefault(
        '_rev_eng_progress_flags', set())
    is_first_pass = not ('%s_tables_first_pass' %
                         schema.name) in progress_flags

    if is_first_pass:
        catalog = schema.owner
        execute_query(connection, 'USE %s' % quoteIdentifier(catalog.name))

        schema.tables.remove_all()
        # TODO: Add real create table comments instead of empty string in the following line:
        table_names = [(table_name, '') for table_name in getTableNames(
            connection, catalog.name, schema.name)]
        total = len(table_names) + 1e-10  # avoid division by zero on empty schemas
        i = 0.0
        for table_name, table_comment in table_names:
            grt.send_progress(
                i / total,
                'Retrieving table %s.%s...' % (schema.name, table_name))
            table = grt.classes.db_sybase_Table()
            table.name = table_name
            schema.tables.append(table)
            table.owner = schema
            table.comment = table_comment or ''  # table_comment can be None

            reverseEngineerTableColumns(connection, table)
            reverseEngineerTablePK(connection, table)
            reverseEngineerTableIndices(connection, table)

            i += 1.0
        progress_flags.add('%s_tables_first_pass' % schema.name)
    else:  # Second pass
        i = 1.0
        total = len(schema.tables) + 1e-10
        for table in schema.tables:
            reverseEngineerTableFKs(connection, table)
            grt.send_progress(
                i / total,
                'Reverse engineering of foreign keys in table %s.%s completed'
                % (schema.name, table.name))
            i += 1.0

    return 0
 def reverseEngineerViews(cls, connection, schema):
     """Rebuild schema.views from INFORMATION_SCHEMA for the given schema."""
     query = """SELECT TABLE_NAME, VIEW_DEFINITION
     FROM INFORMATION_SCHEMA.VIEWS
     WHERE TABLE_CATALOG ='%s' AND TABLE_SCHEMA = '%s'""" % (schema.owner.name, schema.name)
     schema.views.remove_all()
     view_count = len(cls.getViewNames(connection, schema.owner.name, schema.name))
     step = 1.0 / (view_count + 1e-10)  # 1e-10 avoids division by zero when there are no views
     for idx, (view_name, view_definition) in enumerate(cls.execute_query(connection, query)):
         grt.send_progress(idx * step, 'Reverse engineering view %s.%s' % (schema.name, view_name))
         view = grt.classes.db_View()
         view.name = view_name or ''
         view.owner = schema
         view.sqlDefinition = view_definition
         schema.views.append(view)
     return 0
# ---- 示例#13 (Example #13) ----
    def migrateCatalog(self, state, source_catalog):
        """Migrate a whole source catalog to a new MySQL catalog.

        Migrates every schema (first 90% of the progress range), then runs a
        second pass over all tables to migrate foreign keys, which may
        reference objects in other schemas.  Returns the new db_mysql_Catalog.
        """
        target_catalog = grt.classes.db_mysql_Catalog()
        log = state.addMigrationLogEntry(0, source_catalog, target_catalog, "")
        target_catalog.name = self.migrateIdentifier(source_catalog.name, log)
        target_catalog.oldName = source_catalog.name
        targetRdbms = state.targetConnection.driver.owner
        target_catalog.simpleDatatypes.extend(targetRdbms.simpleDatatypes)
        state.targetCatalog = target_catalog

        # set the version of the target database, defaulting to 5.5.0 when unknown
        # (hoisted the duplicated GrtVersion construction out of both branches)
        targetVersion = grt.classes.GrtVersion()
        targetVersion.owner = target_catalog
        if state.targetVersion:
            aTargetVersion = state.targetVersion
            targetVersion.majorNumber, targetVersion.minorNumber, targetVersion.releaseNumber, targetVersion.buildNumber = aTargetVersion.majorNumber, aTargetVersion.minorNumber, aTargetVersion.releaseNumber, aTargetVersion.buildNumber
            targetVersion.name = aTargetVersion.name
        else:
            targetVersion.majorNumber, targetVersion.minorNumber, targetVersion.releaseNumber, targetVersion.buildNumber = (5, 5, 0, 0)
            targetVersion.name = "5.5.0"
        target_catalog.version = targetVersion

        # (Removed a dead `if True:` wrapper around the migration body.)
        grt.send_progress(0.0, "Migrating...")

        schema_count = len(source_catalog.schemata) + 1e-10  # loop-invariant; avoids div-by-zero
        i = 0.0
        # migrate all source schemata to target schemata
        for sourceSchema in source_catalog.schemata:
            grt.begin_progress_step(0.9 * i / schema_count, 0.9 * (i + 1) / schema_count)
            grt.send_progress(0.9 * i / schema_count, "Migrating schema %s..." % sourceSchema.name)
            # migrate schema
            targetSchema = self.migrateSchema(state, sourceSchema, target_catalog)
            if targetSchema:
                # add generated schema to target_catalog
                target_catalog.schemata.append(targetSchema)
            grt.end_progress_step()
            i += 1

        grt.send_progress(0.9, "Finalizing foreign key migration...")

        # migrate foreign keys last, as they need the referenced objects which can be from different schemas to be ready
        for sourceSchema in source_catalog.schemata:
            global key_names
            key_names[sourceSchema.name] = set()
            targetSchema = self.findMatchingTargetObject(state, sourceSchema)
            for sourceTable in sourceSchema.tables:
                if not self.shouldMigrate(state, 'tables', sourceTable):
                    continue
                targetTable = self.findMatchingTargetObject(state, sourceTable)
                self.migrateTableToMySQL2ndPass(state, sourceTable, targetTable)

        grt.send_progress(1.0, "Migration finished")

        return target_catalog
# ---- 示例#14 (Example #14) ----
    def reverseEngineer(cls, connection, catalog_name, schemata_list, context):
        """Reverse engineer a SQLite catalog by parsing sqlite_master SQL.

        Returns the reverse-engineered catalog object.
        """
        from grt.modules import MysqlSqlFacade
        grt.send_progress(0, "Reverse engineering catalog information")
        cls.check_interruption()
        catalog = cls.reverseEngineerCatalog(connection, catalog_name)

        # calculate total workload 1st
        grt.send_progress(0.1, 'Preparing...')

        get_tables = context.get("reverseEngineerTables", True)

        # 10% of the progress is for preparation
        total = 1e-10  # total should not be zero to avoid DivisionByZero exceptions
        accumulated_progress = 0.1
        total += len(cls.getTableNames(connection, catalog_name,
                                       '')) if get_tables else 0

        grt.send_progress(0.1, "Gathered stats")

        # Now the second pass for reverse engineering tables:
        if get_tables:
            idx = 0
            for object_type, name, tbl_name, _, sql in cls.execute_query(
                    connection, "SELECT * FROM sqlite_master"):
                # BUGFIX: compare the row's object_type, not the builtin
                # `type` (the original tested `type in (...)`, which is always
                # False, so views/triggers were never skipped).
                if object_type in ('view', 'trigger'
                                   ) or not sql or tbl_name.startswith('sqlite_'):
                    continue

                # Strip SQLite's [identifier] quoting, which MySQL's parser rejects.
                sql = sql.replace('[', '').replace(']', '')

                grt.log_debug('SQLiteReverseEngineering',
                              'Procesing this sql:\n%s;' % sql)

                MysqlSqlFacade.parseSqlScriptString(catalog, sql)

                cls.check_interruption()
                grt.send_progress(0.1 + idx / total,
                                  'Object %s reverse engineered!' % name)
                idx += 1

        grt.send_progress(1.0, 'Reverse engineering completed!')
        return catalog
# ---- 示例#15 (Example #15) ----
def reverseEngineerViews(connection, schema):
    """Rebuild schema.views from syscomments for the given Sybase schema."""
    execute_query(connection, 'USE %s' % schema.owner.name)  # switch to the schema's catalog

    query = """SELECT so.name AS view_name, sc.text AS view_definition
FROM sysobjects so JOIN syscomments sc on so.id=sc.id
WHERE so.uid=USER_ID(?) AND so.type='V'
ORDER BY so.name, sc.colid"""

    schema.views.remove_all()
    view_total = len(getViewNames(connection, schema.owner.name, schema.name))
    step = 1.0 / (view_total + 1e-10)  # 1e-10 guards against division by zero
    result = execute_query(connection, query, schema.name)
    if result:
        # join_multiline_content reassembles definitions split across
        # syscomments rows and yields (index, name, full definition).
        for idx, name, definition in join_multiline_content('view_name', 'view_definition', result):
            grt.send_progress(idx * step, 'Reverse engineering view %s.%s' % (schema.name, name))
            view = grt.classes.db_sybase_View()
            view.owner = schema
            view.name = name or ''
            view.sqlDefinition = definition
            schema.views.append(view)
    return 0
def reverseEngineerViews(connection, schema):
    """Reverse engineer the views of a Sybase schema into schema.views."""
    execute_query(connection, 'USE %s' % schema.owner.name)  # catalog

    # View source text; long definitions span several syscomments rows
    # (ordered by colid) and are reassembled by join_multiline_content.
    query = """SELECT so.name AS view_name, sc.text AS view_definition
FROM sysobjects so JOIN syscomments sc on so.id=sc.id
WHERE so.uid=USER_ID(?) AND so.type='V'
ORDER BY so.name, sc.colid""" 

    schema.views.remove_all()
    step = 1.0 / (len(getViewNames(connection, schema.owner.name, schema.name)) + 1e-10)  # avoid div-by-zero
    cursor = execute_query(connection, query, schema.name)
    if cursor:
        for idx, view_name, view_definition in join_multiline_content('view_name', 'view_definition', cursor):
            grt.send_progress(idx * step, 'Reverse engineering view %s.%s' % (schema.name, view_name))
            view = grt.classes.db_sybase_View()
            view.owner = schema
            view.name = view_name or ''
            view.sqlDefinition = view_definition
            schema.views.append(view)
    return 0
 def task_test_target(self):
     grt.send_progress(-1, "Connecting to target...")
     attempt = 0
     if self.main.plan.migrationTarget.connection.hostIdentifier == self.main.plan.migrationSource.connection.hostIdentifier:
         if self.main.plan.migrationTarget.connection.parameterValues['userName'] == self.main.plan.migrationSource.connection.parameterValues['userName']:
             self.main.plan.migrationTarget.password = self.main.plan.migrationSource.password
     force_password = False
     while True:
         try:
             if not self.main.plan.migrationTarget.checkConnection():
                 raise Exception("Could not connect to target RDBMS")
             self.main.plan.migrationTarget.checkVersion()
             break
         except (DBLoginError, SystemError), e:
             if attempt > 0:
                 if isinstance(e, DBLoginError) and not force_password:
                     force_password = True
                 else:
                     raise e
             attempt += 1
             self.main.plan.migrationTarget.password = request_password(self.main.plan.migrationTarget.connection)
def reverseEngineerProcedures(connection, schema):
    """Reverse engineer the stored procedures of a Sybase schema into schema.routines."""
    execute_query(connection, 'USE %s' % schema.owner.name)  # catalog

    # Procedure source text; long definitions span several syscomments rows
    # (ordered by colid) and are reassembled by join_multiline_content.
    query = """SELECT so.name AS procedure_name, sc.text as procedure_definition
FROM sysobjects so INNER JOIN syscomments sc ON so.id=sc.id
WHERE so.uid = USER_ID(?) AND so.type = 'P'
ORDER BY so.name, sc.colid""" 

    step = 1.0 / (len(getProcedureNames(connection, schema.owner.name, schema.name)) + 1e-10)  # avoid div-by-zero
    cursor = execute_query(connection, query, schema.name)
    if cursor:
        for idx, procedure_name, procedure_definition in join_multiline_content('procedure_name', 'procedure_definition', cursor):
            grt.send_progress(idx * step, 'Reverse engineering procedure %s.%s' % (schema.name, procedure_name))
            procedure = grt.classes.db_sybase_Routine()
            procedure.owner = schema
            procedure.name = procedure_name or ''
            procedure.routineType = 'PROCEDURE'
            procedure.sqlDefinition = procedure_definition
            schema.routines.append(procedure)
    grt.send_progress(1.0, 'Finished reverse engineering of procedures for the %s schema.' % schema.name)
    return 0
# ---- 示例#19 (Example #19) ----
    def migrateSchemaContents(self, state, targetSchema, sourceSchema):
        """Migrate the tables, views and routines of sourceSchema into targetSchema.

        Progress is reported as the fraction of objects (of all three kinds)
        selected for migration that have been processed so far.
        """
        # DECOMPOSITION: the original triplicated the same migrate-and-report
        # loop for tables, views and routines; drive it from one table instead.
        # (group key, source objects, migration method, target list, progress label)
        kinds = (
            ("tables", sourceSchema.tables, self.migrateTableToMySQL, targetSchema.tables, "Table"),
            ("views", sourceSchema.views, self.migrateViewToMySQL, targetSchema.views, "View"),
            ("routines", sourceSchema.routines, self.migrateRoutineToMySQL, targetSchema.routines, "Routine"),
        )

        # 1e-10 avoids division by zero when nothing is selected for migration.
        total = sum(len([obj for obj in objects if self.shouldMigrate(state, group, obj)])
                    for group, objects, _, _, _ in kinds) + 1e-10

        i = 0.0
        for group, objects, migrate, target_list, label in kinds:
            for source_object in objects:
                if not self.shouldMigrate(state, group, source_object):
                    continue
                target_object = migrate(state, source_object, targetSchema)
                if target_object:
                    target_list.append(target_object)
                grt.send_progress(i / total, '%s %s.%s migrated' % (label, sourceSchema.name, source_object.name))
                i += 1
def reverseEngineerTables(connection, schema):
    """Reverse engineer the tables of *schema* in two passes.

    Foreign-key resolution needs every table name and column already present,
    so the first call builds the tables (columns, PKs, indices) and records a
    per-schema flag; a later call with the flag set runs the FK pass instead.
    Always returns 0.
    """
    # Since there are some reverse engineering stages that requires all table names and table columns
    # in the database to be set, these should be done after a first pass that rev engs their requirements
    progress_flags = _connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
    first_pass_flag = '%s_tables_first_pass' % schema.name

    if first_pass_flag not in progress_flags:
        # First pass: switch to the catalog's database and build the tables.
        catalog = schema.owner
        execute_query(connection, 'USE %s' % quoteIdentifier(catalog.name))

        schema.tables.remove_all()
        # TODO: Add real create table comments instead of empty string in the following line:
        table_names = [(table_name, '') for table_name in getTableNames(connection, catalog.name, schema.name) ]
        total = len(table_names) + 1e-10  # epsilon avoids division by zero
        for idx, (table_name, table_comment) in enumerate(table_names):
            grt.send_progress(idx / total, 'Retrieving table %s.%s...' % (schema.name, table_name))
            table = grt.classes.db_sybase_Table()
            table.name = table_name
            schema.tables.append(table)
            table.owner = schema
            table.comment = table_comment or ''  # table_comment can be None

            reverseEngineerTableColumns(connection, table)
            reverseEngineerTablePK(connection, table)
            reverseEngineerTableIndices(connection, table)
        progress_flags.add(first_pass_flag)
    else:
        # Second pass: all tables are known, so foreign keys can be resolved.
        total = len(schema.tables) + 1e-10
        for idx, table in enumerate(schema.tables):
            reverseEngineerTableFKs(connection, table)
            grt.send_progress((idx + 1.0) / total, 'Reverse engineering of foreign keys in table %s.%s completed' % (schema.name, table.name))

    return 0
    def reverseEngineerTables(cls, connection, schema):
        """Reverse engineer the tables of *schema* in two passes.

        The first pass creates each table with its columns, primary key and
        indices, then records a per-schema flag; calling the method again with
        the flag present runs the second pass, which resolves foreign keys
        (these need the complete set of tables/columns in place first).

        Always returns 0.
        """
        # Since there are some reverse engineering stages that requires all table names and table columns
        # in the database to be set, these should be done after a first pass that rev engs their requirements
        # BUG FIX: the flag container must be a set -- .add() is called on it
        # below; the previous default ([]) raised AttributeError on a list.
        progress_flags = cls._connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
        is_first_pass = not ('%s_tables_first_pass' % schema.name) in progress_flags

        if is_first_pass:
            catalog = schema.owner
            schema.tables.remove_all()
            table_names = cls.getTableNames(connection, catalog.name, schema.name)
            # Optional subclass hook: per-table comments default to ''.
            getCommentForTable = cls.getCommentForTable if hasattr(cls, 'getCommentForTable') else lambda conn, tbl:''
            total = len(table_names) + 1e-10  # epsilon avoids division by zero
            i = 0.0
            for table_name in table_names:
                grt.send_progress(i / total, 'Retrieving table %s.%s...' % (schema.name, table_name))
                table = grt.classes.db_Table()
                table.name = table_name
                schema.tables.append(table)
                table.owner = schema
                table.comment = getCommentForTable(connection, table)

                cls.reverseEngineerTableColumns(connection, table)
                cls.reverseEngineerTablePK(connection, table)
                cls.reverseEngineerTableIndices(connection, table)

                i += 1.0
            progress_flags.add('%s_tables_first_pass' % schema.name)
        else:  # Second pass: resolve foreign keys now that all tables exist
            i = 0.0
            total = len(schema.tables) + 1e-10
            # Reset the FK-name registry used to deduplicate constraint names.
            cls._connections[connection.__id__]['fk_names'] = {}
            for table in schema.tables:
                cls.reverseEngineerTableFKs(connection, table)
                grt.send_progress(i / total, 'Reverse engineering of foreign keys in table %s.%s completed' % (schema.name, table.name))
                i += 1.0

        return 0
    def reverseEngineer(cls, connection, catalog_name, schemata_list, context):
        """Reverse engineer an SQLite database by replaying the DDL stored in
        sqlite_master through the MySQL SQL parser.

        Returns the populated catalog object.  Honours the
        "reverseEngineerTables" flag in *context* (default True).
        """
        from grt.modules import MysqlSqlFacade
        grt.send_progress(0, "Reverse engineering catalog information")
        cls.check_interruption()
        catalog = cls.reverseEngineerCatalog(connection, catalog_name)

        # calculate total workload 1st
        grt.send_progress(0.1, 'Preparing...')

        get_tables = context.get("reverseEngineerTables", True)

        # 10% of the progress is for preparation
        total = 1e-10  # total should not be zero to avoid DivisionByZero exceptions
        total += len(cls.getTableNames(connection, catalog_name, '')) if get_tables else 0

        grt.send_progress(0.1, "Gathered stats")

        # Now the second pass for reverse engineering tables:
        if get_tables:
            idx = 0
            for object_type, name, tbl_name, _, sql in cls.execute_query(connection, "SELECT * FROM sqlite_master"):
                # BUG FIX: the original compared the builtin 'type' instead of
                # the row's 'object_type', so views/triggers were never skipped.
                # Internal 'sqlite_*' objects and entries with no DDL are also skipped.
                if object_type in ('view', 'trigger') or not sql or tbl_name.startswith('sqlite_'):
                    continue

                # Strip SQLite's bracket-style identifier quoting, which the
                # MySQL parser does not accept.
                sql = sql.replace('[', '').replace(']', '')

                grt.log_debug('SQLiteReverseEngineering', 'Procesing this sql:\n%s;' % sql)

                MysqlSqlFacade.parseSqlScriptString(catalog, sql)

                cls.check_interruption()
                grt.send_progress(0.1 + idx / total, 'Object %s reverse engineered!' % name)
                idx += 1

        grt.send_progress(1.0, 'Reverse engineering completed!')
        return catalog
    def _validate_existing_schemata(self):
        """Warn about schemas/tables that already exist on the target server
        and let the user decide whether to continue; the schema-creation task
        is disabled when the user declines.

        Behaviour depends on objectCreationParams["KeepSchemata"]:
        - True: existing schemas are kept, so only clashing tables are listed
          (they will not be recreated).
        - False: the clashing schemas would be dropped, so the user is warned
          about the resulting data loss.
        """
        grt.send_progress(0.0, "Validating for existing schemas on target MySQL Server...")
        # Compare names case-insensitively by upper-casing both sides.
        schema_set = set(schema.name.upper() for schema in self.main.plan.migrationTarget.catalog.schemata)
        target_schema_set = set(schema.upper() for schema in grt.modules.DbMySQLFE.getSchemaNames(self.main.plan.migrationTarget.connection))

        existing_schemas = list(schema_set.intersection(target_schema_set))

        continue_migration = True
        if len(existing_schemas) > 0:
            if self.main.plan.state.objectCreationParams.get("KeepSchemata", False):
                # Keeping schemas: build a per-schema list of tables that
                # already exist on the target.
                message = ''
                for schema in self.main.plan.migrationTarget.catalog.schemata:
                    if not schema.name.upper() in existing_schemas:
                        continue
                    target_schema_tables = set(table_name.upper() for table_name in
                                               grt.modules.DbMySQLFE.getTableNames(self.main.plan.migrationTarget.connection, schema.name) )
                    existing_tables = [table.name for table in schema.tables if table.name.upper() in target_schema_tables]
                    if existing_tables:
                        message += 'In schema ' + schema.name + ':\n    '
                        message += ', '.join(existing_tables) + '\n'

                if message:
                    # NOTE(review): pressing "No" appears to yield
                    # mforms.ResultCancel here (same comparison as below) -- confirm.
                    if mforms.Utilities.show_message("Existing Tables", "The following tables already exist in their "
                                                     "target schemas:\n%sThey won't be recreated. Delete those "
                                                     "tables before continuing if you want them to be recreated. "
                                                     "Do you want to continue?" % message , "Yes", "No", "") == mforms.ResultCancel:
                        continue_migration = False


            elif mforms.Utilities.show_message("Existing Schemas", "The %s %s " % ( 'schema' if len(existing_schemas) == 1 else 'schemata', ", ".join(existing_schemas)) +
                    "will be dropped in the target MySQL Server and all the existing data will be" +
                    " lost. Do you want to continue?" , "Yes", "No", "") == mforms.ResultCancel:
                continue_migration = False

        # Gate the DB-object-creation task on the user's decision.
        self._db_task3.set_enabled(continue_migration)
# ---- Example #24 (score: 0) ----
    def doFetchSchemaNames(self, only_these_catalogs=None):
        """Fetch list of schema names in catalog.schema format and stores them in the migration.sourceSchemataNames node

        Args:
            only_these_catalogs: optional list restricting the fetch to the
                given catalogs (or schemata, when the source RDBMS has no
                catalog concept).  Names not found on the source produce a
                warning; if none remain, the complete list is fetched instead.
        """
        # NOTE: the default used to be a mutable list ([]); None is safer and
        # behaves identically since the argument is only tested for truthiness.
        grt.send_progress(0.0, "Checking connection...")
        self.connect()
        if self.rdbms.doesSupportCatalogs > 0:
            grt.send_progress(0.1, "Fetching catalog names...")
            self.state.sourceSchemataNames.remove_all()
            catalog_names = self.getCatalogNames()
            if only_these_catalogs:
                inexistent_catalogs = set(only_these_catalogs).difference(
                    catalog_names)
                if inexistent_catalogs:
                    grt.send_warning(
                        'The following catalogs were not found: ' +
                        ', '.join(list(inexistent_catalogs)))
                # Fall back to the full catalog list if no requested catalog exists.
                catalog_names = list(
                    set(only_these_catalogs).difference(
                        inexistent_catalogs)) or self.getCatalogNames()
            self._catalog_names = catalog_names
            grt.send_progress(0.1, "Fetching schema names...")
            accumulated_progress = 0.1
            step_progress_share = 1.0 / (len(catalog_names) + 1e-10)
            for catalog in catalog_names:
                if not catalog:
                    continue
                grt.send_progress(accumulated_progress,
                                  'Fetching schema names from %s...' % catalog)
                schema_names = self.getSchemaNames(catalog)
                for schema in schema_names:
                    if not schema:
                        continue
                    self.state.sourceSchemataNames.append(
                        "%s.%s" % (self._db_module.quoteIdentifier(catalog),
                                   self._db_module.quoteIdentifier(schema)))
                accumulated_progress += 0.9 * step_progress_share
        elif self.rdbms.doesSupportCatalogs == 0:  # The rdbms doesn't support catalogs
            grt.send_progress(0.1, "Fetching schema names...")
            schema_names = self.getSchemaNames('')
            if only_these_catalogs:  # Here only_these_catalogs would rather mean only these schemata
                inexistent_schemata = set(only_these_catalogs).difference(
                    schema_names)
                if inexistent_schemata:
                    # BUG FIX: message read 'where not found'; corrected to
                    # 'were', matching the catalog branch above.
                    grt.send_warning(
                        'The following schemas were not found: ' +
                        ', '.join(list(inexistent_schemata)))
                schema_names = list(
                    set(only_these_catalogs).difference(
                        inexistent_schemata)) or self.getSchemaNames('')
            self._catalog_names = []
            self.state.sourceSchemataNames.remove_all()
            for schema in schema_names:
                # 'def' is used as a placeholder catalog name.
                self.state.sourceSchemataNames.append(
                    '%s.%s' % (self._db_module.quoteIdentifier('def'),
                               self._db_module.quoteIdentifier(schema)))
        else:  # no schema either
            self._catalog_names = []
            self.state.sourceSchemataNames.remove_all()
            for schema in self.getSchemaNames(''):
                self.state.sourceSchemataNames.append(
                    self._db_module.quoteIdentifier(schema))
        grt.send_progress(1.0, "Finished")
# ---- Example #25 (score: 0) ----
def createCatalogObjects(connection, catalog, objectCreationParams,
                         creationLog):
    """Create catalog objects in the server for the specified connection. The catalog must have been
    previously processed with generateSQLCreateStatements(), so that the objects have their temp_sql
    attributes set with their respective SQL CREATE statements.

    Per schema, objects are created in this order: schema DDL, tables, views,
    routines, then triggers.  Objects flagged ``commentedOut`` are reported and
    skipped.  Returns 1 on completion; re-raises grt.UserInterrupt if the user
    cancels.
    """
    def makeLogObject(obj):
        # Register a log entry for obj in creationLog (when logging is enabled).
        if creationLog is not None:
            log = grt.classes.GrtLogObject()
            log.logObject = obj
            creationLog.append(log)
            return log
        else:
            return None

    try:
        grt.send_progress(
            0.0, "Creating schema in target MySQL server at %s..." %
            connection.hostIdentifier)

        # Script executed once before any schema is created.
        preamble = catalog.customData["migration:preamble"]
        grt.send_progress(0.0, "Executing preamble script...")
        execute_script(connection, preamble.temp_sql, makeLogObject(preamble))

        # Each schema gets an equal share of the overall progress range.
        i = 0.0
        for schema in catalog.schemata:
            grt.begin_progress_step(i, i + 1.0 / len(catalog.schemata))
            i += 1.0 / len(catalog.schemata)

            if schema.commentedOut:
                grt.send_progress(1.0, "Skipping schema %s... " % schema.name)
                grt.end_progress_step()
                continue

            # Total object count drives the fractional progress within this schema.
            total = len(schema.tables) + len(schema.views) + len(
                schema.routines) + sum(
                    [len(table.triggers) for table in schema.tables])

            grt.send_progress(0.0, "Creating schema %s..." % schema.name)
            execute_script(connection, schema.temp_sql, makeLogObject(schema))

            # Counters of successfully created objects; o counts every object
            # processed (created or skipped) for progress reporting.
            tcount = 0
            vcount = 0
            rcount = 0
            trcount = 0
            o = 0
            for table in schema.tables:
                if table.commentedOut:
                    grt.send_progress(
                        float(o) / total,
                        "Skipping table %s.%s" % (schema.name, table.name))
                else:
                    grt.send_progress(
                        float(o) / total,
                        "Creating table %s.%s" % (schema.name, table.name))
                o += 1
                if not table.commentedOut and execute_script(
                        connection, table.temp_sql, makeLogObject(table)):
                    tcount += 1

            for view in schema.views:
                if view.commentedOut:
                    grt.send_progress(
                        float(o) / total,
                        "Skipping view %s.%s" % (schema.name, view.name))
                else:
                    grt.send_progress(
                        float(o) / total,
                        "Creating view %s.%s" % (schema.name, view.name))
                o += 1
                if not view.commentedOut and execute_script(
                        connection, view.temp_sql, makeLogObject(view)):
                    vcount += 1

            for routine in schema.routines:
                if routine.commentedOut:
                    grt.send_progress(
                        float(o) / total,
                        "Skipping routine %s.%s" % (schema.name, routine.name))
                else:
                    grt.send_progress(
                        float(o) / total,
                        "Creating routine %s.%s" % (schema.name, routine.name))
                o += 1
                if not routine.commentedOut and execute_script(
                        connection, routine.temp_sql, makeLogObject(routine)):
                    rcount += 1

            # Triggers go last so their tables already exist.
            for table in schema.tables:
                for trigger in table.triggers:
                    if trigger.commentedOut:
                        grt.send_progress(
                            float(o) / total, "Skipping trigger %s.%s.%s" %
                            (schema.name, table.name, trigger.name))
                    else:
                        grt.send_progress(
                            float(o) / total, "Creating trigger %s.%s.%s" %
                            (schema.name, table.name, trigger.name))
                    o += 1
                    if not trigger.commentedOut and execute_script(
                            connection, trigger.temp_sql,
                            makeLogObject(trigger)):
                        trcount += 1

            grt.send_info(
                "Scripts for %i tables, %i views and %i routines were executed for schema %s"
                % (tcount, vcount, rcount, schema.name))
            grt.end_progress_step()

        # Script executed once after all schemas have been created.
        postamble = catalog.customData["migration:postamble"]
        grt.send_progress(1.0, "Executing postamble script...")
        execute_script(connection, postamble.temp_sql,
                       makeLogObject(postamble))

        grt.send_progress(1.0, "Schema created")
    except grt.UserInterrupt:
        grt.send_info(
            "Cancellation request detected, interrupting schema creation.")
        raise

    return 1
def reverseEngineer(connection, catalog_name, schemata_list, options):
    """Reverse engineers a Sybase ASE database.

    This is the function that will be called by the Migration Wizard to reverse engineer
    a Sybase database. All the other reverseEngineer* functions are not actually required
    and should not be considered part of this module API even though they are currently
    being exposed. This function calls the other reverseEngineer* functions to complete
    the full reverse engineer process.

    Progress budget: 10% preparation/stats, 60% first pass over all schemata
    (tables, views, routines, triggers), 20% second pass (foreign keys),
    with user datatypes handled up front.  Returns the populated catalog.
    """
    grt.send_progress(0, "Reverse engineering catalog information")
    catalog = grt.classes.db_sybase_Catalog()
    catalog.name = catalog_name
    catalog.simpleDatatypes.remove_all()
    catalog.simpleDatatypes.extend(connection.driver.owner.simpleDatatypes)
    catalog.defaultCollationName = '' #   FIXME: Find out the right collation for the catalog

    grt.send_progress(0.05, "Reverse engineering User Data Types...")
    check_interruption()  #
    reverseEngineerUserDatatypes(connection, catalog)

    # calculate total workload 1st
    grt.send_progress(0.1, 'Preparing...')
    table_count_per_schema = {}
    view_count_per_schema = {}
    routine_count_per_schema = {}
    trigger_count_per_schema = {}
    total_count_per_schema = {}

    get_tables = options.get("reverseEngineerTables", True)
    get_triggers = options.get("reverseEngineerTriggers", True)
    get_views = options.get("reverseEngineerViews", True)
    get_routines = options.get("reverseEngineerRoutines", True)

    # 10% of the progress is for preparation
    total = 1e-10  # total should not be zero to avoid DivisionByZero exceptions
    i = 1.0
    accumulated_progress = 0.1
    for schema_name in schemata_list:
        check_interruption()
        table_count_per_schema[schema_name] = len(getTableNames(connection, catalog_name, schema_name)) if get_tables else 0
        view_count_per_schema[schema_name] = len(getViewNames(connection, catalog_name, schema_name)) if get_views else 0
        check_interruption()
        routine_count_per_schema[schema_name] = len(getProcedureNames(connection, catalog_name, schema_name)) + len(getFunctionNames(connection, catalog_name, schema_name)) if get_routines else 0
        trigger_count_per_schema[schema_name] = len(getTriggerNames(connection, catalog_name, schema_name)) if get_triggers else 0

        total_count_per_schema[schema_name] = (table_count_per_schema[schema_name] + view_count_per_schema[schema_name] +
                                               routine_count_per_schema[schema_name] + trigger_count_per_schema[schema_name] + 1e-10)
        total += total_count_per_schema[schema_name]

        grt.send_progress(accumulated_progress + 0.1 * (i / (len(schemata_list) + 1e-10) ), "Gathered stats for %s" % schema_name)
        i += 1.0

    # Now take 60% in the first pass of reverse engineering:
    accumulated_progress = 0.2
    grt.reset_progress_steps()
    grt.begin_progress_step(accumulated_progress, accumulated_progress + 0.6)
    accumulated_schema_progress = 0.0
    for schema_name in schemata_list:
        # Each schema's share of the 60% is proportional to its object count.
        schema_progress_share = total_count_per_schema.get(schema_name, 0.0) / total

        grt.begin_progress_step(accumulated_schema_progress, accumulated_schema_progress + schema_progress_share)

        this_schema_progress = 0.0

        schema = grt.classes.db_sybase_Schema()
        schema.owner = catalog
        schema.name = schema_name
        schema.defaultCollationName = catalog.defaultCollationName
        catalog.schemata.append(schema)

        # Reverse engineer tables:
        step_progress_share = table_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_tables:
            check_interruption()
            grt.send_info('Reverse engineering %i tables from %s' % (table_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share)
            # Remove previous first pass marks that may exist if the user goes back and attempt rev eng again:
            progress_flags = _connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
            progress_flags.discard('%s_tables_first_pass' % schema_name)
            reverseEngineerTables(connection, schema)
            grt.end_progress_step()

        this_schema_progress += step_progress_share
        grt.send_progress(this_schema_progress, 'First pass of table reverse engineering for schema %s completed!' % schema_name)

        # Reverse engineer views:
        step_progress_share = view_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_views:
            check_interruption()
            grt.send_info('Reverse engineering %i views from %s' % (view_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share)
            reverseEngineerViews(connection, schema)
            grt.end_progress_step()

        this_schema_progress += step_progress_share
        grt.send_progress(this_schema_progress, 'Reverse engineering of views for schema %s completed!' % schema_name)

        # Reverse engineer routines (procedures take the first half of the
        # routines share, functions the second half):
        step_progress_share = routine_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_routines:
            check_interruption()
            grt.send_info('Reverse engineering %i routines from %s' % (routine_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share/2)
            schema.routines.remove_all()
            reverseEngineerProcedures(connection, schema)
            grt.end_progress_step()
            check_interruption()
            grt.begin_progress_step(this_schema_progress + step_progress_share/2, this_schema_progress + step_progress_share)
            reverseEngineerFunctions(connection, schema)
            grt.end_progress_step()

        this_schema_progress += step_progress_share
        grt.send_progress(this_schema_progress, 'Reverse engineering of routines for schema %s completed!' % schema_name)

        # Reverse engineer triggers:
        step_progress_share = trigger_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_triggers:
            check_interruption()
            grt.send_info('Reverse engineering %i triggers from %s' % (trigger_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share)
            reverseEngineerTriggers(connection, schema)
            grt.end_progress_step()

        this_schema_progress += step_progress_share
        grt.send_progress(this_schema_progress, 'Reverse engineering of triggers for schema %s completed!' % schema_name)

        accumulated_schema_progress += schema_progress_share
        grt.end_progress_step()

    grt.end_progress_step()

    # Now the second pass for reverse engineering tables:
    accumulated_progress = 0.8
    if get_tables:
        total_tables = sum(table_count_per_schema[schema.name] for schema in catalog.schemata)
        for schema in catalog.schemata:
            check_interruption()
            step_progress_share = 0.2 * (table_count_per_schema[schema.name] / (total_tables + 1e-10))
            grt.send_info('Reverse engineering foreign keys for tables in schema %s' % schema.name)
            grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
            reverseEngineerTables(connection, schema)
            grt.end_progress_step()

            accumulated_progress += step_progress_share
            # BUG FIX: this message previously interpolated 'schema_name', a
            # stale variable left over from the first-pass loop (always the
            # last schema); report the schema processed in this iteration.
            grt.send_progress(accumulated_progress, 'Second pass of table reverse engineering for schema %s completed!' % schema.name)


    grt.send_progress(1.0, 'Reverse engineering completed!')
    return catalog
def createCatalogObjects(connection, catalog, objectCreationParams, creationLog):
    """Create catalog objects in the server for the specified connection. The catalog must have been
    previously processed with generateSQLCreateStatements(), so that the objects have their temp_sql
    attributes set with their respective SQL CREATE statements.

    Creation order per schema: schema DDL, tables, views, routines, triggers.
    Objects flagged ``commentedOut`` are reported and skipped.  Returns 1 on
    completion; re-raises grt.UserInterrupt if the user cancels.
    """

    def makeLogObject(obj):
        # Register a log entry for obj in creationLog (when logging is enabled).
        if creationLog is not None:
            log = grt.classes.GrtLogObject()
            log.logObject = obj
            creationLog.append(log)
            return log
        else:
            return None

    try:
        grt.send_progress(0.0, "Creating schema in target MySQL server at %s..." % connection.hostIdentifier)

        # Script executed once before any schema is created.
        preamble = catalog.customData["migration:preamble"]
        grt.send_progress(0.0, "Executing preamble script...")
        execute_script(connection, preamble.temp_sql, makeLogObject(preamble))

        # Each schema gets an equal share of the overall progress range.
        i = 0.0
        for schema in catalog.schemata:
            grt.begin_progress_step(i, i + 1.0 / len(catalog.schemata))
            i += 1.0 / len(catalog.schemata)

            if schema.commentedOut:
                grt.send_progress(1.0, "Skipping schema %s... " % schema.name)
                grt.end_progress_step()
                continue

            # Total object count drives the fractional progress within this schema.
            total = len(schema.tables) + len(schema.views) + len(schema.routines) + sum([len(table.triggers) for table in schema.tables])

            grt.send_progress(0.0, "Creating schema %s..." % schema.name)
            execute_script(connection, schema.temp_sql, makeLogObject(schema))

            # Counters of successfully created objects; o counts every object
            # processed (created or skipped) for progress reporting.
            tcount = 0
            vcount = 0
            rcount = 0
            trcount = 0
            o = 0
            for table in schema.tables:
                if table.commentedOut:
                    grt.send_progress(float(o) / total, "Skipping table %s.%s" % (schema.name, table.name))
                else:
                    grt.send_progress(float(o) / total, "Creating table %s.%s" % (schema.name, table.name))
                o += 1
                if not table.commentedOut and execute_script(connection, table.temp_sql, makeLogObject(table)):
                    tcount += 1

            for view in schema.views:
                if view.commentedOut:
                    grt.send_progress(float(o) / total, "Skipping view %s.%s" % (schema.name, view.name))
                else:
                    grt.send_progress(float(o) / total, "Creating view %s.%s" % (schema.name, view.name))
                o += 1
                if not view.commentedOut and execute_script(connection, view.temp_sql, makeLogObject(view)):
                    vcount += 1

            for routine in schema.routines:
                if routine.commentedOut:
                    grt.send_progress(float(o) / total, "Skipping routine %s.%s" % (schema.name, routine.name))
                else:
                    grt.send_progress(float(o) / total, "Creating routine %s.%s" % (schema.name, routine.name))
                o += 1
                if not routine.commentedOut and execute_script(connection, routine.temp_sql, makeLogObject(routine)):
                    rcount += 1

            # Triggers go last so their tables already exist.
            for table in schema.tables:
                for trigger in table.triggers:
                    if trigger.commentedOut:
                        grt.send_progress(float(o) / total, "Skipping trigger %s.%s.%s" % (schema.name, table.name, trigger.name))
                    else:
                        grt.send_progress(float(o) / total, "Creating trigger %s.%s.%s" % (schema.name, table.name, trigger.name))
                    o += 1
                    if not trigger.commentedOut and execute_script(connection, trigger.temp_sql, makeLogObject(trigger)):
                        trcount += 1

            grt.send_info("Scripts for %i tables, %i views and %i routines were executed for schema %s" % (tcount, vcount, rcount, schema.name))
            grt.end_progress_step()

        # Script executed once after all schemas have been created.
        postamble = catalog.customData["migration:postamble"]
        grt.send_progress(1.0, "Executing postamble script...")
        execute_script(connection, postamble.temp_sql, makeLogObject(postamble))

        grt.send_progress(1.0, "Schema created")
    except grt.UserInterrupt:
        grt.send_info("Cancellation request detected, interrupting schema creation.")
        raise

    return 1
# ---- Example #28 (score: 0) ----
def reverseEngineerFunctions(connection, schema):
    """Placeholder: function reverse engineering is not implemented for
    Sybase ASE; this only reports completion and returns 0."""
    # TODO: Find a way to reverse engineer functions in Sybase ASE
    message = 'Finished reverse engineering of functions for the %s schema.' % schema.name
    grt.send_progress(1.0, message)
    return 0
# ---- Example #29 (score: 0) ----
def reverseEngineer(connection, catalog_name, schemata_list, options):
    """Reverse engineers a Sybase ASE database.

    This is the function that will be called by the Migration Wizard to reverse engineer
    a Sybase database. All the other reverseEngineer* functions are not actually required
    and should not be considered part of this module API even though they are currently
    being exposed. This function calls the other reverseEngineer* functions to complete
    the full reverse engineer process.

    :param connection:    live connection to the source Sybase ASE server
    :param catalog_name:  name of the catalog (database) being reverse engineered
    :param schemata_list: names of the schemata to process
    :param options:       dict with optional reverseEngineerTables/Views/Routines/Triggers
                          boolean flags (each defaults to True)
    :return: the populated grt.classes.db_sybase_Catalog object
    """
    grt.send_progress(0, "Reverse engineering catalog information")
    catalog = grt.classes.db_sybase_Catalog()
    catalog.name = catalog_name
    catalog.simpleDatatypes.remove_all()
    catalog.simpleDatatypes.extend(connection.driver.owner.simpleDatatypes)
    catalog.defaultCollationName = ''  #   FIXME: Find out the right collation for the catalog

    grt.send_progress(0.05, "Reverse engineering User Data Types...")
    check_interruption()
    reverseEngineerUserDatatypes(connection, catalog)

    # Calculate the total workload first so per-schema progress shares can be computed.
    grt.send_progress(0.1, 'Preparing...')
    table_count_per_schema = {}
    view_count_per_schema = {}
    routine_count_per_schema = {}
    trigger_count_per_schema = {}
    total_count_per_schema = {}

    get_tables = options.get("reverseEngineerTables", True)
    get_triggers = options.get("reverseEngineerTriggers", True)
    get_views = options.get("reverseEngineerViews", True)
    get_routines = options.get("reverseEngineerRoutines", True)

    # 10% of the progress is for preparation
    total = 1e-10  # total should not be zero to avoid DivisionByZero exceptions
    i = 1.0
    accumulated_progress = 0.1
    for schema_name in schemata_list:
        check_interruption()
        table_count_per_schema[schema_name] = len(
            getTableNames(connection, catalog_name,
                          schema_name)) if get_tables else 0
        view_count_per_schema[schema_name] = len(
            getViewNames(connection, catalog_name,
                         schema_name)) if get_views else 0
        check_interruption()
        routine_count_per_schema[schema_name] = len(
            getProcedureNames(connection, catalog_name, schema_name)) + len(
                getFunctionNames(connection, catalog_name,
                                 schema_name)) if get_routines else 0
        trigger_count_per_schema[schema_name] = len(
            getTriggerNames(connection, catalog_name,
                            schema_name)) if get_triggers else 0

        # The small epsilon keeps later divisions by this count safe even for
        # schemata with no objects at all.
        total_count_per_schema[schema_name] = (
            table_count_per_schema[schema_name] +
            view_count_per_schema[schema_name] +
            routine_count_per_schema[schema_name] +
            trigger_count_per_schema[schema_name] + 1e-10)
        total += total_count_per_schema[schema_name]

        grt.send_progress(
            accumulated_progress + 0.1 * (i / (len(schemata_list) + 1e-10)),
            "Gathered stats for %s" % schema_name)
        i += 1.0

    # Now take 60% in the first pass of reverse engineering:
    accumulated_progress = 0.2
    grt.reset_progress_steps()
    grt.begin_progress_step(accumulated_progress, accumulated_progress + 0.6)
    accumulated_schema_progress = 0.0
    for schema_name in schemata_list:
        schema_progress_share = total_count_per_schema.get(schema_name,
                                                           0.0) / total

        grt.begin_progress_step(
            accumulated_schema_progress,
            accumulated_schema_progress + schema_progress_share)

        this_schema_progress = 0.0

        schema = grt.classes.db_sybase_Schema()
        schema.owner = catalog
        schema.name = schema_name
        schema.defaultCollationName = catalog.defaultCollationName
        catalog.schemata.append(schema)

        # Reverse engineer tables:
        step_progress_share = table_count_per_schema[schema_name] / (
            total_count_per_schema[schema_name] + 1e-10)
        if get_tables:
            check_interruption()
            grt.send_info('Reverse engineering %i tables from %s' %
                          (table_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress,
                                    this_schema_progress + step_progress_share)
            # Remove previous first pass marks that may exist if the user goes back and attempt rev eng again:
            progress_flags = _connections[connection.__id__].setdefault(
                '_rev_eng_progress_flags', set())
            progress_flags.discard('%s_tables_first_pass' % schema_name)
            reverseEngineerTables(connection, schema)
            grt.end_progress_step()

        this_schema_progress += step_progress_share
        grt.send_progress(
            this_schema_progress,
            'First pass of table reverse engineering for schema %s completed!'
            % schema_name)

        # Reverse engineer views:
        step_progress_share = view_count_per_schema[schema_name] / (
            total_count_per_schema[schema_name] + 1e-10)
        if get_views:
            check_interruption()
            grt.send_info('Reverse engineering %i views from %s' %
                          (view_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress,
                                    this_schema_progress + step_progress_share)
            reverseEngineerViews(connection, schema)
            grt.end_progress_step()

        this_schema_progress += step_progress_share
        grt.send_progress(
            this_schema_progress,
            'Reverse engineering of views for schema %s completed!' %
            schema_name)

        # Reverse engineer routines (procedures take the first half of the
        # routine share, functions the second half):
        step_progress_share = routine_count_per_schema[schema_name] / (
            total_count_per_schema[schema_name] + 1e-10)
        if get_routines:
            check_interruption()
            grt.send_info('Reverse engineering %i routines from %s' %
                          (routine_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(
                this_schema_progress,
                this_schema_progress + step_progress_share / 2)
            schema.routines.remove_all()
            reverseEngineerProcedures(connection, schema)
            grt.end_progress_step()
            check_interruption()
            grt.begin_progress_step(
                this_schema_progress + step_progress_share / 2,
                this_schema_progress + step_progress_share)
            reverseEngineerFunctions(connection, schema)
            grt.end_progress_step()

        this_schema_progress += step_progress_share
        grt.send_progress(
            this_schema_progress,
            'Reverse engineering of routines for schema %s completed!' %
            schema_name)

        # Reverse engineer triggers:
        step_progress_share = trigger_count_per_schema[schema_name] / (
            total_count_per_schema[schema_name] + 1e-10)
        if get_triggers:
            check_interruption()
            grt.send_info('Reverse engineering %i triggers from %s' %
                          (trigger_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress,
                                    this_schema_progress + step_progress_share)
            reverseEngineerTriggers(connection, schema)
            grt.end_progress_step()

        this_schema_progress += step_progress_share
        grt.send_progress(
            this_schema_progress,
            'Reverse engineering of triggers for schema %s completed!' %
            schema_name)

        accumulated_schema_progress += schema_progress_share
        grt.end_progress_step()

    grt.end_progress_step()

    # Now the second pass for reverse engineering tables (foreign keys need
    # every table to exist before they can be resolved):
    accumulated_progress = 0.8
    if get_tables:
        total_tables = sum(table_count_per_schema[schema.name]
                           for schema in catalog.schemata)
        for schema in catalog.schemata:
            check_interruption()
            step_progress_share = 0.2 * (table_count_per_schema[schema.name] /
                                         (total_tables + 1e-10))
            grt.send_info(
                'Reverse engineering foreign keys for tables in schema %s' %
                schema.name)
            grt.begin_progress_step(accumulated_progress,
                                    accumulated_progress + step_progress_share)
            reverseEngineerTables(connection, schema)
            grt.end_progress_step()

            accumulated_progress += step_progress_share
            # Fixed: this loop iterates `schema`, not `schema_name`; the old
            # message reused the stale `schema_name` from the first-pass loop.
            grt.send_progress(
                accumulated_progress,
                'Second pass of table reverse engineering for schema %s completed!'
                % schema.name)

    grt.send_progress(1.0, 'Reverse engineering completed!')
    return catalog
def reverseEngineer(connection, catalog_name, schemata_list, context):
    """Reverse engineers a MySQL database via SHOW CREATE statements.

    Fetches the DDL for tables/views, triggers, procedures and functions of the
    requested schemata and feeds it to MySQLParserServices to populate a catalog.

    :param connection:    live connection to the source MySQL server
    :param catalog_name:  name of the catalog being reverse engineered
    :param schemata_list: names of the schemata to process
    :param context:       dict with optional reverseEngineerTables/Views/Routines/Triggers
                          boolean flags (each defaults to True)
    :return: the populated grt.classes.db_mysql_Catalog object
    :raises Exception: if a SHOW CREATE query returns no row for an object
    """
    catalog = grt.classes.db_mysql_Catalog()
    catalog.name = catalog_name
    catalog.simpleDatatypes.remove_all()
    catalog.simpleDatatypes.extend(connection.driver.owner.simpleDatatypes)
    
    table_names_per_schema = {}
    routine_names_per_schema = {}
    trigger_names_per_schema = {}
    
    def filter_warnings(mtype, text, detail):
        # filter out parser warnings about stub creation/reuse from the message stream, since
        # they're harmless
        if mtype == "WARNING" and (" stub " in text or "Stub " in text):
            grt.send_info(text)
            return True
        return False
    
    version = getServerVersion(connection)
    
    get_tables = context.get("reverseEngineerTables", True)
    # SHOW CREATE TRIGGER only exists since MySQL 5.1.21.
    get_triggers = context.get("reverseEngineerTriggers", True) and (version.majorNumber, version.minorNumber, version.releaseNumber) >= (5, 1, 21)
    get_views = context.get("reverseEngineerViews", True)
    get_routines = context.get("reverseEngineerRoutines", True)
    
    # calculate total workload 1st
    
    # 10% of the progress is for preparation
    
    grt.send_progress(0, "Preparing...")
    total = 0
    i = 0.0
    for schema_name in schemata_list:
        check_interruption()
        if get_tables and get_views:
            table_names = getAllTableNames(connection, catalog_name, schema_name)
        elif get_tables:
            table_names = getTableNames(connection, catalog_name, schema_name)
        elif get_views:
            table_names = getViewNames(connection, catalog_name, schema_name)
        else:
            table_names = []  # fixed: was `table_name = []`, leaving table_names undefined
        total += len(table_names)
        table_names_per_schema[schema_name] = table_names
        check_interruption()
        if get_routines:
            procedure_names = getProcedureNames(connection, catalog_name, schema_name)
            check_interruption()
            function_names = getFunctionNames(connection, catalog_name, schema_name)
            check_interruption()
            total += len(procedure_names)
            total += len(function_names)
            routine_names_per_schema[schema_name] = procedure_names, function_names
        else:
            routine_names_per_schema[schema_name] = [], []
        if get_triggers:
            trigger_names = getTriggerNames(connection, catalog_name, schema_name)
            total += len(trigger_names)
        else:
            trigger_names = []
        trigger_names_per_schema[schema_name] = trigger_names
        
        grt.send_progress(0.1 * (i/len(schemata_list)), "Preparing...")
        i += 1.0

    # Guard the i/total progress divisions below against a completely empty
    # workload; the epsilon still formats as 0 in the final "%i" message.
    total = total or 1e-10

    def wrap_sql(sql, schema):
        # Qualify the DDL so the parser resolves names against the right schema.
        return "USE `%s`;\n%s"%(escape_sql_identifier(schema), sql)

    def wrap_routine_sql(sql):
        # Routine bodies contain ';', so they need a non-default delimiter.
        return "DELIMITER $$\n"+sql

    i = 0.0
    for schema_name in schemata_list:
        schema = grt.classes.db_mysql_Schema()
        schema.owner = catalog
        schema.name = schema_name
        catalog.schemata.append(schema)
        # Renamed from `context` to avoid shadowing the function parameter.
        parser_context = grt.modules.MySQLParserServices.createParserContext(catalog.characterSets, getServerVersion(connection), getServerMode(connection), 1)
        options = {}

        if get_tables or get_views:
            grt.send_info("Reverse engineering tables from %s" % schema_name)
            for table_name in table_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving table %s.%s..." % (schema_name, table_name))
                result = execute_query(connection, "SHOW CREATE TABLE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(table_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, table_name))
                if result and result.nextRow():
                    sql = result.stringByIndex(2)
                    grt.push_message_handler(filter_warnings)
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(parser_context, catalog, wrap_sql(sql, schema_name), options)
                    grt.end_progress_step()
                    grt.pop_message_handler()
                    i += 0.5
                else:
                    raise Exception("Could not fetch table information for %s.%s" % (schema_name, table_name))

        if get_triggers:
            grt.send_info("Reverse engineering triggers from %s" % schema_name)
            for trigger_name in trigger_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving trigger %s.%s..." % (schema_name, trigger_name))
                result = execute_query(connection, "SHOW CREATE TRIGGER `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(trigger_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, trigger_name))
                if result and result.nextRow():
                    sql = result.stringByName("SQL Original Statement")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(parser_context, catalog, wrap_sql(wrap_routine_sql(sql), schema_name), options)
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch trigger information for %s.%s" % (schema_name, trigger_name))
        
        if get_routines:
            grt.send_info("Reverse engineering stored procedures from %s" % schema_name)
            procedure_names, function_names = routine_names_per_schema[schema_name]
            for name in procedure_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving stored procedure %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE PROCEDURE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Procedure")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(parser_context, catalog, wrap_sql(wrap_routine_sql(sql), schema_name), options)
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch procedure information for %s.%s" % (schema_name, name))

            grt.send_info("Reverse engineering functions from %s" % schema_name)
            for name in function_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving function %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE FUNCTION `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Function")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(parser_context, catalog, wrap_sql(wrap_routine_sql(sql), schema_name), options)
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch function information for %s.%s" % (schema_name, name))

    grt.send_progress(1.0, "Reverse engineered %i objects" % total)
    
    # check for any stub tables left
    empty_schemas = []
    for schema in catalog.schemata:
        schema_has_stub_tables = False
        for table in reversed(schema.tables):
            if table.isStub:
                grt.send_warning("Table %s was referenced from another table, but was not reverse engineered" % table.name)
                schema.tables.remove(table)
                schema_has_stub_tables = True
        if not schema.tables and not schema.views and not schema.routines and schema_has_stub_tables:
            empty_schemas.append(schema)
    for schema in empty_schemas:
        catalog.schemata.remove(schema)

    return catalog
# Example #31
def reverseEngineer(connection, catalog_name, schemata_list, context):
    """Reverse engineers a MySQL database via SHOW CREATE statements.

    Variant that parses the fetched DDL with MysqlSqlFacade.parseSqlScriptString
    to populate the catalog.

    :param connection:    live connection to the source MySQL server
    :param catalog_name:  name of the catalog being reverse engineered
    :param schemata_list: names of the schemata to process
    :param context:       dict with optional reverseEngineerTables/Views/Routines/Triggers
                          boolean flags (each defaults to True)
    :return: the populated grt.classes.db_mysql_Catalog object
    :raises Exception: if a SHOW CREATE query returns no row for an object
    """
    catalog = grt.classes.db_mysql_Catalog()
    catalog.name = catalog_name
    catalog.simpleDatatypes.remove_all()
    catalog.simpleDatatypes.extend(connection.driver.owner.simpleDatatypes)
    
    table_names_per_schema = {}
    routine_names_per_schema = {}
    trigger_names_per_schema = {}
    
    def filter_warnings(mtype, text, detail):
        # filter out parser warnings about stub creation/reuse from the message stream, since
        # they're harmless
        if mtype == "WARNING" and (" stub " in text or "Stub " in text):
            grt.send_info(text)
            return True
        return False
    
    version = getServerVersion(connection)
    
    get_tables = context.get("reverseEngineerTables", True)
    # SHOW CREATE TRIGGER only exists since MySQL 5.1.21.
    get_triggers = context.get("reverseEngineerTriggers", True) and (version.majorNumber, version.minorNumber, version.releaseNumber) >= (5, 1, 21)
    get_views = context.get("reverseEngineerViews", True)
    get_routines = context.get("reverseEngineerRoutines", True)
    
    # calculate total workload 1st
    
    # 10% of the progress is for preparation
    
    grt.send_progress(0, "Preparing...")
    total = 0
    i = 0.0
    for schema_name in schemata_list:
        check_interruption()
        if get_tables and get_views:
            table_names = getAllTableNames(connection, catalog_name, schema_name)
        elif get_tables:
            table_names = getTableNames(connection, catalog_name, schema_name)
        elif get_views:
            table_names = getViewNames(connection, catalog_name, schema_name)
        else:
            table_names = []  # fixed: was `table_name = []`, leaving table_names undefined
        total += len(table_names)
        table_names_per_schema[schema_name] = table_names
        check_interruption()
        if get_routines:
            procedure_names = getProcedureNames(connection, catalog_name, schema_name)
            check_interruption()
            function_names = getFunctionNames(connection, catalog_name, schema_name)
            check_interruption()
            total += len(procedure_names)
            total += len(function_names)
            routine_names_per_schema[schema_name] = procedure_names, function_names
        else:
            routine_names_per_schema[schema_name] = [], []
        if get_triggers:
            trigger_names = getTriggerNames(connection, catalog_name, schema_name)
            total += len(trigger_names)
        else:
            trigger_names = []
        trigger_names_per_schema[schema_name] = trigger_names
        
        grt.send_progress(0.1 * (i/len(schemata_list)), "Preparing...")
        i += 1.0

    # Guard the i/total progress divisions below against a completely empty
    # workload; the epsilon still formats as 0 in the final "%i" message.
    total = total or 1e-10

    def wrap_sql(sql, schema):
        # Qualify the DDL so the parser resolves names against the right schema.
        return "USE `%s`;\n%s"%(escape_sql_identifier(schema), sql)

    def wrap_routine_sql(sql):
        # Routine bodies contain ';', so they need a non-default delimiter.
        return "DELIMITER $$\n"+sql

    i = 0.0
    for schema_name in schemata_list:
        schema = grt.classes.db_mysql_Schema()
        schema.owner = catalog
        schema.name = schema_name
        catalog.schemata.append(schema)

        if get_tables or get_views:
            grt.send_info("Reverse engineering tables from %s" % schema_name)
            for table_name in table_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving table %s.%s..." % (schema_name, table_name))
                result = execute_query(connection, "SHOW CREATE TABLE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(table_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, table_name))
                if result and result.nextRow():
                    sql = result.stringByIndex(2)
                    grt.push_message_handler(filter_warnings)
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(sql, schema_name))
                    grt.end_progress_step()
                    grt.pop_message_handler()
                    i += 0.5
                else:
                    raise Exception("Could not fetch table information for %s.%s" % (schema_name, table_name))

        if get_triggers:
            grt.send_info("Reverse engineering triggers from %s" % schema_name)
            for trigger_name in trigger_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving trigger %s.%s..." % (schema_name, trigger_name))
                result = execute_query(connection, "SHOW CREATE TRIGGER `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(trigger_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, trigger_name))
                if result and result.nextRow():
                    sql = result.stringByName("SQL Original Statement")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(wrap_routine_sql(sql), schema_name))
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch trigger information for %s.%s" % (schema_name, trigger_name))
        
        if get_routines:
            grt.send_info("Reverse engineering stored procedures from %s" % schema_name)
            procedure_names, function_names = routine_names_per_schema[schema_name]
            for name in procedure_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving stored procedure %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE PROCEDURE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Procedure")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(wrap_routine_sql(sql), schema_name))
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch procedure information for %s.%s" % (schema_name, name))

            grt.send_info("Reverse engineering functions from %s" % schema_name)
            for name in function_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving function %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE FUNCTION `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Function")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(wrap_routine_sql(sql), schema_name))
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch function information for %s.%s" % (schema_name, name))

    grt.send_progress(1.0, "Reverse engineered %i objects" % total)
    
    # check for any stub tables left
    empty_schemas = []
    for schema in catalog.schemata:
        schema_has_stub_tables = False
        for table in reversed(schema.tables):
            if table.isStub:
                grt.send_warning("Table %s was referenced from another table, but was not reverse engineered" % table.name)
                schema.tables.remove(table)
                schema_has_stub_tables = True
        if not schema.tables and not schema.views and not schema.routines and schema_has_stub_tables:
            empty_schemas.append(schema)
    for schema in empty_schemas:
        catalog.schemata.remove(schema)

    return catalog
def reverseEngineerFunctions(connection, schema):
    """Stub: function reverse engineering is not implemented for Sybase ASE.

    Only reports completion through the progress channel and returns 0
    (zero functions reverse engineered).
    """
    # TODO: Find a way to reverse engineer functions in Sybase ASE
    completion_message = 'Finished reverse engineering of functions for the %s schema.' % schema.name
    grt.send_progress(1.0, completion_message)
    return 0
    def reverseEngineer(cls, connection, catalog_name, schemata_list, context):
        """Reverse engineer the selected schemata of a catalog.

        connection    -- live connection object to the source RDBMS
        catalog_name  -- name of the catalog being reverse engineered
        schemata_list -- names of the schemata to process
        context       -- dict of flags selecting what to fetch:
                         reverseEngineerTables / reverseEngineerTriggers /
                         reverseEngineerViews / reverseEngineerRoutines
                         (all default to True)

        Returns the populated catalog object.  Progress budget: 10% for
        preparation (object counting), 60% for the first pass over tables,
        views, routines and triggers, 20% for the second table pass
        (foreign keys), reported via grt.send_progress and nested
        begin/end_progress_step ranges.
        """
        grt.send_progress(0, "Reverse engineering catalog information")
        cls.check_interruption()
        catalog = cls.reverseEngineerCatalog(connection, catalog_name)

        # calculate total workload 1st
        grt.send_progress(0.1, 'Preparing...')
        table_count_per_schema = {}
        view_count_per_schema = {}
        routine_count_per_schema = {}
        trigger_count_per_schema = {}
        total_count_per_schema = {}

        get_tables = context.get("reverseEngineerTables", True)
        get_triggers = context.get("reverseEngineerTriggers", True)
        get_views = context.get("reverseEngineerViews", True)
        get_routines = context.get("reverseEngineerRoutines", True)

        # 10% of the progress is for preparation
        total = 1e-10  # total should not be zero to avoid DivisionByZero exceptions
        i = 0.0
        accumulated_progress = 0.1
        for schema_name in schemata_list:
            cls.check_interruption()
            table_count_per_schema[schema_name] = len(cls.getTableNames(connection, catalog_name, schema_name)) if get_tables else 0
            view_count_per_schema[schema_name] = len(cls.getViewNames(connection, catalog_name, schema_name)) if get_views else 0
            cls.check_interruption()
            routine_count_per_schema[schema_name] = len(cls.getProcedureNames(connection, catalog_name, schema_name)) + len(cls.getFunctionNames(connection, catalog_name, schema_name)) if get_routines else 0
            trigger_count_per_schema[schema_name] = len(cls.getTriggerNames(connection, catalog_name, schema_name)) if get_triggers else 0

            total_count_per_schema[schema_name] = (table_count_per_schema[schema_name] + view_count_per_schema[schema_name] +
                                                   routine_count_per_schema[schema_name] + trigger_count_per_schema[schema_name] + 1e-10)
            total += total_count_per_schema[schema_name]

            grt.send_progress(accumulated_progress + 0.1 * (i / (len(schemata_list) + 1e-10) ), "Gathered stats for %s" % schema_name)
            i += 1.0

        # Now take 60% in the first pass of reverse engineering:
        accumulated_progress = 0.2
        for schema_name in schemata_list:
            schema_progress_share = 0.6 * (total_count_per_schema.get(schema_name, 0.0) / total)
            schema = find_object_with_name(catalog.schemata, schema_name)

            if schema:
                # Reverse engineer tables:
                step_progress_share = schema_progress_share * (table_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_tables:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering tables from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    # Remove previous first pass marks that may exist if the user goes back and attempt rev eng again:
                    progress_flags = cls._connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
                    progress_flags.discard('%s_tables_first_pass' % schema_name)
                    cls.reverseEngineerTables(connection, schema)
                    grt.end_progress_step()

                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'First pass of table reverse engineering for schema %s completed!' % schema_name)

                # Reverse engineer views:
                step_progress_share = schema_progress_share * (view_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_views:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering views from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    cls.reverseEngineerViews(connection, schema)
                    grt.end_progress_step()

                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Reverse engineering of views for schema %s completed!' % schema_name)

                # Reverse engineer routines: procedures get the first half of the
                # routine progress range, functions the second half.
                step_progress_share = schema_progress_share * (routine_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_routines:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering routines from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    grt.begin_progress_step(0.0, 0.5)
                    cls.reverseEngineerProcedures(connection, schema)
                    cls.check_interruption()
                    grt.end_progress_step()
                    grt.begin_progress_step(0.5, 1.0)
                    reverseEngineerFunctions(connection, schema)
                    grt.end_progress_step()
                    grt.end_progress_step()

                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Reverse engineering of routines for schema %s completed!' % schema_name)

                # Reverse engineer triggers:
                step_progress_share = schema_progress_share * (trigger_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_triggers:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering triggers from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    cls.reverseEngineerTriggers(connection, schema)
                    grt.end_progress_step()

                # NOTE(review): plain assignment (not +=) pins the bar at 0.8 after
                # every schema — presumably intentional so the second pass always
                # starts from 0.8; verify before changing.
                accumulated_progress = 0.8
                grt.send_progress(accumulated_progress, 'Reverse engineering of triggers for schema %s completed!' % schema_name)
            else:  # No schema with the given name was found
                grt.send_warning('The schema %s was not found in the catalog %s. Skipping it.' % (schema_name, catalog_name) )

        # Now the second pass for reverse engineering tables (foreign keys can be
        # resolved now that every table object exists):
        if get_tables:
            total_tables = sum(table_count_per_schema[schema.name] for schema in catalog.schemata if schema.name in schemata_list)
            for schema in catalog.schemata:
                if schema.name not in schemata_list:
                    continue
                cls.check_interruption()
                step_progress_share = 0.2 * (table_count_per_schema[schema.name] / (total_tables + 1e-10))
                grt.send_info('Reverse engineering foreign keys for tables in schema %s' % schema.name)
                grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                cls.reverseEngineerTables(connection, schema)
                grt.end_progress_step()

                accumulated_progress += step_progress_share
                # Bug fix: this message previously used the stale variable
                # schema_name left over from the first-pass loop.
                grt.send_progress(accumulated_progress, 'Second pass of table reverse engineering for schema %s completed!' % schema.name)

        grt.send_progress(1.0, 'Reverse engineering completed!')
        return catalog
Example #34
0
 def task_generate_sql(self):
     """Trigger SQL-script generation for the current migration plan."""
     grt.send_progress(0, "Generating SQL...")
     migration_plan = self.main.plan
     migration_plan.generateSQL()
Example #35
0
    def reverseEngineer(cls, connection, catalog_name, schemata_list, context):
        """Reverse engineer the selected schemata of a catalog.

        connection    -- live connection object to the source RDBMS
        catalog_name  -- name of the catalog being reverse engineered
        schemata_list -- names of the schemata to process
        context       -- dict of flags selecting what to fetch:
                         reverseEngineerTables / reverseEngineerTriggers /
                         reverseEngineerViews / reverseEngineerRoutines
                         (all default to True)

        Returns the populated catalog object.  Progress budget: 10% for
        preparation (object counting), 60% for the first pass over tables,
        views, routines and triggers, 20% for the second table pass
        (foreign keys), reported via grt.send_progress and nested
        begin/end_progress_step ranges.
        """
        grt.send_progress(0, "Reverse engineering catalog information")
        cls.check_interruption()
        catalog = cls.reverseEngineerCatalog(connection, catalog_name)

        # calculate total workload 1st
        grt.send_progress(0.1, 'Preparing...')
        table_count_per_schema = {}
        view_count_per_schema = {}
        routine_count_per_schema = {}
        trigger_count_per_schema = {}
        total_count_per_schema = {}

        get_tables = context.get("reverseEngineerTables", True)
        get_triggers = context.get("reverseEngineerTriggers", True)
        get_views = context.get("reverseEngineerViews", True)
        get_routines = context.get("reverseEngineerRoutines", True)

        # 10% of the progress is for preparation
        total = 1e-10  # total should not be zero to avoid DivisionByZero exceptions
        i = 0.0
        accumulated_progress = 0.1
        for schema_name in schemata_list:
            cls.check_interruption()
            table_count_per_schema[schema_name] = len(cls.getTableNames(connection, catalog_name, schema_name)) if get_tables else 0
            view_count_per_schema[schema_name] = len(cls.getViewNames(connection, catalog_name, schema_name)) if get_views else 0
            cls.check_interruption()
            routine_count_per_schema[schema_name] = len(cls.getProcedureNames(connection, catalog_name, schema_name)) + len(cls.getFunctionNames(connection, catalog_name, schema_name)) if get_routines else 0
            trigger_count_per_schema[schema_name] = len(cls.getTriggerNames(connection, catalog_name, schema_name)) if get_triggers else 0

            total_count_per_schema[schema_name] = (table_count_per_schema[schema_name] + view_count_per_schema[schema_name] +
                                                   routine_count_per_schema[schema_name] + trigger_count_per_schema[schema_name] + 1e-10)
            total += total_count_per_schema[schema_name]

            grt.send_progress(accumulated_progress + 0.1 * (i / (len(schemata_list) + 1e-10) ), "Gathered stats for %s" % schema_name)
            i += 1.0

        # Now take 60% in the first pass of reverse engineering:
        accumulated_progress = 0.2
        for schema_name in schemata_list:
            schema_progress_share = 0.6 * (total_count_per_schema.get(schema_name, 0.0) / total)
            schema = find_object_with_name(catalog.schemata, schema_name)

            if schema:
                # Reverse engineer tables:
                step_progress_share = schema_progress_share * (table_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_tables:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering tables from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    # Remove previous first pass marks that may exist if the user goes back and attempt rev eng again:
                    progress_flags = cls._connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
                    progress_flags.discard('%s_tables_first_pass' % schema_name)
                    cls.reverseEngineerTables(connection, schema)
                    grt.end_progress_step()

                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'First pass of table reverse engineering for schema %s completed!' % schema_name)

                # Reverse engineer views:
                step_progress_share = schema_progress_share * (view_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_views:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering views from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    cls.reverseEngineerViews(connection, schema)
                    grt.end_progress_step()

                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Reverse engineering of views for schema %s completed!' % schema_name)

                # Reverse engineer routines: procedures get the first half of the
                # routine progress range, functions the second half.
                step_progress_share = schema_progress_share * (routine_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_routines:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering routines from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    grt.begin_progress_step(0.0, 0.5)
                    cls.reverseEngineerProcedures(connection, schema)
                    cls.check_interruption()
                    grt.end_progress_step()
                    grt.begin_progress_step(0.5, 1.0)
                    reverseEngineerFunctions(connection, schema)
                    grt.end_progress_step()
                    grt.end_progress_step()

                accumulated_progress += step_progress_share
                grt.send_progress(accumulated_progress, 'Reverse engineering of routines for schema %s completed!' % schema_name)

                # Reverse engineer triggers:
                step_progress_share = schema_progress_share * (trigger_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
                if get_triggers:
                    cls.check_interruption()
                    grt.send_info('Reverse engineering triggers from %s' % schema_name)
                    grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                    cls.reverseEngineerTriggers(connection, schema)
                    grt.end_progress_step()

                # NOTE(review): plain assignment (not +=) pins the bar at 0.8 after
                # every schema — presumably intentional so the second pass always
                # starts from 0.8; verify before changing.
                accumulated_progress = 0.8
                grt.send_progress(accumulated_progress, 'Reverse engineering of triggers for schema %s completed!' % schema_name)
            else:  # No schema with the given name was found
                grt.send_warning('The schema %s was not found in the catalog %s. Skipping it.' % (schema_name, catalog_name) )

        # Now the second pass for reverse engineering tables (foreign keys can be
        # resolved now that every table object exists):
        if get_tables:
            total_tables = sum(table_count_per_schema[schema.name] for schema in catalog.schemata if schema.name in schemata_list)
            for schema in catalog.schemata:
                if schema.name not in schemata_list:
                    continue
                cls.check_interruption()
                step_progress_share = 0.2 * (table_count_per_schema[schema.name] / (total_tables + 1e-10))
                grt.send_info('Reverse engineering foreign keys for tables in schema %s' % schema.name)
                grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                cls.reverseEngineerTables(connection, schema)
                grt.end_progress_step()

                accumulated_progress += step_progress_share
                # Bug fix: this message previously used the stale variable
                # schema_name left over from the first-pass loop.
                grt.send_progress(accumulated_progress, 'Second pass of table reverse engineering for schema %s completed!' % schema.name)

        grt.send_progress(1.0, 'Reverse engineering completed!')
        return catalog
Example #36
0
 def task_connect(self):
     """Open the connection to the source RDBMS.

     Raises Exception when the connection attempt fails; returns True otherwise.
     """
     grt.send_progress(-1, "Connecting...")
     source = self.main.plan.migrationSource
     if source.connect():
         return True
     raise Exception("Could not connect to source RDBMS")
 def task_connect(self):
     """Connect to the source RDBMS, raising on failure."""
     grt.send_progress(-1, "Connecting...")
     connected = self.main.plan.migrationSource.connect()
     if not connected:
         raise Exception("Could not connect to source RDBMS")
     return True
 def task_generate_sql(self):
     """Run SQL generation on the migration plan."""
     grt.send_progress(0, "Generating SQL...")
     # NOTE(review): the generation report is not used in this snippet —
     # presumably consumed by a fuller version of this method; verify.
     report = self.main.plan.generateSQL()
Example #39
0
 def doFetchSchemaNames(self, only_these_catalogs=None):
     """Fetch list of schema names in catalog.schema format and stores them in the migration.sourceSchemataNames node.

     Depending on the capabilities of the source RDBMS, names are stored as
     quoted "catalog.schema" pairs (catalogs supported), as "def.schema"
     (schemata only), or as bare quoted schema names (neither supported).

     only_these_catalogs: optional list restricting the fetch; when the RDBMS
     does not support catalogs it is interpreted as a list of schema names.
     Defaults to None, meaning fetch everything.  (Was a mutable default
     argument `[]`, a classic Python pitfall; None is behavior-compatible
     since the list is only read, never mutated.)
     """
     only_these_catalogs = only_these_catalogs or []

     grt.send_progress(0.0, "Checking connection...")
     self.connect()
     if self.rdbms.doesSupportCatalogs > 0:
         grt.send_progress(0.1, "Fetching catalog names...")
         self.state.sourceSchemataNames.remove_all()
         catalog_names = self.getCatalogNames()
         if only_these_catalogs:
             inexistent_catalogs = set(only_these_catalogs).difference(catalog_names)
             if inexistent_catalogs:
                 grt.send_warning('The following catalogs were not found: ' + ', '.join(list(inexistent_catalogs)))
             # Fall back to the full catalog list when none of the requested ones exist:
             catalog_names = list(set(only_these_catalogs).difference(inexistent_catalogs)) or self.getCatalogNames()
         self._catalog_names = catalog_names
         grt.send_progress(0.1, "Fetching schema names...")
         accumulated_progress = 0.1
         step_progress_share = 1.0 / (len(catalog_names) + 1e-10)  # avoid division by zero
         for catalog in catalog_names:
             if not catalog:
                 continue
             grt.send_progress(accumulated_progress, 'Fetching schema names from %s...' % catalog)
             schema_names = self.getSchemaNames(catalog)
             for schema in schema_names:
                 if not schema:
                     continue
                 self.state.sourceSchemataNames.append("%s.%s" % (self._db_module.quoteIdentifier(catalog), self._db_module.quoteIdentifier(schema)))
             accumulated_progress += 0.9 * step_progress_share
     elif self.rdbms.doesSupportCatalogs == 0:  # The rdbms doesn't support catalogs
         grt.send_progress(0.1, "Fetching schema names...")
         schema_names = self.getSchemaNames('')
         if only_these_catalogs:  # Here only_these_catalogs would rather mean only these schemata
             inexistent_schemata = set(only_these_catalogs).difference(schema_names)
             if inexistent_schemata:
                 # Typo fix: message previously read "where not found"
                 grt.send_warning('The following schemas were not found: ' + ', '.join(list(inexistent_schemata)))
             schema_names = list(set(only_these_catalogs).difference(inexistent_schemata))  or self.getSchemaNames('')
         self._catalog_names = []
         self.state.sourceSchemataNames.remove_all()
         for schema in schema_names:
             # "def" is used as a dummy catalog name
             self.state.sourceSchemataNames.append('%s.%s' % (self._db_module.quoteIdentifier('def'), self._db_module.quoteIdentifier(schema)))
     else: # no schema either
         self._catalog_names = []
         self.state.sourceSchemataNames.remove_all()
         for schema in self.getSchemaNames(''):
             self.state.sourceSchemataNames.append(self._db_module.quoteIdentifier(schema))
     grt.send_progress(1.0, "Finished")