def migrateCatalog(self, state, source_catalog):
    """Migrate a source catalog into a new db_mysql_Catalog.

    Copies the catalog identity and the target RDBMS's simple datatypes,
    records the target server version on the catalog, migrates every source
    schema, and finally performs the foreign-key (2nd pass) migration once
    all referenced objects exist.

    Returns the populated target catalog (also stored in state.targetCatalog).
    """
    target_catalog = grt.classes.db_mysql_Catalog()
    log = state.addMigrationLogEntry(0, source_catalog, target_catalog, "")
    target_catalog.name = self.migrateIdentifier(source_catalog.name, log)
    target_catalog.oldName = source_catalog.name
    targetRdbms = state.targetConnection.driver.owner
    target_catalog.simpleDatatypes.extend(targetRdbms.simpleDatatypes)
    state.targetCatalog = target_catalog

    # Set the version of the target database. Both branches built an
    # identical GrtVersion object, so the common part is shared and only the
    # version numbers/name depend on whether the state carries a target
    # version; 5.5.0 is the fallback default.
    targetVersion = grt.classes.GrtVersion()
    targetVersion.owner = target_catalog
    if state.targetVersion:
        aTargetVersion = state.targetVersion
        targetVersion.majorNumber, targetVersion.minorNumber, targetVersion.releaseNumber, targetVersion.buildNumber = \
            aTargetVersion.majorNumber, aTargetVersion.minorNumber, aTargetVersion.releaseNumber, aTargetVersion.buildNumber
        targetVersion.name = aTargetVersion.name
    else:
        targetVersion.majorNumber, targetVersion.minorNumber, targetVersion.releaseNumber, targetVersion.buildNumber = (5, 5, 0, 0)
        targetVersion.name = "5.5.0"
    target_catalog.version = targetVersion

    grt.send_progress(0.0, "Migrating...")
    # +1e-10 avoids division by zero for an empty schema list.
    schema_count = len(source_catalog.schemata) + 1e-10
    i = 0.0
    # Migrate all source schemata to target schemata (first 90% of progress).
    for sourceSchema in source_catalog.schemata:
        grt.begin_progress_step(0.9 * i / schema_count, 0.9 * (i + 1) / schema_count)
        grt.send_progress(0.9 * i / schema_count, "Migrating schema %s..." % sourceSchema.name)
        # migrate schema
        targetSchema = self.migrateSchema(state, sourceSchema, target_catalog)
        if targetSchema:
            # add generated schema to target_catalog
            target_catalog.schemata.append(targetSchema)
        grt.end_progress_step()
        i += 1

    grt.send_progress(0.9, "Finalizing foreign key migration...")
    # Migrate foreign keys last, as they need the referenced objects -- which
    # can come from different schemas -- to be ready.
    for sourceSchema in source_catalog.schemata:
        global key_names
        key_names[sourceSchema.name] = set()
        targetSchema = self.findMatchingTargetObject(state, sourceSchema)
        for sourceTable in sourceSchema.tables:
            if not self.shouldMigrate(state, 'tables', sourceTable):
                continue
            targetTable = self.findMatchingTargetObject(state, sourceTable)
            self.migrateTableToMySQL2ndPass(state, sourceTable, targetTable)

    grt.send_progress(1.0, "Migration finished")
    return target_catalog
def migrateSchema(self, state, sourceSchema, targetCatalog):
    """Migrate a single source schema into a new db_mysql_Schema.

    Translates the schema's charset/collation, identifier and comment, then
    migrates the schema contents (tables, views, routines) within a nested
    progress step covering the 0.2..1.0 range.

    Returns the new target schema (owned by targetCatalog but not yet
    appended to its schemata list -- the caller does that).
    """
    targetSchema = grt.classes.db_mysql_Schema()
    targetSchema.owner = targetCatalog
    log = state.addMigrationLogEntry(0, sourceSchema, targetSchema, "")
    targetSchema.defaultCharacterSetName, targetSchema.defaultCollationName = self.migrateCharsetCollation(state, sourceSchema.defaultCharacterSetName, sourceSchema.defaultCollationName, sourceSchema, targetSchema)
    targetSchema.name = self.migrateIdentifier(sourceSchema.name, log)
    targetSchema.oldName = sourceSchema.name
    targetSchema.comment = sourceSchema.comment
    grt.send_progress(0.2, 'Migrating schema contents for schema %s' % sourceSchema.name)
    # (removed a no-op `if True:` wrapper around the following step)
    grt.begin_progress_step(0.2, 1.0)
    self.migrateSchemaContents(state, targetSchema, sourceSchema)
    grt.end_progress_step()
    return targetSchema
def createCatalogObjects(connection, catalog, objectCreationParams, creationLog):
    """Create catalog objects in the server for the specified connection. The catalog must have been previously processed with generateSQLCreateStatements(), so that the objects have their temp_sql attributes set with their respective SQL CREATE statements. """

    # When a creationLog list was supplied, append a GrtLogObject for obj and
    # return it so execute_script() can attach messages to it; otherwise
    # return None (logging disabled).
    def makeLogObject(obj):
        if creationLog is not None:
            log = grt.classes.GrtLogObject()
            log.logObject = obj
            creationLog.append(log)
            return log
        else:
            return None

    try:
        grt.send_progress(
            0.0, "Creating schema in target MySQL server at %s..." %
            connection.hostIdentifier)

        # Preamble/postamble scripts are stashed in the catalog's customData
        # by the SQL-generation step.
        preamble = catalog.customData["migration:preamble"]
        grt.send_progress(0.0, "Executing preamble script...")
        execute_script(connection, preamble.temp_sql, makeLogObject(preamble))

        i = 0.0
        for schema in catalog.schemata:
            # Every schema gets an equal slice of the overall progress range.
            grt.begin_progress_step(i, i + 1.0 / len(catalog.schemata))
            i += 1.0 / len(catalog.schemata)

            # Objects flagged commentedOut were deselected by the user; skip.
            if schema.commentedOut:
                grt.send_progress(1.0, "Skipping schema %s... " % schema.name)
                grt.end_progress_step()
                continue

            # Total object count in this schema, used to scale the per-object
            # progress fraction below.
            total = len(schema.tables) + len(schema.views) + len(
                schema.routines) + sum(
                [len(table.triggers) for table in schema.tables])

            grt.send_progress(0.0, "Creating schema %s..." % schema.name)
            execute_script(connection, schema.temp_sql, makeLogObject(schema))

            # Counters of successfully executed CREATE scripts per kind.
            # NOTE(review): trcount (triggers) is tallied but not included in
            # the summary send_info message below.
            tcount = 0
            vcount = 0
            rcount = 0
            trcount = 0
            o = 0  # objects processed so far, created or skipped

            for table in schema.tables:
                if table.commentedOut:
                    grt.send_progress(
                        float(o) / total,
                        "Skipping table %s.%s" % (schema.name, table.name))
                else:
                    grt.send_progress(
                        float(o) / total,
                        "Creating table %s.%s" % (schema.name, table.name))
                o += 1
                if not table.commentedOut and execute_script(
                        connection, table.temp_sql, makeLogObject(table)):
                    tcount += 1

            for view in schema.views:
                if view.commentedOut:
                    grt.send_progress(
                        float(o) / total,
                        "Skipping view %s.%s" % (schema.name, view.name))
                else:
                    grt.send_progress(
                        float(o) / total,
                        "Creating view %s.%s" % (schema.name, view.name))
                o += 1
                if not view.commentedOut and execute_script(
                        connection, view.temp_sql, makeLogObject(view)):
                    vcount += 1

            for routine in schema.routines:
                if routine.commentedOut:
                    grt.send_progress(
                        float(o) / total,
                        "Skipping routine %s.%s" % (schema.name, routine.name))
                else:
                    grt.send_progress(
                        float(o) / total,
                        "Creating routine %s.%s" % (schema.name, routine.name))
                o += 1
                if not routine.commentedOut and execute_script(
                        connection, routine.temp_sql, makeLogObject(routine)):
                    rcount += 1

            # Triggers are created last, once their owning tables exist.
            for table in schema.tables:
                for trigger in table.triggers:
                    if trigger.commentedOut:
                        grt.send_progress(
                            float(o) / total, "Skipping trigger %s.%s.%s" %
                            (schema.name, table.name, trigger.name))
                    else:
                        grt.send_progress(
                            float(o) / total, "Creating trigger %s.%s.%s" %
                            (schema.name, table.name, trigger.name))
                    o += 1
                    if not trigger.commentedOut and execute_script(
                            connection, trigger.temp_sql,
                            makeLogObject(trigger)):
                        trcount += 1

            grt.send_info(
                "Scripts for %i tables, %i views and %i routines were executed for schema %s"
                % (tcount, vcount, rcount, schema.name))
            grt.end_progress_step()

        postamble = catalog.customData["migration:postamble"]
        grt.send_progress(1.0, "Executing postamble script...")
        execute_script(connection, postamble.temp_sql, makeLogObject(postamble))
        grt.send_progress(1.0, "Schema created")
    except grt.UserInterrupt:
        # Note the cancellation in the log, then let it propagate.
        grt.send_info(
            "Cancellation request detected, interrupting schema creation.")
        raise
    return 1
def reverseEngineer(connection, catalog_name, schemata_list, options):
    """Reverse engineers a Sybase ASE database.

    This is the function that will be called by the Migration Wizard to
    reverse engineer a Sybase database. All the other reverseEngineer*
    functions are not actually required and should not be considered part of
    this module API even though they are currently being exposed. This
    function calls the other reverseEngineer* functions to complete the full
    reverse engineer process.
    """
    grt.send_progress(0, "Reverse engineering catalog information")
    catalog = grt.classes.db_sybase_Catalog()
    catalog.name = catalog_name
    catalog.simpleDatatypes.remove_all()
    catalog.simpleDatatypes.extend(connection.driver.owner.simpleDatatypes)
    catalog.defaultCollationName = ''  # FIXME: Find out the right collation for the catalog

    grt.send_progress(0.05, "Reverse engineering User Data Types...")
    check_interruption()
    # reverseEngineerUserDatatypes(connection, catalog)

    # calculate total workload 1st
    grt.send_progress(0.1, 'Preparing...')
    table_count_per_schema = {}
    view_count_per_schema = {}
    routine_count_per_schema = {}
    trigger_count_per_schema = {}
    total_count_per_schema = {}

    get_tables = options.get("reverseEngineerTables", True)
    get_triggers = options.get("reverseEngineerTriggers", True)
    get_views = options.get("reverseEngineerViews", True)
    get_routines = options.get("reverseEngineerRoutines", True)

    # 10% of the progress is for preparation
    total = 1e-10  # total should not be zero to avoid DivisionByZero exceptions
    i = 1.0
    accumulated_progress = 0.1
    for schema_name in schemata_list:
        check_interruption()
        table_count_per_schema[schema_name] = len(getTableNames(connection, catalog_name, schema_name)) if get_tables else 0
        view_count_per_schema[schema_name] = len(getViewNames(connection, catalog_name, schema_name)) if get_views else 0
        check_interruption()
        # Parentheses make the precedence explicit: both counts are summed
        # before the conditional is applied (the original parsed the same way).
        routine_count_per_schema[schema_name] = (len(getProcedureNames(connection, catalog_name, schema_name)) +
                                                 len(getFunctionNames(connection, catalog_name, schema_name))) if get_routines else 0
        trigger_count_per_schema[schema_name] = len(getTriggerNames(connection, catalog_name, schema_name)) if get_triggers else 0

        total_count_per_schema[schema_name] = (table_count_per_schema[schema_name] + view_count_per_schema[schema_name] +
                                               routine_count_per_schema[schema_name] + trigger_count_per_schema[schema_name] + 1e-10)
        total += total_count_per_schema[schema_name]

        grt.send_progress(accumulated_progress + 0.1 * (i / (len(schemata_list) + 1e-10)), "Gathered stats for %s" % schema_name)
        i += 1.0

    # Now take 60% in the first pass of reverse engineering:
    accumulated_progress = 0.2
    grt.reset_progress_steps()
    grt.begin_progress_step(accumulated_progress, accumulated_progress + 0.6)
    accumulated_schema_progress = 0.0
    for schema_name in schemata_list:
        schema_progress_share = total_count_per_schema.get(schema_name, 0.0) / total
        grt.begin_progress_step(accumulated_schema_progress, accumulated_schema_progress + schema_progress_share)
        this_schema_progress = 0.0

        schema = grt.classes.db_sybase_Schema()
        schema.owner = catalog
        schema.name = schema_name
        schema.defaultCollationName = catalog.defaultCollationName
        catalog.schemata.append(schema)

        # Reverse engineer tables:
        step_progress_share = table_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_tables:
            check_interruption()
            grt.send_info('Reverse engineering %i tables from %s' % (table_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share)
            # Remove previous first pass marks that may exist if the user goes back and attempt rev eng again:
            progress_flags = _connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
            progress_flags.discard('%s_tables_first_pass' % schema_name)
            reverseEngineerTables(connection, schema)
            grt.end_progress_step()
        this_schema_progress += step_progress_share
        grt.send_progress(this_schema_progress, 'First pass of table reverse engineering for schema %s completed!' % schema_name)

        # Reverse engineer views:
        step_progress_share = view_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_views:
            check_interruption()
            grt.send_info('Reverse engineering %i views from %s' % (view_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share)
            reverseEngineerViews(connection, schema)
            grt.end_progress_step()
        this_schema_progress += step_progress_share
        grt.send_progress(this_schema_progress, 'Reverse engineering of views for schema %s completed!' % schema_name)

        # Reverse engineer routines: procedures take the first half of the
        # routines share, functions the second half.
        step_progress_share = routine_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_routines:
            check_interruption()
            grt.send_info('Reverse engineering %i routines from %s' % (routine_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share/2)
            schema.routines.remove_all()
            reverseEngineerProcedures(connection, schema)
            grt.end_progress_step()
            check_interruption()
            grt.begin_progress_step(this_schema_progress + step_progress_share/2, this_schema_progress + step_progress_share)
            reverseEngineerFunctions(connection, schema)
            grt.end_progress_step()
        this_schema_progress += step_progress_share
        grt.send_progress(this_schema_progress, 'Reverse engineering of routines for schema %s completed!' % schema_name)

        # Reverse engineer triggers:
        step_progress_share = trigger_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_triggers:
            check_interruption()
            grt.send_info('Reverse engineering %i triggers from %s' % (trigger_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share)
            reverseEngineerTriggers(connection, schema)
            grt.end_progress_step()
        this_schema_progress += step_progress_share
        grt.send_progress(this_schema_progress, 'Reverse engineering of triggers for schema %s completed!' % schema_name)

        accumulated_schema_progress += schema_progress_share
        grt.end_progress_step()

    grt.end_progress_step()

    # Now the second pass for reverse engineering tables (foreign keys need
    # all referenced tables to exist first):
    accumulated_progress = 0.8
    if get_tables:
        total_tables = sum(table_count_per_schema[schema.name] for schema in catalog.schemata)
        for schema in catalog.schemata:
            check_interruption()
            step_progress_share = 0.2 * (table_count_per_schema[schema.name] / (total_tables + 1e-10))
            grt.send_info('Reverse engineering foreign keys for tables in schema %s' % schema.name)
            grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
            reverseEngineerTables(connection, schema)
            grt.end_progress_step()
            accumulated_progress += step_progress_share
            # Fixed: the message used the stale loop variable schema_name
            # (always the last prepared schema) instead of the current schema.
            grt.send_progress(accumulated_progress, 'Second pass of table reverse engineering for schema %s completed!' % schema.name)

    grt.send_progress(1.0, 'Reverse engineering completed!')
    return catalog
def createCatalogObjects(connection, catalog, objectCreationParams, creationLog):
    """Create catalog objects in the server for the specified connection. The catalog must have been previously processed with generateSQLCreateStatements(), so that the objects have their temp_sql attributes set with their respective SQL CREATE statements. """

    # When a creationLog list was supplied, append a GrtLogObject for obj and
    # return it so execute_script() can attach messages to it; otherwise
    # return None (logging disabled).
    def makeLogObject(obj):
        if creationLog is not None:
            log = grt.classes.GrtLogObject()
            log.logObject = obj
            creationLog.append(log)
            return log
        else:
            return None

    try:
        grt.send_progress(0.0, "Creating schema in target MySQL server at %s..." % connection.hostIdentifier)

        # Preamble/postamble scripts are stashed in the catalog's customData
        # by the SQL-generation step.
        preamble = catalog.customData["migration:preamble"]
        grt.send_progress(0.0, "Executing preamble script...")
        execute_script(connection, preamble.temp_sql, makeLogObject(preamble))

        i = 0.0
        for schema in catalog.schemata:
            # Every schema gets an equal slice of the overall progress range.
            grt.begin_progress_step(i, i + 1.0 / len(catalog.schemata))
            i += 1.0 / len(catalog.schemata)

            # Objects flagged commentedOut were deselected by the user; skip.
            if schema.commentedOut:
                grt.send_progress(1.0, "Skipping schema %s... " % schema.name)
                grt.end_progress_step()
                continue

            # Total object count in this schema, used to scale the per-object
            # progress fraction below.
            total = len(schema.tables) + len(schema.views) + len(schema.routines) + sum([len(table.triggers) for table in schema.tables])

            grt.send_progress(0.0, "Creating schema %s..." % schema.name)
            execute_script(connection, schema.temp_sql, makeLogObject(schema))

            # Counters of successfully executed CREATE scripts per kind.
            # NOTE(review): trcount (triggers) is tallied but not included in
            # the summary send_info message below.
            tcount = 0
            vcount = 0
            rcount = 0
            trcount = 0
            o = 0  # objects processed so far, created or skipped

            for table in schema.tables:
                if table.commentedOut:
                    grt.send_progress(float(o) / total, "Skipping table %s.%s" % (schema.name, table.name))
                else:
                    grt.send_progress(float(o) / total, "Creating table %s.%s" % (schema.name, table.name))
                o += 1
                if not table.commentedOut and execute_script(connection, table.temp_sql, makeLogObject(table)):
                    tcount += 1

            for view in schema.views:
                if view.commentedOut:
                    grt.send_progress(float(o) / total, "Skipping view %s.%s" % (schema.name, view.name))
                else:
                    grt.send_progress(float(o) / total, "Creating view %s.%s" % (schema.name, view.name))
                o += 1
                if not view.commentedOut and execute_script(connection, view.temp_sql, makeLogObject(view)):
                    vcount += 1

            for routine in schema.routines:
                if routine.commentedOut:
                    grt.send_progress(float(o) / total, "Skipping routine %s.%s" % (schema.name, routine.name))
                else:
                    grt.send_progress(float(o) / total, "Creating routine %s.%s" % (schema.name, routine.name))
                o += 1
                if not routine.commentedOut and execute_script(connection, routine.temp_sql, makeLogObject(routine)):
                    rcount += 1

            # Triggers are created last, once their owning tables exist.
            for table in schema.tables:
                for trigger in table.triggers:
                    if trigger.commentedOut:
                        grt.send_progress(float(o) / total, "Skipping trigger %s.%s.%s" % (schema.name, table.name, trigger.name))
                    else:
                        grt.send_progress(float(o) / total, "Creating trigger %s.%s.%s" % (schema.name, table.name, trigger.name))
                    o += 1
                    if not trigger.commentedOut and execute_script(connection, trigger.temp_sql, makeLogObject(trigger)):
                        trcount += 1

            grt.send_info("Scripts for %i tables, %i views and %i routines were executed for schema %s" % (tcount, vcount, rcount, schema.name))
            grt.end_progress_step()

        postamble = catalog.customData["migration:postamble"]
        grt.send_progress(1.0, "Executing postamble script...")
        execute_script(connection, postamble.temp_sql, makeLogObject(postamble))
        grt.send_progress(1.0, "Schema created")
    except grt.UserInterrupt:
        # Note the cancellation in the log, then let it propagate.
        grt.send_info("Cancellation request detected, interrupting schema creation.")
        raise
    return 1
def reverseEngineer(connection, catalog_name, schemata_list, context):
    """Reverse engineer the given schemata of a MySQL server into a catalog.

    For each schema, SHOW CREATE statements are fetched for tables/views,
    triggers, procedures and functions, and fed through the MySQL parser
    services to populate a db_mysql_Catalog. Stub tables created by the
    parser for unresolved references are removed at the end.

    Raises Exception when a SHOW CREATE query returns no row for an object.
    """
    catalog = grt.classes.db_mysql_Catalog()
    catalog.name = catalog_name
    catalog.simpleDatatypes.remove_all()
    catalog.simpleDatatypes.extend(connection.driver.owner.simpleDatatypes)

    table_names_per_schema = {}
    routine_names_per_schema = {}
    trigger_names_per_schema = {}

    def filter_warnings(mtype, text, detail):
        # filter out parser warnings about stub creation/reuse from the message stream, since
        # they're harmless
        if mtype == "WARNING" and (" stub " in text or "Stub " in text):
            grt.send_info(text)
            return True
        return False

    version = getServerVersion(connection)

    get_tables = context.get("reverseEngineerTables", True)
    # SHOW CREATE TRIGGER is only available from MySQL 5.1.21 on.
    get_triggers = context.get("reverseEngineerTriggers", True) and (version.majorNumber, version.minorNumber, version.releaseNumber) >= (5, 1, 21)
    get_views = context.get("reverseEngineerViews", True)
    get_routines = context.get("reverseEngineerRoutines", True)

    # calculate total workload 1st
    # 10% of the progress is for preparation
    grt.send_progress(0, "Preparing...")
    total = 0
    i = 0.0
    for schema_name in schemata_list:
        check_interruption()
        if get_tables and get_views:
            table_names = getAllTableNames(connection, catalog_name, schema_name)
        elif get_tables:
            table_names = getTableNames(connection, catalog_name, schema_name)
        elif get_views:
            table_names = getViewNames(connection, catalog_name, schema_name)
        else:
            # Fixed: this assigned `table_name` (typo), leaving `table_names`
            # undefined / stale when both tables and views are disabled.
            table_names = []
        total += len(table_names)
        table_names_per_schema[schema_name] = table_names
        check_interruption()
        if get_routines:
            procedure_names = getProcedureNames(connection, catalog_name, schema_name)
            check_interruption()
            function_names = getFunctionNames(connection, catalog_name, schema_name)
            check_interruption()
            total += len(procedure_names)
            total += len(function_names)
            routine_names_per_schema[schema_name] = procedure_names, function_names
        else:
            routine_names_per_schema[schema_name] = [], []
        if get_triggers:
            trigger_names = getTriggerNames(connection, catalog_name, schema_name)
            total += len(trigger_names)
        else:
            trigger_names = []
        trigger_names_per_schema[schema_name] = trigger_names

        grt.send_progress(0.1 * (i/len(schemata_list)), "Preparing...")
        i += 1.0

    def wrap_sql(sql, schema):
        # Prefix the statement so it executes/parses in the right schema.
        return "USE `%s`;\n%s"%(escape_sql_identifier(schema), sql)

    def wrap_routine_sql(sql):
        return "DELIMITER $$\n"+sql

    # Each object advances `i` by 0.5 on retrieval and 0.5 after parsing, so
    # the remaining 90% of progress is spread over `total` objects.
    i = 0.0
    for schema_name in schemata_list:
        schema = grt.classes.db_mysql_Schema()
        schema.owner = catalog
        schema.name = schema_name
        catalog.schemata.append(schema)
        # Renamed from `context` to stop shadowing the options-dict parameter.
        parser_context = grt.modules.MySQLParserServices.createParserContext(catalog.characterSets, getServerVersion(connection), getServerMode(connection), 1)
        options = {}
        if get_tables or get_views:
            grt.send_info("Reverse engineering tables from %s" % schema_name)
            for table_name in table_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving table %s.%s..." % (schema_name, table_name))
                result = execute_query(connection, "SHOW CREATE TABLE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(table_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, table_name))
                if result and result.nextRow():
                    sql = result.stringByIndex(2)
                    grt.push_message_handler(filter_warnings)
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(parser_context, catalog, wrap_sql(sql, schema_name), options)
                    grt.end_progress_step()
                    grt.pop_message_handler()
                    i += 0.5
                else:
                    raise Exception("Could not fetch table information for %s.%s" % (schema_name, table_name))

        if get_triggers:
            grt.send_info("Reverse engineering triggers from %s" % schema_name)
            for trigger_name in trigger_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving trigger %s.%s..." % (schema_name, trigger_name))
                result = execute_query(connection, "SHOW CREATE TRIGGER `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(trigger_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, trigger_name))
                if result and result.nextRow():
                    sql = result.stringByName("SQL Original Statement")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(parser_context, catalog, wrap_sql(wrap_routine_sql(sql), schema_name), options)
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch trigger information for %s.%s" % (schema_name, trigger_name))

        if get_routines:
            grt.send_info("Reverse engineering stored procedures from %s" % schema_name)
            procedure_names, function_names = routine_names_per_schema[schema_name]
            for name in procedure_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving stored procedure %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE PROCEDURE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Procedure")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(parser_context, catalog, wrap_sql(wrap_routine_sql(sql), schema_name), options)
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch procedure information for %s.%s" % (schema_name, name))

            grt.send_info("Reverse engineering functions from %s" % schema_name)
            for name in function_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving function %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE FUNCTION `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Function")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i+0.5) / total))
                    grt.modules.MySQLParserServices.parseSQLIntoCatalogSql(parser_context, catalog, wrap_sql(wrap_routine_sql(sql), schema_name), options)
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch function information for %s.%s" % (schema_name, name))

    grt.send_progress(1.0, "Reverse engineered %i objects" % total)

    # check for any stub tables left over; drop them and any schema that ends
    # up empty because it only held stubs
    empty_schemas = []
    for schema in catalog.schemata:
        schema_has_stub_tables = False
        for table in reversed(schema.tables):
            if table.isStub:
                grt.send_warning("Table %s was referenced from another table, but was not reverse engineered" % table.name)
                schema.tables.remove(table)
                schema_has_stub_tables = True
        if not schema.tables and not schema.views and not schema.routines and schema_has_stub_tables:
            empty_schemas.append(schema)
    for schema in empty_schemas:
        catalog.schemata.remove(schema)

    return catalog
def reverseEngineer(cls, connection, catalog_name, schemata_list, context):
    """Reverse engineer the requested schemata of a catalog.

    First gathers object counts per schema to weight the progress reporting
    (10%), then runs the first pass over tables, views, routines and
    triggers (60%), and finally a second pass over tables for foreign keys
    (20%), which needs every referenced table to already exist.

    Returns the populated catalog object.
    """
    grt.send_progress(0, "Reverse engineering catalog information")
    cls.check_interruption()
    catalog = cls.reverseEngineerCatalog(connection, catalog_name)

    # calculate total workload 1st
    grt.send_progress(0.1, 'Preparing...')
    table_count_per_schema = {}
    view_count_per_schema = {}
    routine_count_per_schema = {}
    trigger_count_per_schema = {}
    total_count_per_schema = {}

    get_tables = context.get("reverseEngineerTables", True)
    get_triggers = context.get("reverseEngineerTriggers", True)
    get_views = context.get("reverseEngineerViews", True)
    get_routines = context.get("reverseEngineerRoutines", True)

    # 10% of the progress is for preparation
    total = 1e-10  # total should not be zero to avoid DivisionByZero exceptions
    i = 0.0
    accumulated_progress = 0.1
    for schema_name in schemata_list:
        cls.check_interruption()
        table_count_per_schema[schema_name] = len(cls.getTableNames(connection, catalog_name, schema_name)) if get_tables else 0
        view_count_per_schema[schema_name] = len(cls.getViewNames(connection, catalog_name, schema_name)) if get_views else 0
        cls.check_interruption()
        # Parentheses make the precedence explicit: both counts are summed
        # before the conditional is applied (the original parsed the same way).
        routine_count_per_schema[schema_name] = (len(cls.getProcedureNames(connection, catalog_name, schema_name)) +
                                                 len(cls.getFunctionNames(connection, catalog_name, schema_name))) if get_routines else 0
        trigger_count_per_schema[schema_name] = len(cls.getTriggerNames(connection, catalog_name, schema_name)) if get_triggers else 0

        total_count_per_schema[schema_name] = (table_count_per_schema[schema_name] + view_count_per_schema[schema_name] +
                                               routine_count_per_schema[schema_name] + trigger_count_per_schema[schema_name] + 1e-10)
        total += total_count_per_schema[schema_name]

        grt.send_progress(accumulated_progress + 0.1 * (i / (len(schemata_list) + 1e-10)), "Gathered stats for %s" % schema_name)
        i += 1.0

    # Now take 60% in the first pass of reverse engineering:
    accumulated_progress = 0.2
    for schema_name in schemata_list:
        schema_progress_share = 0.6 * (total_count_per_schema.get(schema_name, 0.0) / total)
        schema = find_object_with_name(catalog.schemata, schema_name)

        if schema:
            # Reverse engineer tables:
            step_progress_share = schema_progress_share * (table_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
            if get_tables:
                cls.check_interruption()
                grt.send_info('Reverse engineering tables from %s' % schema_name)
                grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                # Remove previous first pass marks that may exist if the user goes back and attempt rev eng again:
                progress_flags = cls._connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
                progress_flags.discard('%s_tables_first_pass' % schema_name)
                cls.reverseEngineerTables(connection, schema)
                grt.end_progress_step()
            accumulated_progress += step_progress_share
            grt.send_progress(accumulated_progress, 'First pass of table reverse engineering for schema %s completed!' % schema_name)

            # Reverse engineer views:
            step_progress_share = schema_progress_share * (view_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
            if get_views:
                cls.check_interruption()
                grt.send_info('Reverse engineering views from %s' % schema_name)
                grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                cls.reverseEngineerViews(connection, schema)
                grt.end_progress_step()
            accumulated_progress += step_progress_share
            grt.send_progress(accumulated_progress, 'Reverse engineering of views for schema %s completed!' % schema_name)

            # Reverse engineer routines: procedures take the first half of the
            # routines share, functions the second half.
            step_progress_share = schema_progress_share * (routine_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
            if get_routines:
                cls.check_interruption()
                grt.send_info('Reverse engineering routines from %s' % schema_name)
                grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                grt.begin_progress_step(0.0, 0.5)
                cls.reverseEngineerProcedures(connection, schema)
                cls.check_interruption()
                grt.end_progress_step()
                grt.begin_progress_step(0.5, 1.0)
                # Fixed: this call was missing the cls. qualifier, unlike every
                # sibling reverseEngineer* call in this method.
                cls.reverseEngineerFunctions(connection, schema)
                grt.end_progress_step()
                grt.end_progress_step()
            accumulated_progress += step_progress_share
            grt.send_progress(accumulated_progress, 'Reverse engineering of routines for schema %s completed!' % schema_name)

            # Reverse engineer triggers:
            step_progress_share = schema_progress_share * (trigger_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10))
            if get_triggers:
                cls.check_interruption()
                grt.send_info('Reverse engineering triggers from %s' % schema_name)
                grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
                cls.reverseEngineerTriggers(connection, schema)
                grt.end_progress_step()
            # NOTE(review): progress is snapped to 0.8 here (start of the
            # second pass) rather than accumulated -- kept as in the original.
            accumulated_progress = 0.8
            grt.send_progress(accumulated_progress, 'Reverse engineering of triggers for schema %s completed!' % schema_name)
        else:  # No schema with the given name was found
            grt.send_warning('The schema %s was not found in the catalog %s. Skipping it.' % (schema_name, catalog_name) )

    # Now the second pass for reverse engineering tables (foreign keys need
    # all referenced tables to exist first):
    if get_tables:
        total_tables = sum(table_count_per_schema[schema.name] for schema in catalog.schemata if schema.name in schemata_list)
        for schema in catalog.schemata:
            if schema.name not in schemata_list:
                continue
            cls.check_interruption()
            step_progress_share = 0.2 * (table_count_per_schema[schema.name] / (total_tables + 1e-10))
            grt.send_info('Reverse engineering foreign keys for tables in schema %s' % schema.name)
            grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
            cls.reverseEngineerTables(connection, schema)
            grt.end_progress_step()
            accumulated_progress += step_progress_share
            # Fixed: the message used the stale loop variable schema_name
            # (always the last schema of the first pass) instead of the
            # current schema.
            grt.send_progress(accumulated_progress, 'Second pass of table reverse engineering for schema %s completed!' % schema.name)

    grt.send_progress(1.0, 'Reverse engineering completed!')
    return catalog
def reverseEngineer(connection, catalog_name, schemata_list, context):
    """Reverse engineer the given schemata of a live MySQL server into a GRT catalog.

    Builds a db_mysql_Catalog, then for each schema in schemata_list fetches the
    DDL of tables/views, triggers and routines via SHOW CREATE ... statements and
    feeds it to the MySQL SQL parser, which populates the catalog objects.

    :param connection:    live connection object (driver/owner supply the datatypes).
    :param catalog_name:  name to give the resulting catalog.
    :param schemata_list: list of schema names to reverse engineer.
    :param context:       dict of flags: reverseEngineerTables/Views/Triggers/Routines
                          (all default to True). Trigger retrieval additionally
                          requires server >= 5.1.21 (first version with
                          SHOW CREATE TRIGGER).
    :returns: the populated grt.classes.db_mysql_Catalog.
    :raises Exception: if a SHOW CREATE statement returns no row for an object.
    """
    catalog = grt.classes.db_mysql_Catalog()
    catalog.name = catalog_name
    catalog.simpleDatatypes.remove_all()
    catalog.simpleDatatypes.extend(connection.driver.owner.simpleDatatypes)

    table_names_per_schema = {}
    routine_names_per_schema = {}   # schema -> (procedure_names, function_names)
    trigger_names_per_schema = {}

    def filter_warnings(mtype, text, detail):
        # filter out parser warnings about stub creation/reuse from the message stream,
        # since they're harmless
        if mtype == "WARNING" and (" stub " in text or "Stub " in text):
            grt.send_info(text)
            return True
        return False

    version = getServerVersion(connection)

    get_tables = context.get("reverseEngineerTables", True)
    # SHOW CREATE TRIGGER only exists from MySQL 5.1.21 on
    get_triggers = context.get("reverseEngineerTriggers", True) and (version.majorNumber, version.minorNumber, version.releaseNumber) >= (5, 1, 21)
    get_views = context.get("reverseEngineerViews", True)
    get_routines = context.get("reverseEngineerRoutines", True)

    # calculate total workload 1st
    # 10% of the progress is for preparation
    grt.send_progress(0, "Preparing...")
    total = 0
    i = 0.0
    for schema_name in schemata_list:
        check_interruption()
        if get_tables and get_views:
            table_names = getAllTableNames(connection, catalog_name, schema_name)
        elif get_tables:
            table_names = getTableNames(connection, catalog_name, schema_name)
        elif get_views:
            table_names = getViewNames(connection, catalog_name, schema_name)
        else:
            # BUGFIX: was `table_name = []` (singular), which left table_names
            # undefined (NameError) or stale when both tables and views are disabled.
            table_names = []
        total += len(table_names)
        table_names_per_schema[schema_name] = table_names

        check_interruption()
        if get_routines:
            procedure_names = getProcedureNames(connection, catalog_name, schema_name)
            check_interruption()
            function_names = getFunctionNames(connection, catalog_name, schema_name)
            check_interruption()
            total += len(procedure_names)
            total += len(function_names)
            routine_names_per_schema[schema_name] = procedure_names, function_names
        else:
            routine_names_per_schema[schema_name] = [], []

        if get_triggers:
            trigger_names = getTriggerNames(connection, catalog_name, schema_name)
            total += len(trigger_names)
        else:
            trigger_names = []
        trigger_names_per_schema[schema_name] = trigger_names

        grt.send_progress(0.1 * (i / len(schemata_list)), "Preparing...")
        i += 1.0

    def wrap_sql(sql, schema):
        # prefix DDL with a USE so the parser resolves unqualified names correctly
        return "USE `%s`;\n%s" % (escape_sql_identifier(schema), sql)

    def wrap_routine_sql(sql):
        # routine/trigger bodies contain ';', so switch the statement delimiter first
        return "DELIMITER $$\n" + sql

    # remaining 90% of progress: 0.5 units for fetching + 0.5 for parsing each object
    i = 0.0
    for schema_name in schemata_list:
        schema = grt.classes.db_mysql_Schema()
        schema.owner = catalog
        schema.name = schema_name
        catalog.schemata.append(schema)

        if get_tables or get_views:
            grt.send_info("Reverse engineering tables from %s" % schema_name)
            for table_name in table_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving table %s.%s..." % (schema_name, table_name))
                result = execute_query(connection, "SHOW CREATE TABLE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(table_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, table_name))
                if result and result.nextRow():
                    # column 2 of SHOW CREATE TABLE holds the CREATE statement
                    sql = result.stringByIndex(2)
                    grt.push_message_handler(filter_warnings)
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i + 0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(sql, schema_name))
                    grt.end_progress_step()
                    grt.pop_message_handler()
                    i += 0.5
                else:
                    raise Exception("Could not fetch table information for %s.%s" % (schema_name, table_name))

        if get_triggers:
            grt.send_info("Reverse engineering triggers from %s" % schema_name)
            for trigger_name in trigger_names_per_schema[schema_name]:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving trigger %s.%s..." % (schema_name, trigger_name))
                result = execute_query(connection, "SHOW CREATE TRIGGER `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(trigger_name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, trigger_name))
                if result and result.nextRow():
                    sql = result.stringByName("SQL Original Statement")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i + 0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(wrap_routine_sql(sql), schema_name))
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch trigger information for %s.%s" % (schema_name, trigger_name))

        if get_routines:
            grt.send_info("Reverse engineering stored procedures from %s" % schema_name)
            procedure_names, function_names = routine_names_per_schema[schema_name]
            for name in procedure_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving stored procedure %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE PROCEDURE `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Procedure")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i + 0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(wrap_routine_sql(sql), schema_name))
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch procedure information for %s.%s" % (schema_name, name))

            grt.send_info("Reverse engineering functions from %s" % schema_name)
            for name in function_names:
                check_interruption()
                grt.send_progress(0.1 + 0.9 * (i / total), "Retrieving function %s.%s..." % (schema_name, name))
                result = execute_query(connection, "SHOW CREATE FUNCTION `%s`.`%s`" % (escape_sql_identifier(schema_name), escape_sql_identifier(name)))
                i += 0.5
                grt.send_progress(0.1 + 0.9 * (i / total), "Reverse engineering %s.%s..." % (schema_name, name))
                if result and result.nextRow():
                    sql = result.stringByName("Create Function")
                    grt.begin_progress_step(0.1 + 0.9 * (i / total), 0.1 + 0.9 * ((i + 0.5) / total))
                    grt.modules.MysqlSqlFacade.parseSqlScriptString(catalog, wrap_sql(wrap_routine_sql(sql), schema_name))
                    grt.end_progress_step()
                    i += 0.5
                else:
                    raise Exception("Could not fetch function information for %s.%s" % (schema_name, name))

    grt.send_progress(1.0, "Reverse engineered %i objects" % total)

    # check for any stub tables left (created by the parser for referenced but
    # not reverse-engineered tables); drop them, and drop schemas that end up
    # empty because they only contained stubs
    empty_schemas = []
    for schema in catalog.schemata:
        schema_has_stub_tables = False
        for table in reversed(schema.tables):   # reversed so removal is safe while iterating
            if table.isStub:
                grt.send_warning("Table %s was referenced from another table, but was not reverse engineered" % table.name)
                schema.tables.remove(table)
                schema_has_stub_tables = True
        if not schema.tables and not schema.views and not schema.routines and schema_has_stub_tables:
            empty_schemas.append(schema)
    for schema in empty_schemas:
        catalog.schemata.remove(schema)

    return catalog
def reverseEngineer(connection, catalog_name, schemata_list, options):
    """Reverse engineers a Sybase ASE database.

    This is the function that will be called by the Migration Wizard to reverse
    engineer a Sybase database. All the other reverseEngineer* functions are not
    actually required and should not be considered part of this module API even
    though they are currently being exposed.

    Progress budget: 0.0-0.1 preparation, 0.1-0.2 object counting, 0.2-0.8 first
    pass (tables/views/routines/triggers per schema), 0.8-1.0 second pass over
    tables (foreign keys).

    :param connection:    live connection object (driver/owner supply the datatypes).
    :param catalog_name:  name of the Sybase catalog to reverse engineer.
    :param schemata_list: list of schema names to process.
    :param options:       dict of flags: reverseEngineerTables/Views/Triggers/Routines
                          (all default to True).
    :returns: the populated grt.classes.db_sybase_Catalog.
    """
    grt.send_progress(0, "Reverse engineering catalog information")
    catalog = grt.classes.db_sybase_Catalog()
    catalog.name = catalog_name
    catalog.simpleDatatypes.remove_all()
    catalog.simpleDatatypes.extend(connection.driver.owner.simpleDatatypes)
    catalog.defaultCollationName = ''   # FIXME: Find out the right collation for the catalog

    grt.send_progress(0.05, "Reverse engineering User Data Types...")
    check_interruption()
    # reverseEngineerUserDatatypes(connection, catalog)

    # calculate total workload 1st
    grt.send_progress(0.1, 'Preparing...')
    table_count_per_schema = {}
    view_count_per_schema = {}
    routine_count_per_schema = {}
    trigger_count_per_schema = {}
    total_count_per_schema = {}

    get_tables = options.get("reverseEngineerTables", True)
    get_triggers = options.get("reverseEngineerTriggers", True)
    get_views = options.get("reverseEngineerViews", True)
    get_routines = options.get("reverseEngineerRoutines", True)

    # 10% of the progress is for preparation
    total = 1e-10   # total should not be zero to avoid DivisionByZero exceptions
    i = 1.0
    accumulated_progress = 0.1
    for schema_name in schemata_list:
        check_interruption()
        table_count_per_schema[schema_name] = len(getTableNames(connection, catalog_name, schema_name)) if get_tables else 0
        view_count_per_schema[schema_name] = len(getViewNames(connection, catalog_name, schema_name)) if get_views else 0
        check_interruption()
        routine_count_per_schema[schema_name] = (len(getProcedureNames(connection, catalog_name, schema_name)) +
                                                 len(getFunctionNames(connection, catalog_name, schema_name))) if get_routines else 0
        trigger_count_per_schema[schema_name] = len(getTriggerNames(connection, catalog_name, schema_name)) if get_triggers else 0

        total_count_per_schema[schema_name] = (table_count_per_schema[schema_name] +
                                               view_count_per_schema[schema_name] +
                                               routine_count_per_schema[schema_name] +
                                               trigger_count_per_schema[schema_name] +
                                               1e-10)
        total += total_count_per_schema[schema_name]

        grt.send_progress(accumulated_progress + 0.1 * (i / (len(schemata_list) + 1e-10)),
                          "Gathered stats for %s" % schema_name)
        i += 1.0

    # Now take 60% in the first pass of reverse engineering:
    accumulated_progress = 0.2
    grt.reset_progress_steps()
    grt.begin_progress_step(accumulated_progress, accumulated_progress + 0.6)
    accumulated_schema_progress = 0.0
    for schema_name in schemata_list:
        schema_progress_share = total_count_per_schema.get(schema_name, 0.0) / total
        grt.begin_progress_step(accumulated_schema_progress, accumulated_schema_progress + schema_progress_share)
        this_schema_progress = 0.0

        schema = grt.classes.db_sybase_Schema()
        schema.owner = catalog
        schema.name = schema_name
        schema.defaultCollationName = catalog.defaultCollationName
        catalog.schemata.append(schema)

        # Reverse engineer tables:
        step_progress_share = table_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_tables:
            check_interruption()
            grt.send_info('Reverse engineering %i tables from %s' % (table_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share)
            # Remove previous first pass marks that may exist if the user goes back and attempt rev eng again:
            progress_flags = _connections[connection.__id__].setdefault('_rev_eng_progress_flags', set())
            progress_flags.discard('%s_tables_first_pass' % schema_name)
            reverseEngineerTables(connection, schema)
            grt.end_progress_step()
            this_schema_progress += step_progress_share
            grt.send_progress(this_schema_progress,
                              'First pass of table reverse engineering for schema %s completed!' % schema_name)

        # Reverse engineer views:
        step_progress_share = view_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_views:
            check_interruption()
            grt.send_info('Reverse engineering %i views from %s' % (view_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share)
            reverseEngineerViews(connection, schema)
            grt.end_progress_step()
            this_schema_progress += step_progress_share
            grt.send_progress(this_schema_progress,
                              'Reverse engineering of views for schema %s completed!' % schema_name)

        # Reverse engineer routines (first half procedures, second half functions):
        step_progress_share = routine_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_routines:
            check_interruption()
            grt.send_info('Reverse engineering %i routines from %s' % (routine_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share / 2)
            schema.routines.remove_all()
            reverseEngineerProcedures(connection, schema)
            grt.end_progress_step()
            check_interruption()
            grt.begin_progress_step(this_schema_progress + step_progress_share / 2,
                                    this_schema_progress + step_progress_share)
            reverseEngineerFunctions(connection, schema)
            grt.end_progress_step()
            this_schema_progress += step_progress_share
            grt.send_progress(this_schema_progress,
                              'Reverse engineering of routines for schema %s completed!' % schema_name)

        # Reverse engineer triggers:
        step_progress_share = trigger_count_per_schema[schema_name] / (total_count_per_schema[schema_name] + 1e-10)
        if get_triggers:
            check_interruption()
            grt.send_info('Reverse engineering %i triggers from %s' % (trigger_count_per_schema[schema_name], schema_name))
            grt.begin_progress_step(this_schema_progress, this_schema_progress + step_progress_share)
            reverseEngineerTriggers(connection, schema)
            grt.end_progress_step()
            this_schema_progress += step_progress_share
            grt.send_progress(this_schema_progress,
                              'Reverse engineering of triggers for schema %s completed!' % schema_name)

        accumulated_schema_progress += schema_progress_share
        grt.end_progress_step()

    grt.end_progress_step()

    # Now the second pass for reverse engineering tables (foreign keys need all
    # referenced tables to exist first):
    accumulated_progress = 0.8
    if get_tables:
        total_tables = sum(table_count_per_schema[schema.name] for schema in catalog.schemata)
        for schema in catalog.schemata:
            check_interruption()
            step_progress_share = 0.2 * (table_count_per_schema[schema.name] / (total_tables + 1e-10))
            grt.send_info('Reverse engineering foreign keys for tables in schema %s' % schema.name)
            grt.begin_progress_step(accumulated_progress, accumulated_progress + step_progress_share)
            reverseEngineerTables(connection, schema)
            grt.end_progress_step()
            accumulated_progress += step_progress_share
            # BUGFIX: was `% schema_name` — a stale variable left over from the
            # first-pass loop, so every message reported the last schema's name.
            grt.send_progress(accumulated_progress,
                              'Second pass of table reverse engineering for schema %s completed!' % schema.name)

    grt.send_progress(1.0, 'Reverse engineering completed!')
    return catalog