def process_fk(catalog, table, fk_name, fk_rows):
    """Build a db_ForeignKey from the driver's FK rows and attach it to `table`.

    Relies on `schema`, `get_action` and `find_object_with_name` from the
    enclosing scope.  Columns that cannot be resolved are reported and
    skipped; the foreign key is appended to the table either way.
    """
    first_row = fk_rows[0]
    fk = grt.classes.db_ForeignKey()
    fk.name = fk_name
    fk.owner = table
    fk.deleteRule = get_action(first_row.delete_rule)
    fk.updateRule = get_action(first_row.update_rule)
    fk.modelOnly = 0

    # Find the referenced table, falling back to the current schema/table
    # when the driver reports no qualifier:
    if first_row.pktable_schem:
        referenced_schema = find_object_with_name(catalog.schemata, first_row.pktable_schem)
    else:
        referenced_schema = schema
    if first_row.pktable_name:
        fk.referencedTable = find_object_with_name(referenced_schema.tables, first_row.pktable_name)
    else:
        fk.referencedTable = table

    for row in fk_rows:
        src_column = find_object_with_name(table.columns, row.fkcolumn_name)
        if not src_column:
            grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (row.fkcolumn_name, table.name))
            continue
        dst_column = find_object_with_name(fk.referencedTable.columns, row.pkcolumn_name)
        if not dst_column:
            grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (row.pkcolumn_name, fk.referencedTable.name))
            continue
        fk.columns.append(src_column)
        fk.referencedColumns.append(dst_column)

    table.foreignKeys.append(fk)
def connect(cls, connection, password):
    '''Establishes a connection to the server and stores the connection object in the
    connections pool.

    It first looks for a connection with the given connection parameters in the
    connections pool to reuse existent connections. If such connection is found it
    queries the server to ensure that the connection is alive and reestablishes it
    if is dead. If no suitable connection is found in the connections pool, a new
    one is created and stored in the pool.

    Parameters:
    ===========
        connection:  an object of the class db_mgmt_Connection storing the
                     parameters for the connection.
        password:    a string with the password to use for the connection
                     (ignored for SQLite).
    '''
    con = None
    try:
        con = cls.get_connection(connection)
        try:
            # Cheap liveness probe on the pooled connection:
            if not con.cursor().execute('SELECT 1'):
                raise Exception('connection error')
        except Exception as exc:  # FIX: was Py2-only "except Exception, exc"
            grt.send_info('Connection to %s apparently lost, reconnecting...' % connection.hostIdentifier)
            raise NotConnectedError('Connection error')
    except NotConnectedError as exc:
        grt.send_info('Connecting to %s...' % connection.hostIdentifier)
        con = sqlite3.connect(connection.parameterValues['dbfile'])
        if not con:
            grt.send_error('Connection failed', str(exc))
            raise
        # Quoted db file path is what the wbcopytables helper expects:
        connection.parameterValues['wbcopytables_connection_string'] = "'" + connection.parameterValues['dbfile'] + "'"
        grt.send_info('Connected')
        cls._connections[connection.__id__] = {'connection': con}
def connect(cls, connection, password):
    '''Establishes a connection to the server and stores the connection object in the
    connections pool.

    It first looks for a connection with the given connection parameters in the
    connections pool to reuse existent connections. If such connection is found it
    queries the server to ensure that the connection is alive and reestablishes it
    if is dead. If no suitable connection is found in the connections pool, a new
    one is created and stored in the pool.

    Parameters:
    ===========
        connection:  an object of the class db_mgmt_Connection storing the
                     parameters for the connection.
        password:    a string with the password to use for the connection.
    '''
    try:
        con = cls.get_connection(connection)
        try:
            # Cheap liveness probe on the pooled connection:
            if not con.cursor().execute('SELECT 1'):
                raise Exception("connection error")
        except Exception as exc:  # FIX: was Py2-only "except Exception, exc"
            grt.send_info("Connection to %s apparently lost, reconnecting..." % connection.hostIdentifier)
            raise NotConnectedError("Connection error")
    except NotConnectedError as exc:
        grt.send_info("Connecting to %s..." % connection.hostIdentifier)
        con = db_driver.connect(connection, password)
        if not con:
            grt.send_error('Connection failed', str(exc))
            raise
        grt.send_info("Connected")
        cls._connections[connection.__id__] = {"connection": con}
    # Added for consistency with the sibling implementation of this method,
    # which reports success with a return value of 1:
    return 1
def create_options(self, box, options):
    """Create an input widget for each wizard option and add it to `box`.

    Returns a list of (container_or_widget, option_name, getter) tuples,
    where getter() yields the option's current value.  Options with an
    unrecognized paramType are reported and skipped.
    """
    optlist = []
    for option in options:
        cont = None
        if option.paramType == "boolean":
            opt = mforms.newCheckBox()
            # BUG FIX: was `self.defaultValue`; the default belongs to the
            # option being rendered (cf. the "string" branch below).
            opt.set_active(option.defaultValue == "1")
            box.add(opt, False, True)
            # NOTE(review): get_string_value on a checkbox looks suspicious
            # (get_active would be the natural getter) -- confirm against the
            # mforms API before changing.
            getter = opt.get_string_value
        elif option.paramType == "string":
            hbox = mforms.newBox(True)
            hbox.set_spacing(8)
            hbox.add(mforms.newLabel(option.caption), False, True)
            opt = mforms.newTextEntry()
            opt.set_value(option.defaultValue)
            hbox.add(opt, True, True)
            l = mforms.newLabel(option.description)
            l.set_style(mforms.SmallHelpTextStyle)
            hbox.add(l, False, True)
            box.add(hbox, False, True)
            cont = hbox
            getter = opt.get_string_value
        else:
            grt.send_error("MigrationWizard", "migrationOption() for source has an invalid parameter of type %s (%s)" % (option.paramType, option.name))
            continue
        optlist.append((cont or opt, option.name, getter))
    return optlist
def process_fk(catalog, table, fk_name, fk_rows):
    """Build a db_ForeignKey from the driver's FK rows, with a unique name.

    Uses `cls._connections[connection.__id__]['fk_names']` (from the
    enclosing scope) to keep foreign key names unique, appending a random
    6-digit suffix on collision, and registers the final name there.
    """
    fk = grt.classes.db_ForeignKey()
    known_fk_names = cls._connections[connection.__id__]['fk_names']
    if fk_name in known_fk_names:
        # Try random numeric suffixes until the name no longer clashes:
        while True:
            suffix = '_%06d' % random.randint(0, 999999)
            if fk_name + suffix not in known_fk_names:
                break
        fk_name += suffix
    fk.name = fk_name
    fk.owner = table
    fk.deleteRule = get_action(fk_rows[0].delete_rule)
    fk.updateRule = get_action(fk_rows[0].update_rule)
    fk.modelOnly = 0

    # Find the referenced table, falling back to the current schema/table
    # when the driver reports no qualifier:
    if fk_rows[0].pktable_schem:
        referenced_schema = find_object_with_name(catalog.schemata, fk_rows[0].pktable_schem)
    else:
        referenced_schema = schema
    if fk_rows[0].pktable_name:
        fk.referencedTable = find_object_with_name(referenced_schema.tables, fk_rows[0].pktable_name)
    else:
        fk.referencedTable = table

    for row in fk_rows:
        src_column = find_object_with_name(table.columns, row.fkcolumn_name)
        if not src_column:
            grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (row.fkcolumn_name, table.name))
            continue
        dst_column = find_object_with_name(fk.referencedTable.columns, row.pkcolumn_name)
        if not dst_column:
            grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (row.pkcolumn_name, fk.referencedTable.name))
            continue
        fk.columns.append(src_column)
        fk.referencedColumns.append(dst_column)

    # Remember the name so later FKs of this connection avoid it:
    known_fk_names[fk.name] = table
    table.foreignKeys.append(fk)
def connect(cls, connection, password):
    """Open (or revive) a pooled connection to the server.

    Looks the connection up in the class pool and probes it with a trivial
    query; when the probe fails, or no pooled connection exists yet, a new
    connection is created through db_driver and registered in the pool.

    :param connection: a db_mgmt_Connection object with the connection parameters.
    :param password: password string to use for the connection.
    :return: 1 on success.
    """
    try:
        con = cls.get_connection(connection)
        try:
            # Cheap liveness probe on the pooled connection:
            alive = con.cursor().execute('SELECT 1')
            if not alive:
                raise Exception("connection error")
        except Exception as exc:
            grt.send_info("Connection to %s apparently lost, reconnecting..." % connection.hostIdentifier)
            raise NotConnectedError("Connection error")
    except NotConnectedError as exc:
        grt.send_info("Connecting to %s..." % connection.hostIdentifier)
        con = db_driver.connect(connection, password)
        if not con:
            grt.send_error('Connection failed', str(exc))
            raise
        grt.send_info("Connected")
        cls._connections[connection.__id__] = {"connection": con}
    return 1
def reverseEngineerTablePK(connection, table):
    """Reverse engineers the primary key for the given table.

    Returns 1 when the table's columns were not reverse engineered yet,
    0 otherwise.
    """
    schema = table.owner
    catalog = schema.owner
    # Make the table's catalog the current database for the queries below:
    execute_query(connection, 'USE %s' % catalog.name)
    query = """SELECT sc.name FROM sysobjects so JOIN syskeys sk ON so.id=sk.id JOIN syscolumns sc ON sc.id=sk.id AND sc.colid IN (sk.key1, sk.key2, sk.key3, sk.key4, sk.key5, sk.key6, sk.key7, sk.key8) WHERE so.uid=USER_ID(?) AND sk.id=OBJECT_ID(?) AND sk.type=1"""
    if len(table.columns) == 0:
        # Table must have columns reverse engineered before we can rev eng its primary key:
        grt.send_error('Sybase reverseEngineerTablePK',
                       "Reverse engineer of table's %s.%s primary key was attempted but the table has no columns attribute" % (schema.name, table.name))
        return 1
    # Mark every column returned by the key query as a primary key column:
    for row in execute_query(connection, query, schema.name, table.name):
        key_column = find_object_with_name(table.columns, row[0])
        if key_column:
            table.addPrimaryKeyColumn(key_column)
    return 0
def connect(connection, password):
    '''Establishes a connection to the server and stores the connection object in the
    connections pool.

    It first looks for a connection with the given connection parameters in the
    connections pool to reuse existent connections. If such connection is found it
    queries the server to ensure that the connection is alive and reestablishes it
    if is dead. If no suitable connection is found in the connections pool, a new
    one is created and stored in the pool.

    Parameters:
    ===========
        connection:  an object of the class db_mgmt_Connection storing the
                     parameters for the connection.
        password:    a string with the password to use for the connection.
    '''
    con = None
    host_identifier = connection.hostIdentifier
    try:
        con = get_connection(connection)
        try:
            # Cheap liveness probe on the pooled connection:
            if not con.cursor().execute('SELECT 1'):
                raise Exception("connection error")
        except Exception as exc:
            grt.send_info("Connection to %s apparently lost, reconnecting..." % connection.hostIdentifier)
            raise NotConnectedError("Connection error")
    except NotConnectedError as exc:
        grt.send_info("Connecting to %s..." % host_identifier)
        import pyodbc
        try:
            con = db_driver.connect(connection, password)
            # Sybase metadata query SPs use things that don't work inside
            # transactions, so enable autocommit:
            con.autocommit = True
        except pyodbc.Error as odbc_err:
            # 28000 is from native SQL Server driver... 42000 seems to be from FreeTDS
            # FIXME: This should be tuned for Sybase
            if len(odbc_err.args) == 2 and odbc_err.args[0] in ('28000', '42000') and "(18456)" in odbc_err.args[1]:
                raise grt.DBLoginError(odbc_err.args[1])
        if not con:
            grt.send_error('Connection failed', str(exc))
            raise
        _connections[connection.__id__] = {"connection": con}
        _connections[connection.__id__]["version"] = getServerVersion(connection)
        version = execute_query(connection, "SELECT @@version").fetchone()[0]
        # BUG FIX: the tuple was passed as send_info()'s second argument
        # instead of being %-formatted into the message, so the placeholders
        # were never filled in.
        grt.send_info("Connected to %s, %s" % (host_identifier, version))
    return 1
def reverseEngineerTablePK(cls, connection, table):
    """Reverse engineers the primary key(s) for the given table.

    Primary keys and indices both live in the SYSIDX system view, so both
    are reverse engineered here in a single pass.  Returns 0 on success,
    1 if the table's columns were not reverse engineered yet.
    """
    schema = table.owner
    # Removed unused local (`catalog = schema.owner`); the queries below are
    # qualified by schema/user name only.
    if len(table.columns) == 0:
        # Table must have columns reverse engineered before we can rev eng its primary key(s):
        grt.send_error('Migration: reverseEngineerTablePK: Reverse engineering of table %s was attempted but the table has no columns attribute' % table.name)
        return 1
    # Primary keys and indices come together in the SYSIDX system view, so we'll rev eng them at once:
    query = """SELECT st.table_id, si.index_id, si.index_name, si.index_category, si."unique" FROM SYSIDX si JOIN SYSTAB st ON si.table_id=st.table_id JOIN SYSUSER su ON st.creator=su.user_id WHERE st.table_name='%s' AND su.user_name='%s' ORDER BY si.index_id""" % (table.name, schema.name)
    idx_cursor = cls.get_connection(connection).cursor()
    for table_id, index_id, index_name, index_category, index_unique in idx_cursor.execute(query):
        index = grt.classes.db_Index()
        index.name = index_name
        index.isPrimary = 1 if index_category == 1 else 0
        index.unique = 1 if index_unique in (1, 2) else 0
        if index_category == 1:
            index.indexType = 'PRIMARY'
        elif index_category == 2:
            continue  # This is a foreign key, will be handled when reverse engineering them
        elif index_category == 3:
            # Can be a regular index or a unique constraint:
            index.indexType = 'UNIQUE' if index_unique == 2 else 'INDEX'
        else:
            index.indexType = 'FULLTEXT'
        # index.hasFilter = False  # TODO: Find out if there's a way to determine this
        # Get the columns for the index:
        idx_cols_query = """SELECT sc.column_name, sic."order" FROM SYSIDXCOL sic JOIN SYSTAB st ON sic.table_id=st.table_id JOIN SYSTABCOL sc ON (sc.column_id = sic.column_id AND sc.table_id = sic.table_id) WHERE st.table_id=%s AND sic.index_id=%s ORDER BY sic.sequence""" % (table_id, index_id)
        idx_cols_cursor = cls.get_connection(connection).cursor()
        for column_name, order in idx_cols_cursor.execute(idx_cols_query):
            column = find_object_with_name(table.columns, column_name)
            if column:
                index_column = grt.classes.db_IndexColumn()
                index_column.name = index_name + '.' + column_name
                index_column.referencedColumn = column
                index_column.descend = 1 if order and order.upper() == 'D' else 0
                index.columns.append(index_column)
        table.addIndex(index)
        if index.isPrimary:
            table.primaryKey = index
    return 0
def reverseEngineerTablePK(cls, connection, table):
    """Reverse engineers the primary key(s) for the given table.

    Primary keys and indices come together in the SYSIDX system view, so
    both are reverse engineered here in a single pass.  Returns 0 on
    success, 1 if the table's columns were not reverse engineered yet.
    """
    schema = table.owner
    if len(table.columns) == 0:
        # Table must have columns reverse engineered before we can rev eng its primary key(s)
        grt.send_error('Migration: reverseEngineerTablePK: Reverse engineering of table %s was attempted but the table has no columns attribute' % table.name)
        return 1
    # Primary keys and indices come together in the SYSIDX system view, so we'll rev eng them at once:
    query = """SELECT st.table_id, si.index_id, si.index_name, si.index_category, si."unique" FROM SYSIDX si JOIN SYSTAB st ON si.table_id=st.table_id JOIN SYSUSER su ON st.creator=su.user_id WHERE st.table_name='%s' AND su.user_name='%s' ORDER BY si.index_id""" % (table.name, schema.name)
    idx_cursor = cls.get_connection(connection).cursor()
    for table_id, index_id, index_name, index_category, index_unique in idx_cursor.execute(query):
        index = grt.classes.db_Index()
        index.name = index_name
        index.isPrimary = 1 if index_category == 1 else 0
        index.unique = 1 if index_unique in (1, 2) else 0
        if index_category == 1:
            index.indexType = 'PRIMARY'
        elif index_category == 2:
            continue  # This is a foreign key, will be handled when reverse engineering them
        elif index_category == 3:
            # Can be a regular index or a unique constraint:
            if index_unique == 2:
                index.indexType = 'UNIQUE'
            else:
                index.indexType = 'INDEX'
        else:
            index.indexType = 'FULLTEXT'
        # index.hasFilter = False  # TODO: Find out if there's a way to determine this
        # Get the columns for the index:
        idx_cols_query = """SELECT sc.column_name, sic."order" FROM SYSIDXCOL sic JOIN SYSTAB st ON sic.table_id=st.table_id JOIN SYSTABCOL sc ON (sc.column_id = sic.column_id AND sc.table_id = sic.table_id) WHERE st.table_id=%s AND sic.index_id=%s ORDER BY sic.sequence""" % (table_id, index_id)
        idx_cols_cursor = cls.get_connection(connection).cursor()
        for column_name, order in idx_cols_cursor.execute(idx_cols_query):
            column = find_object_with_name(table.columns, column_name)
            if column:
                index_column = grt.classes.db_IndexColumn()
                index_column.name = index_name + '.' + column_name
                index_column.referencedColumn = column
                # 'D' in SYSIDXCOL."order" marks a descending key part:
                index_column.descend = 1 if order and order.upper() == 'D' else 0
                index.columns.append(index_column)
        table.addIndex(index)
        if index.isPrimary:
            table.primaryKey = index
    return 0
def reverseEngineerTablePK(cls, connection, table):
    """Reverse-engineer the primary key of `table` from INFORMATION_SCHEMA.

    Builds a db_Index marked as PRIMARY from the table's PRIMARY KEY
    constraint (if any) and attaches it to the table.  Returns 1 when the
    table's columns were not reverse engineered yet, 0 otherwise.
    """
    schema = table.owner
    catalog = schema.owner
    if len(table.columns) == 0:
        # Columns must be reverse engineered before the primary key can be.
        grt.send_error(
            "%s reverseEngineerTablePK: Reverse engineer of table %s was attempted but the table has "
            "no columns attribute" % (cls.getTargetDBMSName(), table.name)
        )
        return 1
    query = """SELECT tc.CONSTRAINT_NAME, kcu.COLUMN_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS kcu ON kcu.CONSTRAINT_SCHEMA = tc.CONSTRAINT_SCHEMA AND kcu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME AND kcu.TABLE_SCHEMA = tc.TABLE_SCHEMA AND kcu.TABLE_NAME = tc.TABLE_NAME WHERE tc.CONSTRAINT_TYPE='PRIMARY KEY' AND tc.TABLE_CATALOG = '%s' AND tc.TABLE_SCHEMA = '%s' AND tc.TABLE_NAME = '%s' ORDER BY tc.CONSTRAINT_NAME, kcu.ORDINAL_POSITION""" % (catalog.name, schema.name, table.name)
    pk_rows = cls.execute_query(connection, query).fetchall()
    if not pk_rows:
        return 0
    index = grt.classes.db_Index()
    index.name = pk_rows[0][0]
    index.isPrimary = 1
    index.unique = 1
    index.indexType = "PRIMARY"
    for _, pk_col in pk_rows:
        table_column = find_object_with_name(table.columns, pk_col)
        if not table_column:
            grt.send_warning(
                '%s reverseEngineerTablePK: Could not find column "%s" in table "%s" referenced '
                'by primary key constraint "%s". The primary key will not be added.'
                % (cls.getTargetDBMSName(), pk_col, table.name, index.name)
            )
            return 0
        index_column = grt.classes.db_IndexColumn()
        index_column.name = index.name + "." + pk_col
        index_column.referencedColumn = table_column
        index.columns.append(index_column)
    table.primaryKey = index
    table.addIndex(index)
    return 0
def reverseEngineerTableIndices(cls, connection, table):
    """Reverse engineers the (non-primary) indices for the given table.

    Queries the PostgreSQL catalog for every non-primary index on the table
    and attaches a db_Index for each to `table`.  Returns 0 on success, 1 if
    the table's columns were not reverse engineered yet.
    """
    schema = table.owner
    if len(table.columns) == 0:
        # Table must have columns reverse engineered before we can rev eng its indices.
        # BUG FIX: the header was passed unformatted ('%s: ...') and the detail
        # string received three values for two placeholders, raising TypeError;
        # now formatted like the send_warning calls below.
        grt.send_error('%s: reverseEngineerTableIndices' % cls.getTargetDBMSName(),
                       'Reverse engineer of table %s.%s was attempted but the table has no columns attribute' % (schema.name, table.name))
        return 1
    all_indices_query = """SELECT c2.relname, i.indisunique::int, i.indisclustered::int, i.indnatts, i.indkey FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_namespace n, pg_catalog.pg_index i WHERE c.oid = i.indrelid AND i.indexrelid = c2.oid AND c.relnamespace = n.oid AND n.nspname = '%s' AND c.relname = '%s' AND i.indisprimary = False ORDER BY c2.relname""" % (schema.name, table.name)
    # %r on the attnum list renders e.g. "ARRAY[1, 2]", valid PG array syntax:
    index_columns_query = """SELECT a.attname FROM unnest(ARRAY%r) attrid JOIN pg_catalog.pg_attribute a ON attrid=a.attnum JOIN pg_catalog.pg_class c ON c.oid = a.attrelid JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid WHERE n.nspname = '%s' AND c.relname = '%s'"""
    index_rows = cls.execute_query(connection, all_indices_query).fetchall()
    for index_name, is_unique, is_clustered, column_count, column_refs in index_rows:
        index = grt.classes.db_Index()
        index.name = index_name
        index.isPrimary = 0
        index.unique = is_unique
        index.indexType = ('UNIQUE' if is_unique else 'INDEX')
        #index.clustered = is_clustered
        # Get the columns for the index; indkey is a space-separated attnum list:
        cols = [ int(col) for col in column_refs.split() ]
        if column_count != len(cols):
            grt.send_warning('%s: reverseEngineerTableIndices' % cls.getTargetDBMSName(),
                             'Reverse engineer of index %s.%s was attempted but the referenced columns count differs '
                             'from the number of its referenced columns. Skipping index!' % (schema.name, index_name))
            continue
        for (column_name, ) in cls.execute_query(connection, index_columns_query % (cols, schema.name, table.name)):
            column = find_object_with_name(table.columns, column_name)
            if column:
                index_column = grt.classes.db_IndexColumn()
                index_column.name = index_name + '.' + column_name
                #index_column.descend = is_descending_key
                index_column.referencedColumn = column
                index.columns.append(index_column)
            else:
                grt.send_warning('%s: reverseEngineerTableIndices' % cls.getTargetDBMSName(),
                                 'Reverse engineer of index %s.%s was attempted but the referenced column %s '
                                 'could not be found on table %s. Skipping index!' % (schema.name, index_name, column_name, table.name))
                continue
        table.addIndex(index)
    return 0
def reverseEngineerTablePK(cls, connection, table):
    """Reverse engineers the primary key for the given table.

    Uses the ODBC cursor's primaryKeys()/statistics() metadata calls; when
    the PK index is missing from statistics(), synthesizes equivalent rows
    from the primaryKeys() result.  Returns 0 on success, 1 if the table's
    columns were not reverse engineered yet.
    """
    schema = table.owner
    catalog = schema.owner
    if len(table.columns) == 0:
        # Table must have columns reverse engineered before we can rev eng its primary key
        grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
        return 1
    # Find the index name associated with the PK:
    pk_index_rows = cls.get_connection(connection).cursor().primaryKeys(catalog=catalog.name, schema=schema.name, table=table.name).fetchall()
    pk_index_name = pk_index_rows[0].pk_name if pk_index_rows else ''
    indices_dict = {}
    # Map the indices names to their respective columns:
    for row in cls.get_connection(connection).cursor().statistics(catalog=catalog.name, schema=schema.name, table=table.name):
        if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
            continue
        indices_dict.setdefault(row.index_name, []).append(row)
    if pk_index_name not in indices_dict:
        # The primary key is not listed in cursor().statistics; build stand-in
        # rows (same column layout as statistics() rows) from primaryKeys():
        from collections import namedtuple
        Row = namedtuple('IndexRow', ['table_cat', 'table_schem', 'table_name', 'non_unique', 'index_qualifier', 'index_name', 'type', 'ordinal_position', 'column_name', 'asc_or_desc', 'cardinality', 'pages', 'filter_condition'])
        for pk_index_row in pk_index_rows:
            row = Row(None, schema.name, table.name, 0, None, pk_index_name, 1, 1, pk_index_row.column_name, 'A', None, None, None)
            indices_dict.setdefault(pk_index_name, []).append(row)
    for index_name, row_list in list(indices_dict.items()):
        index = grt.classes.db_Index()
        index.name = index_name
        index.isPrimary = 1 if index_name == pk_index_name else 0
        index.unique = not row_list[0].non_unique
        index.indexType = 'UNIQUE' if index.unique else 'INDEX'
        # index.hasFilter = False  # TODO: Find out if there's a way to determine this
        # Get the columns for the index:
        for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
            column = find_object_with_name(table.columns, row.column_name)
            if column:
                index_column = grt.classes.db_IndexColumn()
                index_column.name = index_name + '.' + row.column_name
                index_column.referencedColumn = column
                index.columns.append(index_column)
        table.addIndex(index)
        if index.isPrimary:
            table.primaryKey = index
    return 0
def reverseEngineerTablePK(cls, connection, table):
    """Reverse engineers the primary key(s) for the given table.

    Reads the PRIMARY KEY constraint from INFORMATION_SCHEMA, builds a
    db_Index marked as PRIMARY from it and attaches it to `table`.
    Returns 0 on success, 1 if the table's columns were not reverse
    engineered yet.
    """
    schema = table.owner
    catalog = schema.owner
    query = """SELECT tc.CONSTRAINT_NAME, kcu.COLUMN_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS kcu ON kcu.CONSTRAINT_SCHEMA = tc.CONSTRAINT_SCHEMA AND kcu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME AND kcu.TABLE_SCHEMA = tc.TABLE_SCHEMA AND kcu.TABLE_NAME = tc.TABLE_NAME WHERE tc.CONSTRAINT_TYPE='PRIMARY KEY' AND tc.TABLE_CATALOG = '%s' AND tc.TABLE_SCHEMA = '%s' AND tc.TABLE_NAME = '%s' ORDER BY tc.CONSTRAINT_NAME, kcu.ORDINAL_POSITION""" % (
        catalog.name, schema.name, table.name)
    if len(
            table.columns
    ) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
        grt.send_error(
            '%s reverseEngineerTablePK: Reverse engineer of table %s was attempted but the table has '
            'no columns attribute' % (cls.getTargetDBMSName(), table.name))
        return 1
    fk_rows = cls.execute_query(connection, query).fetchall()
    if fk_rows:
        index = grt.classes.db_Index()
        # First column of every row is the constraint name:
        index.name = fk_rows[0][0]
        index.isPrimary = 1
        index.unique = 1
        index.indexType = 'PRIMARY'
        for _, pk_col in fk_rows:
            table_column = find_object_with_name(table.columns, pk_col)
            if not table_column:
                # Bail out without a PK if any referenced column is missing:
                grt.send_warning(
                    '%s reverseEngineerTablePK: Could not find column "%s" in table "%s" referenced '
                    'by primary key constraint "%s". The primary key will not be added.'
                    % (cls.getTargetDBMSName(), pk_col, table.name, index.name))
                return 0
            index_column = grt.classes.db_IndexColumn()
            index_column.name = index.name + '.' + pk_col
            index_column.referencedColumn = table_column
            index.columns.append(index_column)
        table.primaryKey = index
        table.addIndex(index)
    return 0
def reverseEngineerTablePK(cls, connection, table):
    """Reverse engineers the primary key(s) for the given table.

    Uses the ODBC cursor's primaryKeys()/statistics() metadata calls; when
    the PK index is missing from statistics(), synthesizes equivalent rows
    from the primaryKeys() result.  Returns 0 on success, 1 if the table's
    columns were not reverse engineered yet.
    """
    schema = table.owner
    catalog = schema.owner
    if len(table.columns) == 0:
        # Table must have columns reverse engineered before we can rev eng its primary key(s)
        grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
        return 1
    # Find the index name associated with the PK:
    pk_index_rows = cls.get_connection(connection).cursor().primaryKeys(catalog=catalog.name, schema=schema.name, table=table.name).fetchall()
    pk_index_name = pk_index_rows[0].pk_name if pk_index_rows else ''
    indices_dict = {}
    # Map the indices names to their respective columns:
    for row in cls.get_connection(connection).cursor().statistics(catalog=catalog.name, schema=schema.name, table=table.name):
        if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
            continue
        indices_dict.setdefault(row.index_name, []).append(row)
    if pk_index_name not in indices_dict:
        # The primary key is not listed in cursor().statistics; build stand-in
        # rows (same column layout as statistics() rows) from primaryKeys():
        from collections import namedtuple
        Row = namedtuple('IndexRow', ['table_cat', 'table_schem', 'table_name', 'non_unique', 'index_qualifier', 'index_name', 'type', 'ordinal_position', 'column_name', 'asc_or_desc', 'cardinality', 'pages', 'filter_condition'])
        for pk_index_row in pk_index_rows:
            row = Row(None, schema.name, table.name, 0, None, pk_index_name, 1, 1, pk_index_row.column_name, 'A', None, None, None)
            indices_dict.setdefault(pk_index_name, []).append(row)
    # FIX: was Py2-only indices_dict.iteritems(); list(...items()) works on
    # both and matches the sibling implementation of this method.
    for index_name, row_list in list(indices_dict.items()):
        index = grt.classes.db_Index()
        index.name = index_name
        index.isPrimary = 1 if index_name == pk_index_name else 0
        index.unique = not row_list[0].non_unique
        index.indexType = 'UNIQUE' if index.unique else 'INDEX'
        # index.hasFilter = False  # TODO: Find out if there's a way to determine this
        # Get the columns for the index:
        for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
            column = find_object_with_name(table.columns, row.column_name)
            if column:
                index_column = grt.classes.db_IndexColumn()
                index_column.name = index_name + '.' + row.column_name
                index_column.referencedColumn = column
                index.columns.append(index_column)
        table.addIndex(index)
        if index.isPrimary:
            table.primaryKey = index
    return 0
def connect(cls, connection, password):
    '''Establishes a connection to the server and stores the connection object in the
    connections pool.

    It first looks for a connection with the given connection parameters in the
    connections pool to reuse existent connections. If such connection is found it
    queries the server to ensure that the connection is alive and reestablishes it
    if is dead. If no suitable connection is found in the connections pool, a new
    one is created and stored in the pool.

    Parameters:
    ===========
        connection:  an object of the class db_mgmt_Connection storing the
                     parameters for the connection.
        password:    a string with the password to use for the connection.
    '''
    con = None
    try:
        con = cls.get_connection(connection)
        try:
            # Cheap liveness probe on the pooled connection:
            if not con.cursor().execute('SELECT 1'):
                raise Exception('connection error')
        except Exception as exc:  # FIX: was Py2-only "except Exception, exc"
            grt.send_info(
                'Connection to %s apparently lost, reconnecting...' % connection.hostIdentifier)
            raise NotConnectedError('Connection error')
    except NotConnectedError as exc:
        grt.send_info('Connecting to %s...' % connection.hostIdentifier)
        if connection.driver.driverLibraryName == 'sqlanydb':
            import sqlanydbwrapper as sqlanydb  # Replace this to a direct sqlanydb import when it complies with PEP 249
            connstr = replace_string_parameters(
                connection.driver.connectionStringTemplate,
                dict(connection.parameterValues))
            import ast
            try:
                # The connection string template expands to a python dict literal:
                all_params_dict = ast.literal_eval(connstr)
            except Exception as exc:
                grt.send_error(
                    'The given connection string is not a valid python dict: %s' % connstr)
                raise
            # Remove unreplaced parameters
            # (FIX: .items() instead of Py2-only .iteritems()):
            params = dict(
                (key, value) for key, value in all_params_dict.items()
                if not (value.startswith('%') and value.endswith('%')))
            params['password'] = password
            # Mask the password in the copy stored for wbcopytables:
            conn_params = dict(params)
            conn_params['password'] = '******'
            connection.parameterValues[
                'wbcopytables_connection_string'] = repr(conn_params)
            con = sqlanydb.connect(**params)
def connect(cls, connection, password):
    """Establishes a connection to the server and stores the connection object in the
    connections pool.

    It first looks for a connection with the given connection parameters in the
    connections pool to reuse existent connections. If such connection is found it
    queries the server to ensure that the connection is alive and reestablishes it
    if is dead. If no suitable connection is found in the connections pool, a new
    one is created and stored in the pool.

    Parameters:
    ===========
        connection:  an object of the class db_mgmt_Connection storing the
                     parameters for the connection.
        password:    a string with the password to use for the connection.
    """
    con = None
    try:
        con = cls.get_connection(connection)
        try:
            # Cheap liveness probe on the pooled connection:
            if not con.cursor().execute("SELECT 1"):
                raise Exception("connection error")
        except Exception as exc:  # FIX: was Py2-only "except Exception, exc"
            grt.send_info("Connection to %s apparently lost, reconnecting..." % connection.hostIdentifier)
            raise NotConnectedError("Connection error")
    except NotConnectedError as exc:
        grt.send_info("Connecting to %s..." % connection.hostIdentifier)
        if connection.driver.driverLibraryName == "sqlanydb":
            import sqlanydbwrapper as sqlanydb  # Replace this to a direct sqlanydb import when it complies with PEP 249

            connstr = replace_string_parameters(
                connection.driver.connectionStringTemplate, dict(connection.parameterValues)
            )
            import ast

            try:
                # The connection string template expands to a python dict literal:
                all_params_dict = ast.literal_eval(connstr)
            except Exception as exc:
                grt.send_error("The given connection string is not a valid python dict: %s" % connstr)
                raise
            # Remove unreplaced parameters
            # (FIX: .items() instead of Py2-only .iteritems()):
            params = dict(
                (key, value)
                for key, value in all_params_dict.items()
                if not (value.startswith("%") and value.endswith("%"))
            )
            params["password"] = password
            # Mask the password in the copy stored for wbcopytables:
            conn_params = dict(params)
            conn_params["password"] = "******"
            connection.parameterValues["wbcopytables_connection_string"] = repr(conn_params)
            con = sqlanydb.connect(**params)
def reverseEngineerTablePK(cls, connection, table):
    """Reverse engineers the primary key for the given table.

    Reads the index metadata via the ODBC `statistics` catalog call, builds a
    db_Index object per index name, and registers the index named 'PrimaryKey'
    as the table's primary key. Returns 0 on success, 1 if the table's columns
    have not been reverse engineered yet.
    """
    if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key
        grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
        return 1

    # NOTE(review): the PK index name is assumed to be literally 'PrimaryKey' —
    # this looks backend-specific (e.g. MS Access); confirm against the driver.
    pk_index_name = 'PrimaryKey'

    indices_dict = {}  # Map the indices names to their respective columns:
    for row in cls.get_connection(connection).cursor().statistics(table=table.name):
        if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
            continue
        indices_dict.setdefault(row.index_name, []).append(row)

    for index_name, row_list in list(indices_dict.items()):
        index = grt.classes.db_Index()
        index.name = index_name
        index.isPrimary = 1 if index_name == pk_index_name else 0
        # non_unique is taken from the first row; all rows of one index share it.
        index.unique = not row_list[0].non_unique
        index.indexType = 'UNIQUE' if index.unique else 'INDEX'
        # index.hasFilter = False  # TODO: Find out if there's a way to determine this

        skip = False
        # Get the columns for the index:
        for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
            column = find_object_with_name(table.columns, row.column_name)
            if column:
                # skip indexes on LONGCHAR columns
                if column.simpleType.name in ["LONGCHAR"]:
                    grt.send_warning("Migration: reverseEngineerTable: Skipping index %s.%s on a %s column\n" % (table.name, column.name, column.simpleType.name))
                    skip = True
                else:
                    index_column = grt.classes.db_IndexColumn()
                    index_column.name = index_name + '.' + row.column_name
                    index_column.referencedColumn = column
                    index.columns.append(index_column)
                    # PK columns must be NOT NULL; force it and warn the user.
                    if not column.isNotNull and index.isPrimary:
                        column.isNotNull = 1
                        grt.send_warning("Migration: reverseEngineerTablePK: column %s.%s was changed to NOT NULL because it's a Primary Key column\n" % (column.owner.name, column.name))
            else:
                grt.send_warning("Migration: reverseEngineerTablePK: could not find column %s, belonging to key %s. Key will be skipped\n" % (row.column_name, index_name))
                skip = True
        if not skip:
            table.addIndex(index)
            if index.isPrimary:
                table.primaryKey = index
    return 0
def reverseEngineerTablePK(cls, connection, table):
    """Reverse engineers the primary key(s) for the given table.

    Reads index metadata via the ODBC `statistics` catalog call, builds a
    db_Index per index name, and registers the index named 'PrimaryKey' as the
    table's primary key. Returns 0 on success, 1 if the table's columns have
    not been reverse engineered yet.
    """
    # Fix vs. original: `dict.iteritems()` was removed in Python 3;
    # `dict.items()` behaves equivalently here in both Python 2 and 3.
    if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
        grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
        return 1

    pk_index_name = 'PrimaryKey'

    indices_dict = {}  # Map the indices names to their respective columns:
    for row in cls.get_connection(connection).cursor().statistics(table=table.name):
        if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
            continue
        indices_dict.setdefault(row.index_name, []).append(row)

    for index_name, row_list in indices_dict.items():
        index = grt.classes.db_Index()
        index.name = index_name
        index.isPrimary = 1 if index_name == pk_index_name else 0
        # non_unique is taken from the first row; all rows of one index share it.
        index.unique = not row_list[0].non_unique
        index.indexType = 'UNIQUE' if index.unique else 'INDEX'
        # index.hasFilter = False  # TODO: Find out if there's a way to determine this

        skip = False
        # Get the columns for the index:
        for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
            column = find_object_with_name(table.columns, row.column_name)
            if column:
                # skip indexes on LONGCHAR columns
                if column.simpleType.name in ["LONGCHAR"]:
                    grt.send_warning("Migration: reverseEngineerTable: Skipping index %s.%s on a %s column\n" % (table.name, column.name, column.simpleType.name))
                    skip = True
                else:
                    index_column = grt.classes.db_IndexColumn()
                    index_column.name = index_name + '.' + row.column_name
                    index_column.referencedColumn = column
                    index.columns.append(index_column)
                    # PK columns must be NOT NULL; force it and warn the user.
                    if not column.isNotNull and index.isPrimary:
                        column.isNotNull = 1
                        grt.send_warning("Migration: reverseEngineerTablePK: column %s.%s was changed to NOT NULL because it's a Primary Key column\n" % (column.owner.name, column.name))
            else:
                grt.send_warning("Migration: reverseEngineerTablePK: could not find column %s, belonging to key %s. Key will be skipped\n" % (row.column_name, index_name))
                skip = True
        if not skip:
            table.addIndex(index)
            if index.isPrimary:
                table.primaryKey = index
    return 0
def connect(cls, connection, password):
    '''Establishes a connection to the server and stores the connection object in the connections pool.

    It first looks for a connection with the given connection parameters in the connections
    pool to reuse existent connections. If such connection is found it queries the server to
    ensure that the connection is alive and reestablishes it if is dead. If no suitable connection
    is found in the connections pool, a new one is created and stored in the pool.

    Parameters:
    ===========
        connection: an object of the class db_mgmt_Connection storing the parameters
                    for the connection.
        password: a string with the password to use for the connection (ignored for SQLite).
    '''
    conn = None
    try:
        # Try to reuse a pooled connection and probe it for liveness.
        conn = cls.get_connection(connection)
        try:
            alive = conn.cursor().execute('SELECT 1')
            if not alive:
                raise Exception('connection error')
        except Exception as exc:
            grt.send_info('Connection to %s apparently lost, reconnecting...' % connection.hostIdentifier)
            raise NotConnectedError('Connection error')
    except NotConnectedError as exc:
        # No usable pooled connection: open the SQLite database file directly.
        grt.send_info('Connecting to %s...' % connection.hostIdentifier)
        dbfile = connection.parameterValues['dbfile']
        conn = sqlite3.connect(dbfile)
        if not conn:
            grt.send_error('Connection failed', str(exc))
            raise
        # Quoted path handed over to the wbcopytables helper tool.
        connection.parameterValues['wbcopytables_connection_string'] = "'" + dbfile + "'"
        grt.send_info('Connected')
        cls._connections[connection.__id__] = {'connection': conn}
    if conn:
        # Record the server version alongside the pooled connection.
        version_string = cls.execute_query(connection, "SELECT sqlite_version()").fetchone()[0]
        grt.log_info('SQLite RE', 'Connected to %s, %s' % (connection.name, version_string))
        # Pad with zeros so the 4-way unpack below always succeeds.
        parts = server_version_str2tuple(version_string) + (0, 0, 0, 0)
        version = grt.classes.GrtVersion()
        version.majorNumber, version.minorNumber, version.releaseNumber, version.buildNumber = parts[:4]
        cls._connections[connection.__id__]['version'] = version
    return 1
def getOS(connection):
    """Return 'linux', 'windows' or 'darwin' based on @@version_compile_os, or None."""
    conn = get_connection(connection)
    if not conn:
        return None
    try:
        result = conn.executeQuery("SELECT @@version_compile_os")
    except db_utils.QueryError as e:
        grt.send_error("Error executing query: %s." % e)
        return None
    if not (result and result.nextRow()):
        return None
    compile_os = result.stringByIndex(1).lower()
    # Order matters: 'win' must be tested before the darwin substrings.
    for substrings, os_name in ((('linux',), 'linux'),
                                (('win',), 'windows'),
                                (('osx', 'macos'), 'darwin')):
        if any(token in compile_os for token in substrings):
            return os_name
    return None
def reverseEngineerTablePK(cls, connection, table):
    """Reverse engineers the primary key(s) for the given table.

    Looks up the PK index name via the ODBC `primaryKeys` catalog call, then
    enumerates all indexes via `statistics`, marking the matching one as the
    table's primary key. Returns 0 on success, 1 if the table's columns have
    not been reverse engineered yet.
    """
    # Fix vs. original: `dict.iteritems()` was removed in Python 3;
    # `dict.items()` behaves equivalently here in both Python 2 and 3.
    schema = table.owner
    catalog = schema.owner
    if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
        grt.send_error('Migration: reverseEngineerTablePKAndIndices: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
        return 1

    # Find the index name associated with the PK:
    pk_index_row = cls.get_connection(connection).cursor().primaryKeys(catalog=catalog.name, schema=schema.name, table=table.name).fetchone()
    pk_index_name = pk_index_row.pk_name if pk_index_row else ''

    indices_dict = {}  # Map the indices names to their respective columns:
    for row in cls.get_connection(connection).cursor().statistics(catalog=catalog.name, schema=schema.name, table=table.name):
        if row.type == constant.SQL_TABLE_STAT:  # this entry is not an index
            continue
        indices_dict.setdefault(row.index_name, []).append(row)

    for index_name, row_list in indices_dict.items():
        index = grt.classes.db_Index()
        index.name = index_name
        index.isPrimary = 1 if index_name == pk_index_name else 0
        # non_unique is taken from the first row; all rows of one index share it.
        index.unique = not row_list[0].non_unique
        index.indexType = 'UNIQUE' if index.unique else 'INDEX'
        # index.hasFilter = False  # TODO: Find out if there's a way to determine this

        # Get the columns for the index:
        for row in sorted(row_list, key=lambda elem: elem[7]):  # Sorted by ordinal_position
            column = find_object_with_name(table.columns, row.column_name)
            if column:
                index_column = grt.classes.db_IndexColumn()
                index_column.name = index_name + '.' + row.column_name
                index_column.referencedColumn = column
                index.columns.append(index_column)
        table.addIndex(index)
        if index.isPrimary:
            table.primaryKey = index
    return 0
def getOS(connection):
    """Return 'linux', 'windows' or 'darwin' based on @@version_compile_os, or None.

    Queries the server's @@version_compile_os variable and maps the reported
    build platform to a canonical OS name.
    """
    # Fix vs. original: also recognize 'macos' compile_os strings, for
    # consistency with the sibling getOS implementation in this codebase
    # (newer servers report e.g. 'macos11' rather than 'osx10.x').
    conn = get_connection(connection)
    if conn:
        try:
            result = conn.executeQuery("SELECT @@version_compile_os")
        except db_utils.QueryError as e:
            grt.send_error("Error executing query: %s." % e)
            return None

        if result and result.nextRow():
            compile_os = result.stringByIndex(1).lower()
            if 'linux' in compile_os:
                return 'linux'
            elif 'win' in compile_os:
                return 'windows'
            elif 'osx' in compile_os or 'macos' in compile_os:
                return 'darwin'
    return None
def reverseEngineerTableFKs(cls, connection, table):
    """Reverse engineers the foreign keys for the given table.

    Queries the SQL Anywhere system catalog (SYSFKEY/SYSIDX/SYSTAB/SYSUSER)
    for the table's foreign key indexes, then resolves the local and
    referenced columns for each key. Returns 0 on success, 1 if the table's
    columns have not been reverse engineered yet.
    """
    schema = table.owner
    catalog = schema.owner
    if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its foreign keys
        grt.send_error('Migration: reverseEngineerTableFKs: Reverse engineering of table %s was attempted but the table has no columns attribute' % table.name)
        return 1

    # One row per FK on this table, identifying the local (foreign) and
    # referenced (primary) table/index ids.
    query = """SELECT si.index_name, sfk.foreign_table_id, sfk.foreign_index_id, sfk.primary_table_id, sfk.primary_index_id
FROM SYSFKEY sfk
JOIN SYSIDX si ON (sfk.foreign_index_id=si.index_id AND sfk.foreign_table_id=si.table_id)
JOIN SYSTAB st ON sfk.foreign_table_id=st.table_id
JOIN SYSUSER su ON st.creator=su.user_id
WHERE st.table_name='%s' AND su.user_name='%s'
ORDER BY sfk.primary_index_id""" % (table.name, schema.name)

    fk_cursor = cls.get_connection(connection).cursor()
    for fk_name, this_table_id, this_index_id, other_table_id, other_index_id in fk_cursor.execute(query):
        # Column lists for both sides, ordered by their position in the key.
        this_column_query = """SELECT stc.column_name
FROM SYSIDXCOL sic
JOIN SYSTABCOL stc ON (sic.table_id=stc.table_id AND sic.column_id=stc.column_id)
WHERE sic.table_id=%d AND sic.index_id=%d
ORDER BY sic.sequence""" % (this_table_id, this_index_id)

        other_column_query = """SELECT su.user_name, st.table_name, stc.column_name
FROM SYSIDXCOL sic
JOIN SYSTABCOL stc ON (sic.table_id=stc.table_id AND sic.column_id=stc.column_id)
JOIN SYSTAB st ON stc.table_id=st.table_id
JOIN SYSUSER su ON st.creator=su.user_id
WHERE sic.table_id=%d AND sic.index_id=%d
ORDER BY sic.sequence""" % (other_table_id, other_index_id)

        these_columns = cls.execute_query(connection, this_column_query).fetchall()
        other_columns = cls.execute_query(connection, other_column_query).fetchall()

        foreign_key = grt.classes.db_ForeignKey()
        foreign_key.owner = table
        foreign_key.name = fk_name
        # Find the referenced table:
        # (fall back to this schema/table when the catalog row has no name)
        referenced_schema = find_object_with_name(catalog.schemata, other_columns[0][0]) if other_columns[0][0] else schema
        foreign_key.referencedTable = find_object_with_name(referenced_schema.tables, other_columns[0][1]) if other_columns[0][1] else table

        # Pair up local and referenced columns positionally.
        for (this_column_name,), (_, _, other_column_name) in zip(these_columns, other_columns):
            column = find_object_with_name(table.columns, this_column_name)
            if not column:
                grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (this_column_name, table.name))
                continue
            ref_column = find_object_with_name(foreign_key.referencedTable.columns, other_column_name)
            if not ref_column:
                grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (other_column_name, foreign_key.referencedTable.name))
                continue
            foreign_key.columns.append(column)
            foreign_key.referencedColumns.append(ref_column)
        table.foreignKeys.append(foreign_key)
    return 0
def reverseEngineerTablePK(connection, table):
    """Reverse engineers the primary key(s) for the given table."""
    schema = table.owner
    catalog = schema.owner

    # Switch the session to the table's database before touching its catalog.
    execute_query(connection, 'USE %s' % catalog.name)

    # syskeys stores up to 8 key columns per key; type=1 marks primary keys.
    query = """SELECT sc.name
FROM sysobjects so
JOIN syskeys sk ON so.id=sk.id
JOIN syscolumns sc ON sc.id=sk.id AND sc.colid IN (sk.key1, sk.key2, sk.key3, sk.key4, sk.key5, sk.key6, sk.key7, sk.key8)
WHERE so.uid=USER_ID(?) AND sk.id=OBJECT_ID(?) AND sk.type=1"""

    if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its primary key(s)
        grt.send_error('Sybase reverseEngineerTablePK',
                       "Reverse engineer of table's %s.%s primary key was attempted but the table has no columns attribute" % (schema.name, table.name))
        return 1

    # Mark each key column that we can resolve on the already-built table object.
    for key_row in execute_query(connection, query, schema.name, table.name):
        key_column = find_object_with_name(table.columns, key_row[0])
        if key_column:
            table.addPrimaryKeyColumn(key_column)
    return 0
def connect(cls, connection, password):
    '''Establishes a connection to the server and stores the connection object in the connections pool.

    It first looks for a connection with the given connection parameters in the connections
    pool to reuse existent connections. If such connection is found it queries the server to
    ensure that the connection is alive and reestablishes it if is dead. If no suitable connection
    is found in the connections pool, a new one is created and stored in the pool.

    Parameters:
    ===========
        connection: an object of the class db_mgmt_Connection storing the parameters
                    for the connection.
        password: a string with the password to use for the connection (ignored for SQLite).
    '''
    # Fix vs. original: `except E, exc` syntax was removed in Python 3;
    # `except E as exc` is equivalent and valid from Python 2.6 onward.
    con = None
    try:
        # Reuse a pooled connection and probe it for liveness.
        con = cls.get_connection(connection)
        try:
            if not con.cursor().execute('SELECT 1'):
                raise Exception('connection error')
        except Exception as exc:
            grt.send_info('Connection to %s apparently lost, reconnecting...' % connection.hostIdentifier)
            raise NotConnectedError('Connection error')
    except NotConnectedError as exc:
        # No usable pooled connection: open the SQLite database file directly.
        grt.send_info('Connecting to %s...' % connection.hostIdentifier)
        con = sqlite3.connect(connection.parameterValues['dbfile'])
        if not con:
            grt.send_error('Connection failed', str(exc))
            raise
        # Quoted path handed over to the wbcopytables helper tool.
        connection.parameterValues['wbcopytables_connection_string'] = "'" + connection.parameterValues['dbfile'] + "'"
        grt.send_info('Connected')
        cls._connections[connection.__id__] = {'connection': con}
def process_fk(catalog, table, fk_name, fk_rows):
    """Build one db_ForeignKey from the given catalog rows and attach it to `table`.

    NOTE(review): this function references `cls`, `connection`, `random`,
    `get_delete_action` and `get_update_action` that are not among its
    parameters — it is evidently a closure nested inside a larger reverse
    engineering method; confirm the enclosing scope before reusing it.
    """
    foreign_key = grt.classes.db_ForeignKey()
    # FK names must be unique per connection; disambiguate clashes with a
    # random numeric suffix.
    if fk_name in cls._connections[connection.__id__]['fk_names']:
        while True:
            suffix = '_%06d' % random.randint(0, 999999)
            if fk_name + suffix not in cls._connections[connection.__id__]['fk_names']:
                break
        fk_name += suffix
    foreign_key.name = fk_name
    foreign_key.owner = table
    # grbit presumably encodes the referential actions — decoded by helpers.
    foreign_key.deleteRule = get_delete_action(fk_rows[0].grbit)
    foreign_key.updateRule = get_update_action(fk_rows[0].grbit)
    foreign_key.modelOnly = 0

    # Find the referenced table:
    # (only the first schema is searched — this source appears to be a
    # single-schema backend such as MS Access)
    foreign_key.referencedTable = find_object_with_name(catalog.schemata[0].tables, fk_rows[0].szReferencedObject)
    if not foreign_key.referencedTable:
        grt.send_error('Migration: reverseEngineerTableFKs: Table "%s" not found in schemata "%s"' % (fk_rows[0].szReferencedObject, catalog.schemata[0].name))
        return 1

    for fk_row in fk_rows:
        column = find_object_with_name(table.columns, fk_row.szColumn)
        if not column:
            grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.szColumn, table.name))
            continue

        ref_column = find_object_with_name(foreign_key.referencedTable.columns, fk_row.szReferencedColumn)
        if not ref_column:
            grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.szReferencedColumn, foreign_key.referencedTable.name))
            continue

        foreign_key.columns.append(column)
        foreign_key.referencedColumns.append(ref_column)

    # Find and delete indexes that are identical to FKs
    for index in reversed(table.indices):
        if table.primaryKey != index and len(index.columns) == len(foreign_key.columns):
            match = True
            for i, col in enumerate(index.columns):
                if foreign_key.columns[i] != col.referencedColumn:
                    match = False
                    break
            if match:
                grt.send_warning("Migration: reverseEngineerTable: Skipping duplicate index %s from table %s\n" % (col.name, table.name))
                table.indices.remove(index)

    # Register the (possibly suffixed) name so later FKs avoid collisions.
    cls._connections[connection.__id__]['fk_names'][foreign_key.name] = table
    table.foreignKeys.append(foreign_key)
def reverseEngineerTableIndices(connection, table):
    """Reverse engineers the indices for the given table.

    Queries Sybase sysindexes/sysobjects, decoding status bit flags into
    clustered/primary/unique/ignore-dup attributes, and resolves up to 31
    columns per index via INDEX_COL(). Returns 0 on success, 1 if the table's
    columns have not been reverse engineered yet.
    """
    schema = table.owner
    catalog = schema.owner

    if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its indices
        grt.send_error(
            'Sybase reverseEngineerTableIndices',
            'Reverse engineer of table %s.%s was attempted but the table has no columns attribute' % (schema.name, table.name))
        return 1

    # Switch the session to the table's database before touching its catalog.
    execute_query(connection, 'USE %s' % catalog.name)

    # Status-bit decoding per Sybase docs; indid 0 is the table itself and
    # 255 is text/image data, so both are excluded.
    query = """SELECT INDEX_NAME = A.name,
    IS_CLUSTERED = CASE WHEN ((A.status&16) = 16 OR (A.status2&512) = 512) THEN 1 ELSE 0 END,
    IS_PRIMARY = CASE WHEN ((A.status&0x800) = 0x800) THEN 1 ELSE 0 END,
    IS_UNIQUE = CASE WHEN ((A.status&2) = 2) THEN 1 ELSE 0 END,
    IGNORE_DUP = CASE WHEN ((A.status&4) = 4) THEN 1 ELSE 0 END,
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 1), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 2),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 3), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 4),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 5), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 6),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 7), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 8),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 9), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 10),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 11), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 12),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 13), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 14),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 15), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 16),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 17), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 18),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 19), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 20),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 21), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 22),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 23), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 24),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 25), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 26),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 27), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 28),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 29), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 30),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 31)
FROM sysindexes A, sysobjects B
WHERE A.indid > 0 AND A.indid < 255 AND A.status2 & 2 != 2
AND B.id = A.id AND B.type = 'U' AND USER_NAME(B.uid) = ? AND B.name=?
ORDER BY 1, 2, 3"""

    for index_row in execute_query(connection, query, schema.name, table.name):
        index = grt.classes.db_sybase_Index()
        index.name = index_row[0]
        index.clustered = index_row[1]
        index.isPrimary = index_row[2]
        index.unique = index_row[3]
        index.indexType = 'UNIQUE' if index.unique else 'INDEX'
        index.ignoreDuplicateRows = index_row[4]
        table.addIndex(index)

        # Get the columns for the index:
        # (trailing NULLs mean the index has fewer than 31 columns)
        index_column_names = [colname for colname in index_row[5:] if colname is not None]
        for column_name in index_column_names:
            column = find_object_with_name(table.columns, column_name)
            if column:
                index_column = grt.classes.db_sybase_IndexColumn()
                index_column.name = index.name + '.' + column_name
                index_column.referencedColumn = column
                index.columns.append(index_column)
    return 0
def reverseEngineerTableFKs(connection, table):
    """Reverse engineers the foreign keys for the given table."""
    schema = table.owner
    catalog = schema.owner

    # Switch the session to the table's database before touching its catalog.
    execute_query(connection, 'USE %s' % catalog.name)

    # syskeys (type=2) holds up to 8 local / 8 referenced key columns per FK.
    query = """SELECT so.name, USER_NAME(so.uid),
COL_NAME(sk.id, key1), COL_NAME(sk.id, key2), COL_NAME(sk.id, key3), COL_NAME(sk.id, key4),
COL_NAME(sk.id, key5), COL_NAME(sk.id, key6), COL_NAME(sk.id, key7), COL_NAME(sk.id, key8),
COL_NAME(sk.depid, depkey1), COL_NAME(sk.depid, depkey2), COL_NAME(sk.depid, depkey3), COL_NAME(sk.depid, depkey4),
COL_NAME(sk.depid, depkey5), COL_NAME(sk.depid, depkey6), COL_NAME(sk.depid, depkey7), COL_NAME(sk.depid, depkey8)
FROM syskeys sk JOIN sysobjects so ON sk.depid = so.id
WHERE sk.type = 2 AND sk.id = OBJECT_ID('%s.%s')""" % (schema.name, table.name)

    if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its foreign keys
        grt.send_error('Sybase reverseEngineerTableFKs',
                       'Reverse engineer of foreign keys for table %s.%s was attempted but the table has no columns attribute' % (schema.name, table.name))
        return 1

    table.foreignKeys.remove_all()
    for key_row in execute_query(connection, query):
        ref_table_name, ref_schema_name = key_row[0], key_row[1]
        # Drop the NULL slots of unused key positions.
        local_column_names = [name for name in key_row[2:10] if name]
        referenced_column_names = [name for name in key_row[10:] if name]

        fk = grt.classes.db_sybase_ForeignKey()
        fk.name = '%s_%s_%s_fk' % (schema.name, table.name, '_'.join(local_column_names))
        fk.owner = table
        fk.deleteRule = fk.updateRule = 'RESTRICT'
        fk.modelOnly = 0

        referenced_schema = find_object_with_name(catalog.schemata, ref_schema_name)
        if not referenced_schema:
            grt.send_error('Sybase reverseEngineerTableFKs',
                           'Could not find schema "%s" in catalog "%s"' % (schema.name, catalog.name))
            return 1

        fk.referencedTable = find_object_with_name(referenced_schema.tables, ref_table_name)
        if not fk.referencedTable:
            grt.send_error('Sybase reverseEngineerTableFKs',
                           'Could not find referenced table "%s" in schema "%s"' % (ref_table_name, schema.name))
            return 1

        # Resolve and pair up the column objects on both sides.
        for local_name, referenced_name in zip(local_column_names, referenced_column_names):
            local_column = find_object_with_name(table.columns, local_name)
            if not local_column:
                grt.send_error('Sybase reverseEngineerTableFKs',
                               'Could not find column "%s" in table "%s.%s"' % (local_name, schema.name, table.name))
                return 1
            referenced_column = find_object_with_name(fk.referencedTable.columns, referenced_name)
            if not referenced_column:
                grt.send_error('Sybase reverseEngineerTableFKs',
                               'Could not find column "%s" in table "%s.%s"' % (referenced_name, referenced_schema.name, fk.referencedTable.name))
                return 1
            fk.columns.append(local_column)
            fk.referencedColumns.append(referenced_column)

        table.foreignKeys.append(fk)
    return 0
def reverseEngineerTableIndices(connection, table):
    """Reverse engineers the indices for the given table.

    Queries Sybase sysindexes/sysobjects, decoding status bit flags into
    clustered/primary/unique/ignore-dup attributes, and resolves up to 31
    columns per index via INDEX_COL(). Returns 0 on success, 1 if the table's
    columns have not been reverse engineered yet.
    """
    schema = table.owner
    catalog = schema.owner

    if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its indices
        grt.send_error('Sybase reverseEngineerTableIndices',
                       'Reverse engineer of table %s.%s was attempted but the table has no columns attribute' % (schema.name, table.name))
        return 1

    # Switch the session to the table's database before touching its catalog.
    execute_query(connection, 'USE %s' % catalog.name)

    # Status-bit decoding per Sybase docs; indid 0 is the table itself and
    # 255 is text/image data, so both are excluded.
    query = """SELECT INDEX_NAME = A.name,
    IS_CLUSTERED = CASE WHEN ((A.status&16) = 16 OR (A.status2&512) = 512) THEN 1 ELSE 0 END,
    IS_PRIMARY = CASE WHEN ((A.status&0x800) = 0x800) THEN 1 ELSE 0 END,
    IS_UNIQUE = CASE WHEN ((A.status&2) = 2) THEN 1 ELSE 0 END,
    IGNORE_DUP = CASE WHEN ((A.status&4) = 4) THEN 1 ELSE 0 END,
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 1), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 2),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 3), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 4),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 5), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 6),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 7), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 8),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 9), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 10),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 11), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 12),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 13), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 14),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 15), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 16),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 17), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 18),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 19), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 20),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 21), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 22),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 23), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 24),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 25), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 26),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 27), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 28),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 29), INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 30),
    INDEX_COL(USER_NAME(B.uid) + '.' + B.name, indid, 31)
FROM sysindexes A, sysobjects B
WHERE A.indid > 0 AND A.indid < 255 AND A.status2 & 2 != 2
AND B.id = A.id AND B.type = 'U' AND USER_NAME(B.uid) = ? AND B.name=?
ORDER BY 1, 2, 3"""

    for index_row in execute_query(connection, query, schema.name, table.name):
        index = grt.classes.db_sybase_Index()
        index.name = index_row[0]
        index.clustered = index_row[1]
        index.isPrimary = index_row[2]
        index.unique = index_row[3]
        index.indexType = 'UNIQUE' if index.unique else 'INDEX'
        index.ignoreDuplicateRows = index_row[4]
        table.addIndex(index)

        # Get the columns for the index:
        # (trailing NULLs mean the index has fewer than 31 columns)
        index_column_names = [colname for colname in index_row[5:] if colname is not None]
        for column_name in index_column_names:
            column = find_object_with_name(table.columns, column_name)
            if column:
                index_column = grt.classes.db_sybase_IndexColumn()
                index_column.name = index.name + '.' + column_name
                index_column.referencedColumn = column
                index.columns.append(index_column)
    return 0
def reverseEngineerTableFKs(connection, table):
    """Reverse engineers the foreign keys for the given table.

    Queries Sybase syskeys (type=2) for up to 8 local/referenced column pairs
    per foreign key, synthesizes FK names, and attaches the resolved
    db_sybase_ForeignKey objects to the table. Returns 0 on success, 1 on any
    unresolved object.
    """
    schema = table.owner
    catalog = schema.owner

    # Switch the session to the table's database before touching its catalog.
    execute_query(connection, 'USE %s' % catalog.name)

    query = """SELECT so.name, USER_NAME(so.uid),
COL_NAME(sk.id, key1), COL_NAME(sk.id, key2), COL_NAME(sk.id, key3), COL_NAME(sk.id, key4),
COL_NAME(sk.id, key5), COL_NAME(sk.id, key6), COL_NAME(sk.id, key7), COL_NAME(sk.id, key8),
COL_NAME(sk.depid, depkey1), COL_NAME(sk.depid, depkey2), COL_NAME(sk.depid, depkey3), COL_NAME(sk.depid, depkey4),
COL_NAME(sk.depid, depkey5), COL_NAME(sk.depid, depkey6), COL_NAME(sk.depid, depkey7), COL_NAME(sk.depid, depkey8)
FROM syskeys sk JOIN sysobjects so ON sk.depid = so.id
WHERE sk.type = 2 AND sk.id = OBJECT_ID('%s.%s')""" % (schema.name, table.name)

    if len(table.columns) == 0:  # Table must have columns reverse engineered before we can rev eng its foreign keys
        grt.send_error(
            'Sybase reverseEngineerTableFKs',
            'Reverse engineer of foreign keys for table %s.%s was attempted but the table has no columns attribute' % (schema.name, table.name))
        return 1

    table.foreignKeys.remove_all()
    for row in execute_query(connection, query):
        # Drop the NULL slots of unused key positions.
        fk_columns = [col_name for col_name in row[2:10] if col_name]
        fk_ref_columns = [col_name for col_name in row[10:] if col_name]

        foreign_key = grt.classes.db_sybase_ForeignKey()
        # Sybase FKs are unnamed in syskeys, so synthesize a deterministic name.
        foreign_key.name = '%s_%s_%s_fk' % (schema.name, table.name, '_'.join(fk_columns))
        foreign_key.owner = table
        # NOTE(review): syskeys does not expose referential actions — RESTRICT
        # is assumed for both rules.
        foreign_key.deleteRule = foreign_key.updateRule = 'RESTRICT'
        foreign_key.modelOnly = 0

        referenced_schema = find_object_with_name(catalog.schemata, row[1])
        if not referenced_schema:
            grt.send_error(
                'Sybase reverseEngineerTableFKs',
                'Could not find schema "%s" in catalog "%s"' % (schema.name, catalog.name))
            return 1

        foreign_key.referencedTable = find_object_with_name(referenced_schema.tables, row[0])
        if not foreign_key.referencedTable:
            grt.send_error(
                'Sybase reverseEngineerTableFKs',
                'Could not find referenced table "%s" in schema "%s"' % (row[0], schema.name))
            return 1

        # Resolve and pair up the column objects on both sides.
        for column_name, referenced_column_name in zip(fk_columns, fk_ref_columns):
            column = find_object_with_name(table.columns, column_name)
            if not column:
                grt.send_error(
                    'Sybase reverseEngineerTableFKs',
                    'Could not find column "%s" in table "%s.%s"' % (column_name, schema.name, table.name))
                return 1
            referenced_column = find_object_with_name(
                foreign_key.referencedTable.columns, referenced_column_name)
            if not referenced_column:
                grt.send_error(
                    'Sybase reverseEngineerTableFKs',
                    'Could not find column "%s" in table "%s.%s"' % (referenced_column_name, referenced_schema.name, foreign_key.referencedTable.name))
                return 1
            foreign_key.columns.append(column)
            foreign_key.referencedColumns.append(referenced_column)

        table.foreignKeys.append(foreign_key)
    return 0
# majorVersion = int(version.split('.', 1)[0]) # if majorVersion >= 9: # con.add_output_converter(-150, lambda value: value if value is None else value.decode('utf-16')) # con.add_output_converter(0, lambda value: value if value is None else value.decode('utf-16')) # else: # con.add_output_converter(-150, lambda value: value if value is None else str(value)) # con.add_output_converter(0, lambda value: value if value is None else str(value)) except pyodbc.Error, odbc_err: # 28000 is from native SQL Server driver... 42000 seems to be from FreeTDS # FIXME: This should be tuned for Sybase if len(odbc_err.args) == 2 and odbc_err.args[0] in ('28000', '42000') and "(18456)" in odbc_err.args[1]: raise grt.DBLoginError(odbc_err.args[1]) if not con: grt.send_error('Connection failed', str(exc)) raise _connections[connection.__id__] = {"connection" : con } _connections[connection.__id__]["version"] = getServerVersion(connection) version = execute_query(connection, "SELECT @@version").fetchone()[0] grt.send_info("Connected to %s, %s", (host_identifier, version)) return 1 @ModuleInfo.export(grt.INT, grt.classes.db_mgmt_Connection) def disconnect(connection): if connection.__id__ in _connections: del _connections[connection.__id__] # pyodbc cursors are automatically closed when deleted return 0
def reverseEngineerTableFKs(cls, connection, table):
    """Reverse engineers the foreign keys for the given table.

    Reads INFORMATION_SCHEMA referential constraint metadata (one row per
    column pair, ordered by constraint name and ordinal position), groups the
    rows into db_ForeignKey objects and attaches them to the table.
    Returns 0 on success, 1 when a required table has no reverse engineered
    columns yet.
    """
    # Fix vs. original: when a constraint's referenced schema or table could
    # not be resolved, `current_fk` kept pointing at the PREVIOUS (already
    # appended) FK, so every remaining row of the failed constraint re-entered
    # the "new constraint" branch and appended the previous FK again,
    # duplicating it in table.foreignKeys. We now reset `current_fk` after
    # appending and skip all remaining rows of an unresolvable constraint.
    catalog = table.owner.owner
    schema = table.owner
    query = """SELECT kcu1.COLUMN_NAME, rc.CONSTRAINT_NAME, kcu2.TABLE_SCHEMA, kcu2.TABLE_NAME,
    kcu2.COLUMN_NAME, rc.UPDATE_RULE, rc.DELETE_RULE
FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS rc
JOIN INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc ON rc.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE kcu1 ON kcu1.CONSTRAINT_CATALOG = rc.CONSTRAINT_CATALOG
    AND kcu1.CONSTRAINT_SCHEMA = rc.CONSTRAINT_SCHEMA AND kcu1.CONSTRAINT_NAME = rc.CONSTRAINT_NAME
JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE kcu2 ON kcu2.CONSTRAINT_CATALOG = rc.UNIQUE_CONSTRAINT_CATALOG
    AND kcu2.CONSTRAINT_SCHEMA = rc.UNIQUE_CONSTRAINT_SCHEMA AND kcu2.CONSTRAINT_NAME = rc.UNIQUE_CONSTRAINT_NAME
WHERE tc.CONSTRAINT_TYPE = 'FOREIGN KEY' AND kcu1.ORDINAL_POSITION = kcu2.ORDINAL_POSITION
    AND kcu1.TABLE_CATALOG = ? AND kcu1.TABLE_SCHEMA = ? AND kcu1.TABLE_NAME = ?
ORDER BY kcu1.CONSTRAINT_NAME, kcu1.ORDINAL_POSITION"""

    if len(table.columns) == 0:
        grt.send_error('%s reverseEngineerTableFKs: Reverse engineering of table '
                       '%s was attempted but the table has no columns attribute' % (cls.getTargetDBMSName(), table.name))
        return 1    # Table must have columns reverse engineered before we can rev eng its foreign keys

    cursor = cls.execute_query(connection, query, catalog.name, schema.name, table.name)
    current_fk = None
    skipped_fk_name = None  # constraint whose referenced schema/table could not be resolved
    table.foreignKeys.remove_all()
    for col_name, fk_name, ref_schema, ref_table, ref_col, upd_rule, del_rule in cursor:
        if fk_name == skipped_fk_name:
            continue  # remaining column rows of an unresolvable constraint
        if not current_fk or fk_name != current_fk.name:
            # Starting a new constraint: flush the finished one first.
            if current_fk:
                table.foreignKeys.append(current_fk)
                current_fk = None
            foreign_key = grt.classes.db_ForeignKey()
            foreign_key.name = fk_name
            foreign_key.owner = table
            foreign_key.deleteRule = del_rule.upper()
            foreign_key.updateRule = upd_rule.upper()
            foreign_key.modelOnly = 0
            referenced_schema = find_object_with_name(catalog.schemata, ref_schema)
            if not referenced_schema:
                grt.send_warning('%s reverseEngineerTableFKs: Could not find referenced schema "%s" '
                                 'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), ref_schema, fk_name))
                skipped_fk_name = fk_name
                continue
            referenced_table = find_object_with_name(referenced_schema.tables, ref_table)
            if not referenced_table:
                grt.send_warning('%s reverseEngineerTableFKs: Could not find referenced table "%s.%s" '
                                 'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), ref_schema, ref_table, fk_name))
                skipped_fk_name = fk_name
                continue
            if len(referenced_table.columns) == 0:
                grt.send_error('%s reverseEngineerTableFKs: Reverse engineering of table '
                               '%s was attempted but the table has no columns attribute' % (cls.getTargetDBMSName(), referenced_table.name))
                return 1    # Table must have columns reverse engineered before we can rev eng its foreign keys
            foreign_key.referencedTable = referenced_table
            current_fk = foreign_key

        # Resolve this row's column pair; skipping keeps both lists in sync.
        column = find_object_with_name(table.columns, col_name)
        if not column:
            grt.send_warning('%s reverseEngineerTableFKs: Could not find column "%s.%s.%s" '
                             'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), schema.name, table.name, col_name, fk_name))
            continue
        current_fk.columns.append(column)
        referenced_column = find_object_with_name(current_fk.referencedTable.columns, ref_col)
        if not referenced_column:
            grt.send_warning('%s reverseEngineerTableFKs: Could not find referenced column "%s.%s.%s" '
                             'for foreign key constraint "%s"' % (cls.getTargetDBMSName(), ref_schema, ref_table, ref_col, fk_name))
            continue
        current_fk.referencedColumns.append(referenced_column)

    # Store the last fk:
    if current_fk:
        table.foreignKeys.append(current_fk)
    return 0
def reverseEngineerTableFKs(cls, connection, table):
    """Reverse engineer the foreign keys for the given table.

    Groups the rows returned by the ODBC driver's foreignKeys() catalog
    call by constraint name and builds a db_ForeignKey object on ``table``
    for each group.  Unnamed constraints are split apart by key_seq and
    given randomly generated names.

    Returns 0 on success, 1 if ``table`` has no reverse engineered columns
    yet (columns must be processed before foreign keys).
    """
    def get_action(value):
        # Map an ODBC referential-action constant to its SQL keyword;
        # unknown or missing values map to the empty string.
        if value is None:
            return ''
        elif value == constant.SQL_CASCADE:
            return 'CASCADE'
        elif value == constant.SQL_RESTRICT:
            return 'RESTRICT'
        elif value == constant.SQL_SET_NULL:
            return 'SET NULL'
        elif value == constant.SQL_NO_ACTION:
            return 'NO ACTION'
        elif value == constant.SQL_SET_DEFAULT:
            return 'SET DEFAULT'
        else:
            return ''

    def process_fk(catalog, table, fk_name, fk_rows):
        # Build one db_ForeignKey from the group of catalog rows that share
        # a constraint name and attach it to `table`.
        foreign_key = grt.classes.db_ForeignKey()
        # FK names must be unique model-wide: append a random numeric
        # suffix until the name no longer clashes with a registered one.
        if fk_name in cls._connections[connection.__id__]['fk_names']:
            while True:
                suffix = '_%06d' % random.randint(0, 999999)
                if fk_name + suffix not in cls._connections[connection.__id__]['fk_names']:
                    break
            fk_name += suffix
        foreign_key.name = fk_name
        foreign_key.owner = table
        foreign_key.deleteRule = get_action(fk_rows[0].delete_rule)
        foreign_key.updateRule = get_action(fk_rows[0].update_rule)
        foreign_key.modelOnly = 0
        # Find the referenced table (fall back to this table's schema/table
        # when the driver reports no pktable_schem/pktable_name):
        referenced_schema = find_object_with_name(catalog.schemata, fk_rows[0].pktable_schem) if fk_rows[0].pktable_schem else schema
        if not referenced_schema:
            # FIX: previously a missing schema fell through and raised
            # AttributeError on referenced_schema.tables below.
            grt.send_error('Migration: reverseEngineerTableFKs: Schema "%s" not found for foreign key "%s"' % (fk_rows[0].pktable_schem, fk_name))
            return
        foreign_key.referencedTable = find_object_with_name(referenced_schema.tables, fk_rows[0].pktable_name) if fk_rows[0].pktable_name else table
        if not foreign_key.referencedTable:
            # FIX: previously a missing referenced table fell through and
            # raised AttributeError on foreign_key.referencedTable.columns.
            grt.send_error('Migration: reverseEngineerTableFKs: Table "%s" not found in schema "%s"' % (fk_rows[0].pktable_name, referenced_schema.name))
            return
        for fk_row in fk_rows:
            column = find_object_with_name(table.columns, fk_row.fkcolumn_name)
            if not column:
                grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.fkcolumn_name, table.name))
                continue
            ref_column = find_object_with_name(foreign_key.referencedTable.columns, fk_row.pkcolumn_name)
            if not ref_column:
                grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.pkcolumn_name, foreign_key.referencedTable.name))
                continue
            foreign_key.columns.append(column)
            foreign_key.referencedColumns.append(ref_column)
        # Register the (possibly uniquified) name and attach the FK.
        cls._connections[connection.__id__]['fk_names'][foreign_key.name] = table
        table.foreignKeys.append(foreign_key)

    if len(table.columns) == 0:
        grt.send_error('Migration: reverseEngineerTableFKs: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
        return 1    # Table must have columns reverse engineered before we can rev eng its indices

    catalog = table.owner.owner
    schema = table.owner
    table.foreignKeys.remove_all()
    fk_dict = {}  # Map the foreign key names to their respective rows
    for row in cls.get_connection(connection).cursor().foreignKeys(foreignSchema=schema.name, foreignTable=table.name):
        fk_dict.setdefault(row.fk_name, []).append(row)

    for fk_name, fk_columns in fk_dict.iteritems():
        if not fk_name:
            # If there are unnamed fks we might have several fks merged, need to separate them.
            # Partition the list based on key_seq so that if the key_seq list is, for instance,
            # [1, 2, 3, 1, 2, 1] we can have [ [1, 2, 3], [1, 2], [1] ]
            indices = [idx for idx, item in enumerate(fk_columns) if item.key_seq == 1]
            slices = [fk_columns[i:j] for i, j in zip(indices, indices + [None])]
            # Random, collision-free generated names for each separated fk
            # (renamed from `id`/`slice` to avoid shadowing builtins):
            random_names = ['FK_generated_%06d' % rand_id for rand_id in random.sample(range(1000000), len(slices))]
            for fk_slice, random_name in zip(slices, random_names):
                if fk_slice:
                    process_fk(catalog, table, random_name, fk_slice)
        else:
            process_fk(catalog, table, fk_name, fk_columns)
    return 0
grt.send_error('The given connection string is not a valid python dict: %s' % connstr) raise # Remove unreplaced parameters: params = dict( (key, value) for key, value in all_params_dict.iteritems() if not (value.startswith('%') and value.endswith('%')) ) params['password'] = password conn_params = dict(params) conn_params['password'] = '******' connection.parameterValues['wbcopytables_connection_string'] = repr(conn_params) con = sqlanydb.connect(**params) else: con = db_driver.connect(connection, password) if not con: grt.send_error('Connection failed', str(exc)) raise grt.send_info('Connected') cls._connections[connection.__id__] = {'connection': con} if con: ver = cls.execute_query(connection, "SELECT @@version").fetchone()[0] grt.log_info("SQLAnywhere RE", "Connected to %s, %s\n" % (connection.name, ver)) ver_parts = server_version_str2tuple(ver) + (0, 0, 0, 0) version = grt.classes.GrtVersion() version.majorNumber, version.minorNumber, version.releaseNumber, version.buildNumber = ver_parts[:4] cls._connections[connection.__id__]["version"] = version return 1 @classmethod @release_cursors
def reverseEngineerTableFKs(cls, connection, table):
    """Reverse engineer the foreign keys for the given table.

    Groups the rows returned by the ODBC driver's foreignKeys() catalog
    call by constraint name and builds a db_ForeignKey object on ``table``
    for each group.  Unnamed constraints are split apart by key_seq and
    given randomly generated names.

    Returns 0 on success, 1 if ``table`` has no reverse engineered columns
    yet (columns must be processed before foreign keys).
    """
    def get_action(value):
        # Map an ODBC referential-action constant to its SQL keyword;
        # unknown or missing values map to the empty string.
        if value is None:
            return ''
        elif value == constant.SQL_CASCADE:
            return 'CASCADE'
        elif value == constant.SQL_RESTRICT:
            return 'RESTRICT'
        elif value == constant.SQL_SET_NULL:
            return 'SET NULL'
        elif value == constant.SQL_NO_ACTION:
            return 'NO ACTION'
        elif value == constant.SQL_SET_DEFAULT:
            return 'SET DEFAULT'
        else:
            return ''

    def process_fk(catalog, table, fk_name, fk_rows):
        # Build one db_ForeignKey from the group of catalog rows that share
        # a constraint name and attach it to `table`.
        foreign_key = grt.classes.db_ForeignKey()
        # FK names must be unique model-wide: append a random numeric
        # suffix until the name no longer clashes with a registered one.
        if fk_name in cls._connections[connection.__id__]['fk_names']:
            while True:
                suffix = '_%06d' % random.randint(0, 999999)
                if fk_name + suffix not in cls._connections[connection.__id__]['fk_names']:
                    break
            fk_name += suffix
        foreign_key.name = fk_name
        foreign_key.owner = table
        foreign_key.deleteRule = get_action(fk_rows[0].delete_rule)
        foreign_key.updateRule = get_action(fk_rows[0].update_rule)
        foreign_key.modelOnly = 0
        # Find the referenced table (fall back to this table's schema/table
        # when the driver reports no pktable_schem/pktable_name):
        referenced_schema = find_object_with_name(catalog.schemata, fk_rows[0].pktable_schem) if fk_rows[0].pktable_schem else schema
        if not referenced_schema:
            # FIX: previously a missing schema fell through and raised
            # AttributeError on referenced_schema.tables below.
            grt.send_error('Migration: reverseEngineerTableFKs: Schema "%s" not found for foreign key "%s"' % (fk_rows[0].pktable_schem, fk_name))
            return
        foreign_key.referencedTable = find_object_with_name(referenced_schema.tables, fk_rows[0].pktable_name) if fk_rows[0].pktable_name else table
        if not foreign_key.referencedTable:
            # FIX: previously a missing referenced table fell through and
            # raised AttributeError on foreign_key.referencedTable.columns.
            grt.send_error('Migration: reverseEngineerTableFKs: Table "%s" not found in schema "%s"' % (fk_rows[0].pktable_name, referenced_schema.name))
            return
        for fk_row in fk_rows:
            column = find_object_with_name(table.columns, fk_row.fkcolumn_name)
            if not column:
                grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.fkcolumn_name, table.name))
                continue
            ref_column = find_object_with_name(foreign_key.referencedTable.columns, fk_row.pkcolumn_name)
            if not ref_column:
                grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.pkcolumn_name, foreign_key.referencedTable.name))
                continue
            foreign_key.columns.append(column)
            foreign_key.referencedColumns.append(ref_column)
        # Register the (possibly uniquified) name and attach the FK.
        cls._connections[connection.__id__]['fk_names'][foreign_key.name] = table
        table.foreignKeys.append(foreign_key)

    if len(table.columns) == 0:
        grt.send_error('Migration: reverseEngineerTableFKs: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
        return 1    # Table must have columns reverse engineered before we can rev eng its indices

    catalog = table.owner.owner
    schema = table.owner
    table.foreignKeys.remove_all()
    fk_dict = {}  # Map the foreign key names to their respective rows
    for row in cls.get_connection(connection).cursor().foreignKeys(foreignSchema=schema.name, foreignTable=table.name):
        fk_dict.setdefault(row.fk_name, []).append(row)

    for fk_name, fk_columns in list(fk_dict.items()):
        if not fk_name:
            # If there are unnamed fks we might have several fks merged, need to separate them.
            # Partition the list based on key_seq so that if the key_seq list is, for instance,
            # [1, 2, 3, 1, 2, 1] we can have [ [1, 2, 3], [1, 2], [1] ]
            indices = [idx for idx, item in enumerate(fk_columns) if item.key_seq == 1]
            slices = [fk_columns[i:j] for i, j in zip(indices, indices + [None])]
            # Random, collision-free generated names for each separated fk
            # (renamed from `id`/`slice` to avoid shadowing builtins):
            random_names = ['FK_generated_%06d' % rand_id for rand_id in random.sample(list(range(1000000)), len(slices))]
            for fk_slice, random_name in zip(slices, random_names):
                if fk_slice:
                    process_fk(catalog, table, random_name, fk_slice)
        else:
            process_fk(catalog, table, fk_name, fk_columns)
    return 0
def reverseEngineerTableFKs(cls, connection, table):
    """Reverse engineers the foreign keys for the given table.

    MS Access variant: relationships are read from the MSysRelationships
    system table.  Returns 1 on error.

    NOTE(review): as visible here the function ends right after building
    fk_dict — the loop that consumes it appears to be truncated from this
    chunk; confirm against the full file.
    """
    def get_update_action(grbit):
        # grbit is the MSysRelationships attribute bitmask; bit 0x100
        # presumably flags cascaded updates (dbRelationUpdateCascades) —
        # TODO confirm against the Access dbRelation* constants.
        if grbit & 256:
            return "CASCADE"
        else:
            return "RESTRICT"

    def get_delete_action(grbit):
        # 4352 == 0x1100; presumably covers the cascade-delete flag(s) —
        # TODO confirm against the Access dbRelation* constants.
        if grbit & 4352:
            return "CASCADE"
        else:
            return "RESTRICT"

    def process_fk(catalog, table, fk_name, fk_rows):
        # Build one db_ForeignKey from the MSysRelationships rows that
        # share a relationship name and attach it to `table`.
        foreign_key = grt.classes.db_ForeignKey()
        # FK names must be unique model-wide: append a random numeric
        # suffix until the name no longer clashes with a registered one.
        if fk_name in cls._connections[connection.__id__]['fk_names']:
            while True:
                suffix = '_%06d' % random.randint(0, 999999)
                if fk_name + suffix not in cls._connections[connection.__id__]['fk_names']:
                    break
            fk_name += suffix
        foreign_key.name = fk_name
        foreign_key.owner = table
        foreign_key.deleteRule = get_delete_action(fk_rows[0].grbit)
        foreign_key.updateRule = get_update_action(fk_rows[0].grbit)
        foreign_key.modelOnly = 0
        # Find the referenced table (Access: single schema, schemata[0]):
        foreign_key.referencedTable = find_object_with_name(catalog.schemata[0].tables, fk_rows[0].szReferencedObject)
        if not foreign_key.referencedTable:
            grt.send_error('Migration: reverseEngineerTableFKs: Table "%s" not found in schemata "%s"' % (fk_rows[0].szReferencedObject, catalog.schemata[0].name))
            return 1
        for fk_row in fk_rows:
            column = find_object_with_name(table.columns, fk_row.szColumn)
            if not column:
                grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.szColumn, table.name))
                continue
            ref_column = find_object_with_name(foreign_key.referencedTable.columns, fk_row.szReferencedColumn)
            if not ref_column:
                grt.send_error('Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"' % (fk_row.szReferencedColumn, foreign_key.referencedTable.name))
                continue
            foreign_key.columns.append(column)
            foreign_key.referencedColumns.append(ref_column)
        # Find and delete indexes that are identical to FKs
        for index in reversed(table.indices):
            if table.primaryKey != index and len(index.columns) == len(foreign_key.columns):
                match = True
                for i, col in enumerate(index.columns):
                    if foreign_key.columns[i] != col.referencedColumn:
                        match = False
                        break
                if match:
                    # NOTE(review): the message reports col.name (last column
                    # compared), not index.name — index.name looks intended;
                    # verify before changing.
                    grt.send_warning("Migration: reverseEngineerTable: Skipping duplicate index %s from table %s\n" % (col.name, table.name))
                    table.indices.remove(index)
        # Register the (possibly uniquified) name and attach the FK.
        cls._connections[connection.__id__]['fk_names'][foreign_key.name] = table
        table.foreignKeys.append(foreign_key)

    if len(table.columns) == 0:
        grt.send_error('Migration: reverseEngineerTableFKs: Reverse engineer of table %s was attempted but the table has no columns attribute' % table.name)
        return 1    # Table must have columns reverse engineered before we can rev eng its indices

    catalog = table.owner.owner
    table.foreignKeys.remove_all()
    fk_dict = {}  # Map the foreign key names to their respective rows
    import pyodbc
    try:
        for row in cls.get_connection(connection).cursor().execute("SELECT * FROM MSysRelationships WHERE szObject = ?", (table.name,)):
            fk_dict.setdefault(row.szRelationship, []).append(row)
    except pyodbc.ProgrammingError, e:
        # 42000: no SELECT permission on the system tables.
        if e.args[0] == '42000':
            grt.send_error("\n\nMigration: Could not read from System Tables. You must grant SELECT access on all system tables for the database.")
            return 1
        raise
def connect(cls, connection, password):
    '''Establishes a connection to the server and stores the connection object in the connections pool.

    It first looks for a connection with the given connection parameters in the connections
    pool to reuse existent connections. If such a connection is found, it queries the server
    to ensure that the connection is alive and reestablishes it if it is dead. If no suitable
    connection is found in the connections pool, a new one is created and stored in the pool.

    Parameters:
    ===========
        connection: an object of the class db_mgmt_Connection storing the parameters
                    for the connection.
        password:   a string with the password to use for the connection (passed to the
                    driver; for the sqlanydb path it is inserted into the connection
                    parameters).

    Returns 1; also caches the server version under cls._connections[...]["version"].
    '''
    con = None
    try:
        # Reuse a pooled connection if one exists for these parameters...
        con = cls.get_connection(connection)
        try:
            # ...but probe it first; any failure is treated as a dead connection.
            if not con.cursor().execute('SELECT 1'):
                raise Exception('connection error')
        except Exception as exc:
            grt.send_info(
                'Connection to %s apparently lost, reconnecting...' % connection.hostIdentifier)
            # Funnel every probe failure into the reconnect path below.
            raise NotConnectedError('Connection error')
    except NotConnectedError as exc:
        grt.send_info('Connecting to %s...'
                      % connection.hostIdentifier)
        if connection.driver.driverLibraryName == 'sqlanydb':
            import sqlanydbwrapper as sqlanydb  # Replace this to a direct sqlanydb import when it complies with PEP 249
            # Fill the driver's connection-string template from the stored
            # parameters; the result must evaluate to a Python dict literal.
            connstr = replace_string_parameters(
                connection.driver.connectionStringTemplate,
                dict(connection.parameterValues))
            import ast
            try:
                all_params_dict = ast.literal_eval(connstr)
            except Exception as exc:
                grt.send_error(
                    'The given connection string is not a valid python dict: %s' % connstr)
                raise
            # Remove unreplaced parameters (still wrapped in %...%):
            params = dict(
                (key, value) for key, value in list(all_params_dict.items())
                if not (value.startswith('%') and value.endswith('%')))
            params['password'] = password
            # Store a copy with the password masked for the wbcopytables tool.
            conn_params = dict(params)
            conn_params['password'] = '******'
            connection.parameterValues[
                'wbcopytables_connection_string'] = repr(conn_params)
            con = sqlanydb.connect(**params)
        else:
            con = db_driver.connect(connection, password)
        if not con:
            grt.send_error('Connection failed', str(exc))
            raise
        grt.send_info('Connected')
        # Cache the fresh connection in the pool.
        cls._connections[connection.__id__] = {'connection': con}
    if con:
        # Record the server version alongside the pooled connection.
        ver = cls.execute_query(connection, "SELECT @@version").fetchone()[0]
        grt.log_info("SQLAnywhere RE", "Connected to %s, %s\n" % (connection.name, ver))
        # Pad with zeros so short version strings still unpack into 4 fields.
        ver_parts = server_version_str2tuple(ver) + (0, 0, 0, 0)
        version = grt.classes.GrtVersion()
        version.majorNumber, version.minorNumber, version.releaseNumber, version.buildNumber = ver_parts[:4]
        cls._connections[connection.__id__]["version"] = version
    return 1
# Remove unreplaced parameters: params = dict( (key, value) for key, value in all_params_dict.iteritems() if not (value.startswith("%") and value.endswith("%")) ) params["password"] = password conn_params = dict(params) conn_params["password"] = "******" connection.parameterValues["wbcopytables_connection_string"] = repr(conn_params) con = sqlanydb.connect(**params) else: con = db_driver.connect(connection, password) if not con: grt.send_error("Connection failed", str(exc)) raise grt.send_info("Connected") cls._connections[connection.__id__] = {"connection": con} if con: ver = cls.execute_query(connection, "SELECT @@version").fetchone()[0] grt.log_info("SQLAnywhere RE", "Connected to %s, %s\n" % (connection.name, ver)) ver_parts = server_version_str2tuple(ver) + (0, 0, 0, 0) version = grt.classes.GrtVersion() version.majorNumber, version.minorNumber, version.releaseNumber, version.buildNumber = ver_parts[:4] cls._connections[connection.__id__]["version"] = version return 1 @classmethod @release_cursors
def reverseEngineerTableFKs(cls, connection, table):
    """Reverse engineers the foreign keys for the given table.

    MS Access variant: relationships are read from the MSysRelationships
    system table.  Returns 1 on error.

    NOTE(review): as visible here the function ends right after building
    fk_dict — the loop that consumes it appears to be truncated from this
    chunk; confirm against the full file.
    """
    def get_update_action(grbit):
        # grbit is the MSysRelationships attribute bitmask; bit 0x100
        # presumably flags cascaded updates (dbRelationUpdateCascades) —
        # TODO confirm against the Access dbRelation* constants.
        if grbit & 256:
            return "CASCADE"
        else:
            return "RESTRICT"

    def get_delete_action(grbit):
        # 4352 == 0x1100; presumably covers the cascade-delete flag(s) —
        # TODO confirm against the Access dbRelation* constants.
        if grbit & 4352:
            return "CASCADE"
        else:
            return "RESTRICT"

    def process_fk(catalog, table, fk_name, fk_rows):
        # Build one db_ForeignKey from the MSysRelationships rows that
        # share a relationship name and attach it to `table`.
        foreign_key = grt.classes.db_ForeignKey()
        # FK names must be unique model-wide: append a random numeric
        # suffix until the name no longer clashes with a registered one.
        if fk_name in cls._connections[connection.__id__]['fk_names']:
            while True:
                suffix = '_%06d' % random.randint(0, 999999)
                if fk_name + suffix not in cls._connections[
                        connection.__id__]['fk_names']:
                    break
            fk_name += suffix
        foreign_key.name = fk_name
        foreign_key.owner = table
        foreign_key.deleteRule = get_delete_action(fk_rows[0].grbit)
        foreign_key.updateRule = get_update_action(fk_rows[0].grbit)
        foreign_key.modelOnly = 0
        # Find the referenced table (Access: single schema, schemata[0]):
        foreign_key.referencedTable = find_object_with_name(
            catalog.schemata[0].tables, fk_rows[0].szReferencedObject)
        if not foreign_key.referencedTable:
            grt.send_error(
                'Migration: reverseEngineerTableFKs: Table "%s" not found in schemata "%s"'
                % (fk_rows[0].szReferencedObject, catalog.schemata[0].name))
            return 1
        for fk_row in fk_rows:
            column = find_object_with_name(table.columns, fk_row.szColumn)
            if not column:
                grt.send_error(
                    'Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"'
                    % (fk_row.szColumn, table.name))
                continue
            ref_column = find_object_with_name(
                foreign_key.referencedTable.columns, fk_row.szReferencedColumn)
            if not ref_column:
                grt.send_error(
                    'Migration: reverseEngineerTableFKs: Column "%s" not found in table "%s"'
                    % (fk_row.szReferencedColumn, foreign_key.referencedTable.name))
                continue
            foreign_key.columns.append(column)
            foreign_key.referencedColumns.append(ref_column)
        # Find and delete indexes that are identical to FKs
        for index in reversed(table.indices):
            if table.primaryKey != index and len(index.columns) == len(
                    foreign_key.columns):
                match = True
                for i, col in enumerate(index.columns):
                    if foreign_key.columns[i] != col.referencedColumn:
                        match = False
                        break
                if match:
                    # NOTE(review): the message reports col.name (last column
                    # compared), not index.name — index.name looks intended;
                    # verify before changing.
                    grt.send_warning(
                        "Migration: reverseEngineerTable: Skipping duplicate index %s from table %s\n"
                        % (col.name, table.name))
                    table.indices.remove(index)
        # Register the (possibly uniquified) name and attach the FK.
        cls._connections[connection.__id__]['fk_names'][
            foreign_key.name] = table
        table.foreignKeys.append(foreign_key)

    if len(table.columns) == 0:
        grt.send_error(
            'Migration: reverseEngineerTableFKs: Reverse engineer of table %s was attempted but the table has no columns attribute'
            % table.name)
        return 1  # Table must have columns reverse engineered before we can rev eng its indices

    catalog = table.owner.owner
    table.foreignKeys.remove_all()
    fk_dict = {}  # Map the foreign key names to their respective rows
    import pyodbc
    try:
        for row in cls.get_connection(connection).cursor().execute(
                "SELECT * FROM MSysRelationships WHERE szObject = ?",
                (table.name, )):
            fk_dict.setdefault(row.szRelationship, []).append(row)
    except pyodbc.ProgrammingError, e:
        # 42000: no SELECT permission on the system tables.
        if e.args[0] == '42000':
            grt.send_error(
                "\n\nMigration: Could not read from System Tables. You must grant SELECT access on all system tables for the database."
            )
            return 1
        raise