def mview_vali(src_args, tag_args, _REMOTE_COMMAND, _REMOTE_TAG_COMMAND):
    get_src_mview_sql = "select MVIEW_NAME,owner from DBA_MVIEWS@ops_dblink;"
    get_tag_mview_sql = "select MVIEW_NAME,owner from DBA_MVIEWS;"
    # The source-side list is queried from the target host through the ops_dblink database link.
    src_mview_res = ssh_input(tag_args, _REMOTE_TAG_COMMAND % get_src_mview_sql)
    tag_mview_res = ssh_input(tag_args, _REMOTE_TAG_COMMAND % get_tag_mview_sql)
    src_obj_list = get_ssh_list(src_mview_res)
    tag_obj_list = get_ssh_list(tag_mview_res)
    title = ['物化视图名', '物化视图用户名']
    if src_obj_list != []:
        print("\nINFO: 源端物化视图具体信息请查看本地 ops_check_detail.txt文件:")
        src_table_res = res_table(src_obj_list, title)
        with open('ops_check_detail.txt', 'a+', encoding='utf-8') as file:
            file.write(f"\nINFO: 源端物化视图具体信息:\n{src_table_res}\n")
        ## Fetch the DDL of the source materialized views and append it on the target side
        for obj in src_obj_list:
            obj_name, obj_owner = obj
            ddl_res = get_ddl_info(src_args, obj_name, obj_owner,
                                   'MATERIALIZED_VIEW', _REMOTE_COMMAND)
            ssh_input(tag_args, f'''echo '{ddl_res}' >> /tmp/ops_check.sql''')
        print("\nINFO: 源端物化视图的ddl语句请在目标端查看 /tmp/ops_check.sql 文件 ")
    else:
        print("\nINFO: 源端不存在物化视图")
    if tag_obj_list != []:
        print("\nINFO: 目标端物化视图具体信息请查看本地 ops_check_detail.txt文件:")
        tag_table_res = res_table(tag_obj_list, title)
        with open('ops_check_detail.txt', 'a+', encoding='utf-8') as file:
            file.write(f"\nINFO: 目标端物化视图具体信息:\n{tag_table_res}\n")
    else:
        print("\nINFO: 目标端不存在物化视图")
    return src_obj_list
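# NOTE: get_ssh_list() is defined elsewhere in this toolkit. As an illustrative assumption
# (not the project's actual implementation), it is expected to turn the line-oriented
# sqlplus output returned by ssh_input() into a list of field lists, roughly like this sketch:
def _get_ssh_list_sketch(ssh_lines):
    """Split each non-empty output line on whitespace,
    e.g. ['MV_ORDERS   SCOTT\n'] -> [['MV_ORDERS', 'SCOTT']]."""
    result = []
    for line in ssh_lines:
        fields = [f for f in line.replace('\n', '').split(' ') if f != '']
        if fields:
            result.append(fields)
    return result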
def err_log(mysql_args, os_args):
    log_dir = get_all(mysql_args, "show global variables where variable_name in ('datadir','log_error');")
    # log_error may be a path relative to datadir (e.g. './mysqld.log')
    if './' in log_dir[1][1]:
        errlog = (log_dir[0][1] + log_dir[1][1]).replace('./', '/').replace('//', '/')
    else:
        errlog = log_dir[1][1]
    errlog_size = round(int(ssh_input_noprint(os_args, f"du -sk {errlog}|awk '{{print $1}}'")[0].replace('\n', '')) / 1024, 3)
    if errlog_size > 2048:
        advice = "Need clean error log"
    else:
        advice = "Do not need clean error log"
    print("\nINFO:\n目前MySQL错误日志大小如下:")
    title = ['Error_log_path', 'Size (MB)', 'Advice']
    before_err_res = [[errlog, errlog_size, advice]]
    before_errlog_table = res_table(before_err_res, title)
    print(before_errlog_table)
    if advice == "Need clean error log":
        print("\nINFO:\n清理MySQL错误日志:")
        clean_log_file(os_args, errlog)
        run_noprint(mysql_args, "flush logs")
        print("\nINFO:\nMySQL错误日志清理完毕.")
        # Re-check the log size once it has been cleaned
        err_log(mysql_args, os_args)
    return before_err_res
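# NOTE: clean_log_file() is implemented elsewhere in this toolkit. A minimal sketch of what
# such a helper could look like (an assumption for illustration only -- the real helper may
# behave differently): truncate the log file in place over SSH so the file handle held by
# mysqld stays valid, and let the caller follow up with "flush logs".
def _clean_log_file_sketch(os_args, log_path):
    # "truncate -s 0" empties the file without removing it, which is the safe way to
    # shrink a log that mysqld still has open.
    return ssh_input_noprint(os_args, f"truncate -s 0 {log_path}")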
def slow_vali(src_args, tag_args, _REMOTE_COMMAND, _REMOTE_TAG_COMMAND):
    owner = input("请输入veridata比对慢的表的用户名: ").upper()
    slow_tables_tmp = input("请输入veridata比对慢的表表名,多表请用逗号隔开: ").upper()
    slow_tables = str(tuple(slow_tables_tmp.split(','))).replace(',)', ')')
    get_slow_sql = ''' set serveroutput on;
declare
  v_tablename varchar2(60);
  v_count     int;
  v_sql       varchar2(2000);
  cursor cur_tablename is
    select table_name from dba_tables where table_name in %s and owner = '%s';
begin
  open cur_tablename;
  loop
    fetch cur_tablename into v_tablename;
    exit when cur_tablename%%notfound;
    v_sql := 'select count(*) from %s.' || v_tablename;
    execute immediate v_sql into v_count;
    dbms_output.put_line('%s.' || v_tablename || '.' || v_count);
  end loop;
  close cur_tablename;
end;
/
''' % (slow_tables, owner, owner, owner)
    src_slow_res = ssh_input(src_args, _REMOTE_COMMAND % get_slow_sql)
    tag_slow_res = ssh_input(tag_args, _REMOTE_TAG_COMMAND % get_slow_sql)
    diff_list = []
    for i, v in enumerate(src_slow_res):
        # Each output line has the form OWNER.TABLE_NAME.COUNT
        src_cnt = v.replace('\n', '').split('.')[-1]
        tag_cnt = tag_slow_res[i].replace('\n', '').split('.')[-1]
        if src_cnt != tag_cnt:
            # list.append() returns None, so build the row first and then collect it
            diff_tmp = v.replace('\n', '').split('.')
            diff_tmp.append(tag_cnt)
            diff_list.append(diff_tmp)
    print(diff_list)
    if diff_list == []:
        print(f"\nINFO: 两端{owner}的表 {slow_tables_tmp} 表行数一致.\n")
    else:
        title = ['用户名', '表名', '源端行数', '目标端行数']
        slow_table_res = res_table(diff_list, title)
        print_res = f"\nINFO: 两端{owner}的表 {slow_tables_tmp} 表行数不一致:\n{slow_table_res}"
        print(print_res)
        print("\nINFO: 相关信息已保存到本地 ops_check_detail_slow.txt文件:")
        with open('ops_check_detail_slow.txt', 'a+', encoding='utf-8') as file:
            file.write(print_res)
    return diff_list
def schema_info(mysql_args):
    get_schema_sql = """select table_schema,
       truncate(sum(data_length)/1024/1024,2) as data_size_MB,
       truncate(sum(index_length)/1024/1024,2) as index_size_MB,
       truncate(sum(data_length)/1024/1024,2) + truncate(sum(index_length)/1024/1024,2) as sum_size_MB
  from information_schema.tables
 group by table_schema
 order by sum_size_MB desc"""
    info_res = get_all(mysql_args, get_schema_sql)
    print("\nINFO:\nMySQL数据库规模统计:")
    title = ['DATABASE_NAME', 'DATA_SIZE (MB)', 'INDEX_SIZE (MB)', 'TOTAL_SIZE (MB)']
    schema_table = res_table(info_res, title)
    print(schema_table)
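# NOTE: res_table() lives elsewhere in this toolkit. For readers of this module, a minimal
# sketch of an equivalent renderer (an illustrative assumption, not the project's actual
# code) built on the prettytable package:
def _res_table_sketch(rows, title):
    from prettytable import PrettyTable  # pip install prettytable
    table = PrettyTable()
    table.field_names = title           # column headers, e.g. ['DATABASE_NAME', ...]
    for row in rows:                    # each row is a list/tuple matching the headers
        table.add_row(list(row))
    return table.get_string()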
def bin_log(mysql_args, os_args):
    logbin = get_all(mysql_args, "show global variables where Variable_name in ('log_bin')")[0][1]
    db_version = get_version(mysql_args)
    if logbin == 'ON':
        binlog = get_all(mysql_args, "show global variables where \
            variable_name in ('log_bin','LOG_BIN_BASENAME','expire_logs_days','binlog_format','sync_binlog',\
            'log_bin_index','binlog_cache_size','max_binlog_cache_size','max_binlog_size','Binlog_cache_disk_use','Binlog_cache_use')")
        if db_version != '5.5':
            # log_bin_basename only exists on MySQL 5.6 and later
            bin_dir = get_all(mysql_args, "show global variables where\
                variable_name in ('log_bin_basename')")
            cmd = "ls -lhs --time-style '+%%Y/%%m/%%d %%H:%%M:%%S' %s*|awk '{print $9,$7,$8,$1}'" % bin_dir[0][1]
            bin_size = ssh_input_noprint(os_args, cmd)
            bin_set = []
            for space in bin_size:
                space = [x for x in space.split(' ') if x != '']
                if space != []:
                    bin_set.append(space)
        else:
            bin_set = [('None', 'None')]
            bin_size = [('None', 'None')]
    else:
        binlog = [('log_bin', 'OFF')]
        bin_dir = [('None', 'None')]
        bin_size = [('None', 'None')]
        bin_set = [('None', 'None')]
    print("\nINFO:\nMySQL Binlog日志参数如下:")
    binlog_title = ["Variable_name", "Value"]
    binlog_table = res_table(binlog, binlog_title)
    print(binlog_table)
    # Check logbin first: when log_bin is OFF, binlog only holds the single placeholder row
    # and binlog[2][1] would raise IndexError. Index 2 relies on the alphabetical result
    # order of SHOW GLOBAL VARIABLES, i.e. expire_logs_days.
    if logbin == 'ON' and binlog[2][1] == '0':
        print("\n小结:\nLOG_BIN参数值为ON,且expire_logs_days参数值为0,建议设置保留时间.")
    if bin_set != [('None', 'None')]:
        print("\nINFO:\nMySQL Binlog空间使用情况如下")
        binlog_dir_title = ["Binlog_path", "Use_date", "Use_time", "Size"]
        binlog_dir_table = res_table(bin_set, binlog_dir_title)
        print(binlog_dir_table)
    return binlog, bin_set, bin_size
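# NOTE: get_version() is defined elsewhere; judging from its use above it is assumed to
# return the major.minor server version as a string (e.g. '5.5', '5.7'). A hypothetical
# sketch under that assumption only:
def _get_version_sketch(mysql_args):
    # SELECT VERSION() returns e.g. '5.7.36-log'; keep only 'major.minor'.
    full = get_all(mysql_args, "select version()")[0][0]
    return '.'.join(full.split('.')[:2])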
def os_dir_use(os_args):
    fs_set = ssh_input_noprint(os_args, "df -hP")
    space_param = []
    for space in fs_set[1:]:
        space_tmp = space.split(' ')
        space = [x.replace('\n', '') for x in space_tmp if x != ''][0:6]
        if space != []:
            space_param.append(space)
    print("\nINFO:\n操作系统目录使用情况如下:\n")
    dir_title = ["Filesystem", "Size", "Used", "Avail", "Use%", "Mounted on"]
    dir_table = res_table(space_param, dir_title)
    print(dir_table)
    return space_param
def get_tb_info(src_args, _REMOTE_COMMAND, reinital_tables):
    get_info_sql_init = '''select owner, segment_name, sum(bytes) / 1024 / 1024 / 1024 as BYTS_GB
  from dba_segments
 where segment_name = '%s'
   and owner = '%s'
   AND segment_type = 'TABLE'
 group by owner, segment_name;'''
    # reinital_tables is expected in the form 'OWNER1.TAB1,OWNER2.TAB2'
    re_tbs = [tb.split('.') for tb in reinital_tables.upper().split(',')]
    re_tbs_list = []
    for re_tb_info in re_tbs:
        get_info_sql = get_info_sql_init % (re_tb_info[1], re_tb_info[0])
        res = ssh_input(src_args, _REMOTE_COMMAND % get_info_sql)[0].replace('\n', '')
        res_list = del_null_list(res.split(' '))
        re_tbs_list.append(res_list)
    tbs_table = res_table(re_tbs_list, ['用户名', '表名', '表大小(G)'])
    print(f"\nINFO:重新初始化表信息如下:\n{tbs_table}")
    owners = str(tuple(set([b[0] for b in re_tbs]))).replace(',)', ')')
    tbs = str(tuple(set([b[1] for b in re_tbs]))).replace(',)', ')')
    get_unsupport_tbs_sql = '''select owner, table_name, column_name, data_type
  from dba_tab_columns
 where table_name in %s
   and owner in %s
   and data_type in ('BLOB', 'CLOB', 'LONG', 'LONG RAW');''' % (tbs, owners)
    unsup_tbs = ssh_input(src_args, _REMOTE_COMMAND % get_unsupport_tbs_sql)
    if unsup_tbs != []:
        unsup_tbs_tmp = [c.replace('\n', '').split(' ') for c in unsup_tbs]
        unsup_tbs_list = [del_null_list(d) for d in unsup_tbs_tmp]
        uns_table = res_table(unsup_tbs_list, ['用户名', '表名', '字段名', '字段类型'])
        print(f"\nINFO:以下为包含不支持类型字段的表信息{uns_table}")
        return unsup_tbs_list
    else:
        return "none support tb"
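# NOTE: del_null_list() is defined elsewhere in this toolkit; the sketch below only shows
# its assumed behaviour (illustration, not the project's actual code): drop the empty
# strings left over when a fixed-width sqlplus output line is split on spaces.
def _del_null_list_sketch(fields):
    # ['SCOTT', '', '', 'EMP', '', '0.12'] -> ['SCOTT', 'EMP', '0.12']
    return [f for f in fields if f != '']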
def get_diff_obj_info(src_args, tag_args, get_obj_info_sql, obj_type,
                      _REMOTE_COMMAND, _REMOTE_TAG_COMMAND):
    if obj_type == 'LOB':
        title = [f'{obj_type} 名', f'{obj_type} 用户名']
    elif obj_type == 'INVAILD OBJECT':
        title = [f'{obj_type} 名', f'{obj_type} 用户名', '对象类型', '状态']
    else:
        title = [f'{obj_type} 名', f'{obj_type} 用户名', '状态']
    if get_obj_info_sql == '':
        # Default: compare the named source objects (via ops_dblink) with the target and
        # keep what the target is missing (the MINUS result)
        get_obj_info_sql = '''select distinct s.name, s.owner, b.status
  from dba_source@ops_dblink s, dba_objects@ops_dblink b
 where s.name = b.object_name
   and b.object_type = '%s'
   and s.owner in (select owner from ops_count where OBJECT_TYPE = '%s')
minus
select distinct s.name, s.owner, b.status
  from dba_source s, dba_objects b
 where s.name = b.object_name
   and b.object_type = '%s'
   and s.owner in (select owner from ops_count where OBJECT_TYPE = '%s');
''' % (obj_type, obj_type, obj_type, obj_type)
    get_obj_res = ssh_input(tag_args, _REMOTE_TAG_COMMAND % get_obj_info_sql)
    obj_list = get_ssh_list(get_obj_res)
    if obj_list != []:
        print(f"\nINFO: 目标库缺失的{obj_type} 具体信息请查看本地 ops_check_detail.txt文件:")
        table_res = res_table(obj_list, title)
        with open('ops_check_detail.txt', 'a+', encoding='utf-8') as file:
            file.write(f"\nINFO: 目标库缺失的{obj_type} 具体信息:\n{table_res}\n")
        ## Fetch the DDL of the missing objects and append it on the target side
        if obj_type not in ['TABLE', 'LOB', 'DATABASE LINK', 'INVAILD OBJECT']:
            for obj in obj_list:
                obj_name, obj_owner, _ = obj
                ddl_res = get_ddl_info(src_args, obj_name, obj_owner, obj_type, _REMOTE_COMMAND)
                ssh_input(tag_args, f'''echo "{ddl_res}" >> /tmp/ops_check.sql''')
            print(f"\nINFO: 目标库缺失{obj_type}的ddl语句请在目标端查看 /tmp/ops_check.sql 文件 ")
    else:
        print(f"\nINFO: 源与目标端{obj_type}类型对象信息一致")
    return obj_list
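# NOTE: get_ddl_info() is defined elsewhere. As an assumption for illustration only (the
# project's real helper may differ), a function like it would typically wrap
# DBMS_METADATA.GET_DDL and run it on the source side through the same sqlplus wrapper:
def _get_ddl_info_sketch(src_args, obj_name, obj_owner, obj_type, _REMOTE_COMMAND):
    get_ddl_sql = f"""set long 100000 pagesize 0
select dbms_metadata.get_ddl('{obj_type}', '{obj_name}', '{obj_owner}') from dual;"""
    # ssh_input() returns the sqlplus output as a list of lines; join them into one DDL text.
    return ''.join(ssh_input(src_args, _REMOTE_COMMAND % get_ddl_sql))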
def mysql_dir_use(mysql_args, os_args):
    dirs = get_all(mysql_args, "show global variables where variable_name in ('basedir','datadir')")
    get_dirs = []
    for dir in dirs:
        dir = list(dir)
        if dir[1] != '':
            size = ssh_input_noprint(os_args, "du -sh %s|awk '{print $1}'" % dir[1])[0].replace('\n', '')
        else:
            size = 0
        dir.append(size)
        get_dirs.append(dir)
    print("\nINFO:\nMySQL软件及数据目录使用情况如下:\n")
    dir_title = ["Directory_name", "Directory_path", "Size"]
    dir_table = res_table(get_dirs, dir_title)
    print(dir_table)
    return get_dirs
def get_inconsist_obj(os_args, sync_users, _REMOTE_TAG_COMMAND):
    sync_users_str = str(tuple(sync_users)).upper().replace(',)', ')')
    create_count_tb_sql = '''drop table ops_count;
create table ops_count(OWNER,OBJECT_TYPE,total) as
SELECT D.OWNER, D.OBJECT_TYPE, COUNT(*)
  FROM dba_objects@ops_dblink d
 WHERE d.OWNER in %s
   and d.object_type not in ('TRIGGER', 'JOB', 'VIEW', 'LOB', 'TABLE')
   and object_name not like 'MLOG%%'
   AND NOT EXISTS (SELECT * FROM DBA_RECYCLEBIN@ops_dblink B
                    WHERE B.object_name = D.OBJECT_NAME AND D.OWNER = B.owner)
 GROUP BY D.OWNER, D.OBJECT_TYPE
minus
SELECT D.OWNER, D.OBJECT_TYPE, COUNT(*)
  FROM dba_objects d
 WHERE d.OWNER in %s
   and d.object_type not in ('TRIGGER', 'JOB', 'VIEW', 'LOB', 'TABLE')
   and object_name not like 'MLOG%%'
   AND NOT EXISTS (SELECT * FROM DBA_RECYCLEBIN B
                    WHERE B.object_name = D.OBJECT_NAME AND D.OWNER = B.owner)
 GROUP BY D.OWNER, D.OBJECT_TYPE;''' % (sync_users_str, sync_users_str)
    select_count_sql = 'select * from ops_count;'
    ssh_input(os_args, _REMOTE_TAG_COMMAND % create_count_tb_sql)
    select_count_res = ssh_input(os_args, _REMOTE_TAG_COMMAND % select_count_sql)
    if select_count_res != []:
        print("\nINFO:两端除了trigger、job、view、lob、table以外其他对象数量不一致的信息如下:\n")
        select_count_list = get_ssh_list(select_count_res)
        title = ['用户名', '对象类型', '不一致数目']
        table_res = res_table(select_count_list, title)
        print(table_res)
    else:
        print("\nINFO:两端除了trigger、job、view、lob、table以外其他对象信息数量一致")
    return select_count_res
def get_table_info(db_args, start_time, end_time, table_name, table_owner, mc_logmnr_tb_name, mode):
    print(f"\nINFO:获取表{table_owner}.{table_name} 在{start_time} 至 {end_time} 的历史信息如下:")
    GET_LOGMNR_RES_SQL = SQL_LOG_MNR['GET_LOGMNR_RES_SQL'] % (mc_logmnr_tb_name, table_name, table_owner)
    res_get, title = ora_all(db_args, GET_LOGMNR_RES_SQL, mode)
    if res_get != []:
        info_table = res_table(res_get, title)
        print(info_table)
    else:
        print(f"\nINFO:表{table_owner}.{table_name} 在{start_time} 至 {end_time} 没有查询到历史数据信息,请确认时间段!")
    print(f"\nINFO:如果需要删除本次挖掘信息,请手工执行<drop table {db_args[1]}.{mc_logmnr_tb_name};>")
def undo_log(mysql_args, os_args):
    # log_dir = get_all(mysql_args,"show global variables where variable_name in ('datadir','innodb_undo_directory','innodb_max_undo_log_size','innodb_undo_tablespaces')")
    # if './' in log_dir[2][1]:
    #     undolog_dir = (log_dir[0][1]+log_dir[2][1]).replace('./','/').replace('//','/')
    # else:
    #     undolog_dir = log_dir[2][1]
    # size_cmd = "ls -lhs --time-style '+%%Y/%%m/%%d %%H:%%M:%%S' %s/*|awk '{{print $9,$7,$8,$1}}'"%(undolog_dir)
    # undolog_size = ssh_input_noprint(os_args,size_cmd)
    # undo_set = []
    # for space in undolog_size:
    #     space = [x for x in space.split(' ') if x != '']
    #     if space!=[]:
    #         undo_set.append(space)
    # print("\nINFO:\nMySQL Undo log空间使用情况如下")
    # undolog_dir_title = ["Undolog_path","Use_date","Use_time","Size"]
    # undolog_dir_table = res_table(undo_set,undolog_dir_title)
    # print(undolog_dir_table)
    undo_info = get_all(mysql_args, "show global variables like '%undo%'")
    title = ['Variable_name', 'Value']
    undo_table = res_table(undo_info, title)
    print("\nINFO:\nMySQL Undo 日志参数如下")
    print(undo_table)
    return undo_info
def default_info(sync_obj, db_args, mode):
    sync_obj = sync_obj.upper()
    if sync_obj == "FULL_EXPDP":
        select_deflt_info_sql = '''select distinct b.username, a.tablespace_name, b.TEMPORARY_TABLESPACE, b.profile
  from dba_tablespaces a, dba_users b, dba_segments c
 where a.tablespace_name not in ('SYSTEM', 'SYSAUX')
   and a.contents = 'PERMANENT'
   and a.tablespace_name = c.tablespace_name
   and b.username = c.owner
 group by b.username, a.tablespace_name, b.TEMPORARY_TABLESPACE, b.profile'''
    else:
        schemas = "','".join(list(set([i.split('.')[0] for i in sync_obj.split(',')])))
        select_deflt_info_sql = f'''select distinct b.username, a.tablespace_name, b.TEMPORARY_TABLESPACE, b.profile
  from dba_tablespaces a, dba_users b, dba_segments c
 where a.tablespace_name not in ('SYSTEM', 'SYSAUX')
   and a.contents = 'PERMANENT'
   and a.tablespace_name = c.tablespace_name
   and b.username = c.owner
   and username in ('{schemas}')
 group by b.username, a.tablespace_name, b.TEMPORARY_TABLESPACE, b.profile'''
    deflt_info, title = ora_all(db_args, select_deflt_info_sql, mode)
    if deflt_info != []:
        info_table = res_table(deflt_info, title)
        print("\nINFO:导出对象用户的策略、默认表空间及默认临时表空间信息如下:")
        print(info_table)
    return deflt_info
def slow_log(mysql_args, os_args):
    slowlog = get_all(mysql_args, "show global variables where variable_name in ('slow_query_log','slow_query_log_file','log_output')")
    data_dir = get_all(mysql_args, "show global variables like 'datadir'")
    # The unpacking below relies on SHOW GLOBAL VARIABLES returning rows ordered by
    # Variable_name: log_output, slow_query_log, slow_query_log_file
    log_param, log_on, log_path = slowlog
    print("\nINFO:\nMySQL慢日志参数如下:")
    title = ['Variable_name', 'Value']
    if log_on[1] == 'OFF':
        slowlog_table = res_table(slowlog, title)
        print(slowlog_table)
        print("\nINFO:\n该MySQL数据库未开启慢日志,无需清理.")
    else:
        if 'NONE' in log_param[1]:
            slowlog_table = res_table(slowlog, title)
            print(slowlog_table)
            print("\nINFO:\n该MySQL数据库输出存在NONE,无需清理.")
        elif log_param[1] == 'FILE':
            log_size = round(int(ssh_input_noprint(os_args, f"du -sk {log_path[1]}|awk '{{print $1}}'")[0].replace('\n', '')) / 1024, 3)
            slowlog.append(('Size of file(MB)', log_size))
            slowlog_table = res_table(slowlog, title)
            print(slowlog_table)
            if log_size > 1024:
                print("\nINFO:\n清理MySQL慢日志:")
                clean_log_file(os_args, log_path[1])
                run_noprint(mysql_args, "flush slow logs")
                print("\nINFO:\nMySQL慢日志清理完毕.")
                slow_log(mysql_args, os_args)
        elif log_param[1] == 'TABLE':
            table_file_path = f"{data_dir[0][1]}/mysql/slow_log.CSV".replace('//', '/')
            table_size = round(int(ssh_input_noprint(os_args, f"ls -l {table_file_path}|awk '{{print $5}}'")[0].replace('\n', '')) / 1024 / 1024, 3)
            slowlog.append(('Size of table(MB)', table_size))
            slowlog_table = res_table(slowlog, title)
            print(slowlog_table)
            if table_size > 1024:
                print("\nINFO:\n清理MySQL慢日志表:")
                more_sql(mysql_args, ['drop table IF EXISTS mysql.mc_slow_log_old',
                                      'set global slow_query_log=off',
                                      'rename table mysql.slow_log to mysql.mc_slow_log_old',
                                      'create table mysql.slow_log like mysql.mc_slow_log_old',
                                      'set global slow_query_log=on',
                                      'drop table IF EXISTS mysql.mc_slow_log_old'])
                print("\nINFO:\nMySQL慢日志表清理完毕.")
                slow_log(mysql_args, os_args)
        elif 'FILE' in log_param[1] and 'TABLE' in log_param[1]:
            log_size = round(int(ssh_input_noprint(os_args, f"du -sk {log_path[1]}|awk '{{print $1}}'")[0].replace('\n', '')) / 1024, 3)
            slowlog.append(('Size of file(MB)', log_size))
            table_file_path = f"{data_dir[0][1]}/mysql/slow_log.CSV".replace('//', '/')
            table_size = round(int(ssh_input_noprint(os_args, f"ls -l {table_file_path}|awk '{{print $5}}'")[0].replace('\n', '')) / 1024 / 1024, 3)
            slowlog.append(('Size of table(MB)', table_size))
            slowlog_table = res_table(slowlog, title)
            print(slowlog_table)
            if log_size > 1024:
                print("\nINFO:\n清理MySQL慢日志:")
                clean_log_file(os_args, log_path[1])
                run_noprint(mysql_args, "flush slow logs")
                print("\nINFO:\nMySQL慢日志清理完毕.")
            if table_size > 1024:
                print("\nINFO:\n清理MySQL慢日志表:")
                more_sql(mysql_args, ['drop table IF EXISTS mysql.mc_slow_log_old',
                                      'set global slow_query_log=off',
                                      'rename table mysql.slow_log to mysql.mc_slow_log_old',
                                      'create table mysql.slow_log like mysql.mc_slow_log_old',
                                      'set global slow_query_log=on',
                                      'drop table IF EXISTS mysql.mc_slow_log_old'])
                print("\nINFO:\nMySQL慢日志表清理完毕.")
            if table_size > 1024 or log_size > 1024:
                # Show the slow-log state again after the clean-up
                print("\nINFO:\nMySQL慢日志参数如下:")
                slowlog_aft = get_all(mysql_args, "show global variables where variable_name in ('slow_query_log','slow_query_log_file','log_output')")
                log_size_aft = round(int(ssh_input_noprint(os_args, f"du -sk {log_path[1]}|awk '{{print $1}}'")[0].replace('\n', '')) / 1024, 3)
                slowlog_aft.append(('Size of file(MB)', log_size_aft))
                table_size_aft = round(int(ssh_input_noprint(os_args, f"ls -l {table_file_path}|awk '{{print $5}}'")[0].replace('\n', '')) / 1024 / 1024, 3)
                slowlog_aft.append(('Size of table(MB)', table_size_aft))
                slowlog_table_aft = res_table(slowlog_aft, title)
                print(slowlog_table_aft)
    return "clean slow log ok"
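# NOTE: more_sql() is implemented elsewhere; its assumed behaviour (illustrative sketch
# only, not the project's actual helper) is to run a list of SQL statements one by one on
# the MySQL instance, e.g. via the same silent execution helper used above.
def _more_sql_sketch(mysql_args, sql_list):
    for sql in sql_list:
        run_noprint(mysql_args, sql)  # run_noprint() executes a single statement silently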
def index_vali(src_args, tag_args, _REMOTE_COMMAND, _REMOTE_TAG_COMMAND):
    get_diff_index_count_sql = '''select TABLE_OWNER, TABLE_NAME, COUNT(*)
  from DBA_INDEXES@ops_dblink
 where owner in (select owner from ops_count where OBJECT_TYPE = 'INDEX')
 group by table_owner, table_name
minus
select TABLE_OWNER, TABLE_NAME, COUNT(*)
  from DBA_INDEXES
 where owner in (select owner from ops_count where OBJECT_TYPE = 'INDEX')
 group by table_owner, table_name;'''
    get_diff_index_info_sql = '''set lin 10000
select index_owner, index_name, table_owner, table_name,
       listagg(to_char(column_name), ',') within group(order by index_name) as full_column
  from DBA_IND_COLUMNS@ops_dblink
 where table_owner = '%s'
   and table_name = '%s'
 group by index_owner, index_name, table_owner, table_name
minus
select index_owner, index_name, table_owner, table_name,
       listagg(to_char(column_name), ',') within group(order by index_name) as full_column
  from DBA_IND_COLUMNS
 where table_owner = '%s'
   and table_name = '%s'
 group by index_owner, index_name, table_owner, table_name;'''
    get_ind_count_res = ssh_input(tag_args, _REMOTE_TAG_COMMAND % get_diff_index_count_sql)
    ind_list = get_ssh_list(get_ind_count_res)
    ind_res_list = []
    ind_ddl_list = []
    for obj in ind_list:
        get_diff_index_info_sql_tmp = get_diff_index_info_sql % (obj[0], obj[1], obj[0], obj[1])
        get_ind_res = ssh_input(tag_args, _REMOTE_TAG_COMMAND % get_diff_index_info_sql_tmp)
        ind_info_list = get_ssh_list(get_ind_res)[0]
        ind_res_list.append(ind_info_list)
        ## Fetch the DDL of the missing index
        ind_owner, ind_name, _, _, _ = ind_info_list
        ddl_res = get_ddl_info(src_args, ind_name, ind_owner, 'INDEX', _REMOTE_COMMAND)
        ind_ddl_list.append(ddl_res)
    print("\nINFO: 目标库缺失的索引具体信息请查看本地 ops_check_detail.txt文件:")
    title = ['索引用户名', '索引名', '表用户名', '表名', '列名']
    table_res = res_table(ind_res_list, title)
    with open('ops_check_detail.txt', 'a+', encoding='utf-8') as file:
        file.write(f"\nINFO: 目标库缺失的INDEX 具体信息:\n{table_res}\n")
    ssh_input(tag_args, f'''echo "{''.join(ind_ddl_list)}" >> /tmp/ops_check.sql''')
    print("\nINFO: 目标库缺失 INDEX 的ddl语句请在目标端查看 /tmp/ops_check.sql 文件 ")
    return ind_res_list