def getash(ipadress, port, servicename, dbausername, dbapassword,
           minutebefore, duration, instance_number):
    try:
        connection = Oracle_Conn(dbausername, dbapassword, ipadress, port,
                                 servicename)
    except:
        return "Error: database connection failed"
    # both RAC nodes share the same dbid
    re_dbid = connection.execsql(
        "select dbid from v$database")  # returns an error str or a result list
    begintime = "SYSDATE-" + str(minutebefore) + "/1440"  # e.g. SYSDATE-30/1440
    endtime = "SYSDATE-" + str(int(minutebefore) - int(duration)) + "/1440"
    if isinstance(re_dbid, str):
        connection.close_commit()
        return "Error: failed to fetch database info"
    else:
        dbid = re_dbid[0][0]
        ash_sql = '''select output from table(dbms_workload_repository.ash_report_html(
            {0},{1},{2},{3}))'''.format(dbid, instance_number, begintime, endtime)
        re_ash = connection.execsql(ash_sql)
        if isinstance(re_ash, str):
            connection.close_commit()
            return "Error: failed to fetch the ASH report: " + re_ash
        else:
            ashrpt_name = 'ashrpt_' + servicename + '_' + str(instance_number) + \
                '_minus' + str(minutebefore) + '_duration' + str(duration) + '.html'
            try:
                makehtml(ashrpt_name, re_ash)
                connection.close_commit()
            except:
                return "Error: failed to generate the ASH report file"
            else:
                return "Success: please download the report"
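# Usage sketch (illustrative only; host, port, service name and credentials
# below are placeholders, not values from this project): generate an ASH
# report for instance 1 covering the window from 30 to 20 minutes ago.
def _demo_getash():
    msg = getash('192.0.2.10', '1521', 'orcl', 'dba_user', 'dba_pwd',
                 minutebefore=30, duration=10, instance_number=1)
    print(msg)  # "Success:..." on success, "Error:..." otherwise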
def planchange(ipadress, port, servicename, dbausername, dbapassword,
               parsing_shema_name):
    try:
        connection = Oracle_Conn(dbausername, dbapassword, ipadress, port,
                                 servicename)
    except:
        return "Error: database connection failed"
    tmpsql = planchange_checksql.format(parsing_shema_name,
                                        planchange_dict['CHANGE_RATE'],
                                        planchange_dict['EXECUTIONS'],
                                        planchange_dict['AVG_TIME_MS'])
    print(tmpsql)
    re_planchange = connection.execsql(tmpsql)  # error str or result list
    if isinstance(re_planchange, str):
        connection.close_commit()
        return "Error: failed to fetch data"
    else:
        connection.close_commit()
        list_planchange = []
        for item in re_planchange:
            dict_tmp = {
                'inst_id': item[0],
                'sql_id': item[1],
                'sql_text': item[2],
                'plan_hash_value': item[3],
                'first_load_time': item[4],
                'last_active_time': item[5],
                'max_last_active_time': item[6],
                'executions': item[7],
                'avg_time_ms': item[8],
                'best_avg_time_ms': item[9],
                'change_rate': item[10]
            }
            list_planchange.append(dict_tmp)
        return list_planchange
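# Usage sketch (placeholder connection details). planchange_checksql and
# planchange_dict are module-level definitions assumed to exist; the dict
# supplies the CHANGE_RATE / EXECUTIONS / AVG_TIME_MS thresholds formatted
# into the check SQL.
def _demo_planchange():
    rows = planchange('192.0.2.10', '1521', 'orcl', 'dba_user', 'dba_pwd',
                      parsing_shema_name='APPUSER')
    if isinstance(rows, str):
        print(rows)  # "Error:..." message
    else:
        for r in rows:
            print("%s %s %s" % (r['sql_id'], r['plan_hash_value'],
                                r['change_rate']))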
def inefficient(ipadress, port, servicename, dbausername, dbapassword,
                parsing_shema_name):
    try:
        connection = Oracle_Conn(dbausername, dbapassword, ipadress, port,
                                 servicename)
    except:
        return "Error: database connection failed"
    tmpsql = inefficient_checksql.format(
        parsing_shema_name,
        inefficient_dict['EXECUTIONS'], inefficient_dict['EXE_MIN_SINCE'],
        inefficient_dict['AVG_TIME_MS'], inefficient_dict['AVG_GETS_MB'],
        inefficient_dict['AVG_READS_MB'])
    re_inefficient = connection.execsql(tmpsql)  # error str or result list
    if isinstance(re_inefficient, str):
        connection.close_commit()
        return "Error: failed to fetch data"
    else:
        list_inefficient = []
        for item in re_inefficient:
            dict_tmp = {
                'inst_id': item[0],
                'sql_id': item[1],
                # item[2] is a CLOB; read() it into a str before close_commit()
                'sql_text': item[2].read(),
                'plan_hash_value': item[3],
                'first_load_time': item[4],
                'last_active_time': item[5],
                'executions': item[6],
                'avg_time_ms': item[7],
                'avg_gets_mb': item[8],
                'avg_reads_mb': item[9]
            }
            list_inefficient.append(dict_tmp)
        connection.close_commit()
        return list_inefficient
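# Usage sketch (placeholder connection details; inefficient_checksql and
# inefficient_dict are module-level definitions assumed to exist): list the
# flagged statements, slowest first.
def _demo_inefficient():
    rows = inefficient('192.0.2.10', '1521', 'orcl', 'dba_user', 'dba_pwd',
                       parsing_shema_name='APPUSER')
    if not isinstance(rows, str):
        for r in sorted(rows, key=lambda x: x['avg_time_ms'], reverse=True):
            print("%s %s %s" % (r['sql_id'], r['avg_time_ms'],
                                r['avg_gets_mb']))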
class check():
    def __init__(self, username, password, ipadress, port, servicename,
                 dict_config):
        self.connection = Oracle_Conn(username, password, ipadress, port,
                                      servicename)
        # configuration
        self.dict_config = dict_config

    # tables
    def tables_info(self):
        """get tables info"""
        return '''select distinct a.table_name,
            decode(a.partitioned,'YES',b.tablespace_name,a.tablespace_name),
            a.partitioned,a.degree
            from user_tables a
            left join user_tab_partitions b on a.table_name=b.table_name
            where a.table_name not like 'BIN$%' and a.table_name not like '%BAK%'
            and a.table_name not like '%TMP%' and a.table_name not like '%TEMP%' '''

    def tables_notpartition(self):
        """big tables that are not partitioned"""
        return '''select table_name from user_tables
            where partitioned='NO'
            and table_name not like 'BIN$%' and table_name not like '%BAK%'
            and table_name not like '%TMP%' and table_name not like '%TEMP%'
            and num_rows>{0} '''.format(int(self.dict_config['max_row_eachtable']))

    def tables_notinterval(self):
        """range-partitioned tables that are not interval partitioned"""
        return '''select table_name,partition_count from user_part_tables
            where table_name not like 'BIN$%' and table_name not like '%BAK%'
            and table_name not like '%TMP%' and table_name not like '%TEMP%'
            and interval is null and partitioning_type ='RANGE' '''

    def tables_datatype(self):
        """table columns with non-standard data types"""
        return '''select c.table_name,c.column_name,c.data_type
            from user_tab_columns c,user_tables t
            where t.table_name=c.table_name
            and t.table_name not like 'BIN$%' and t.table_name not like '%BAK%'
            and t.table_name not like '%TMP%' and t.table_name not like '%TEMP%'
            and data_type not in ({0}) '''.format(self.dict_config['standard_datatype'])

    def tables_colcnt(self):
        """tables column count"""
        return '''select c.table_name,count(*)
            from user_tab_columns c,user_tables t
            where c.table_name=t.table_name
            and t.table_name not like 'BIN$%' and t.table_name not like '%BAK%'
            and t.table_name not like '%TMP%' and t.table_name not like '%TEMP%'
            group by c.table_name
            having count(*)> {0}'''.format(self.dict_config['max_colnum_eachtable'])

    def tables_standard_cols(self):
        """tables standard columns"""
        return '''select a.table_name,count(b.column_name)
            from user_tables a
            left join user_tab_columns b on a.table_name=b.table_name
            and b.column_name||','||b.data_type in ({0})
            where a.table_name not like 'BIN$%' and a.table_name not like '%BAK%'
            and a.table_name not like '%TMP%' and a.table_name not like '%TEMP%'
            group by a.table_name
            having count(b.column_name)!=5'''.format(self.dict_config['standard_cols'])

    def tables_indexcnt(self):
        """tables index count"""
        return '''select a.table_name,count(b.index_name)
            from user_tables a
            left join user_indexes b on a.table_name=b.table_name
            and b.index_type not in ('LOB')
            where a.table_name not like 'BIN$%' and a.table_name not like '%BAK%'
            and a.table_name not like '%TMP%' and a.table_name not like '%TEMP%'
            group by a.table_name '''

    def tables_primarykey(self):
        """tables primary key"""
        return '''select a.table_name,b.constraint_name,b.status,b.validated
            from user_tables a
            left join user_constraints b on a.table_name=b.table_name
            and b.constraint_type='P'
            where a.table_name not like 'BIN$%' and a.table_name not like '%BAK%'
            and a.table_name not like '%TMP%' and a.table_name not like '%TEMP%' '''

    # indexes
    def indexes_info(self):
        """get indexes info"""
        return '''select table_name, index_name, uniqueness,tablespace_name,degree,
            'NO' PARTITIONED from user_indexes
            where partitioned = 'NO' and index_type not in ('LOB')
            and table_name not like 'BIN$%' and table_name not like '%BAK%'
            and table_name not like '%TMP%' and table_name not like '%TEMP%'
            union all
            select distinct b.table_name,a.index_name,b.uniqueness,a.tablespace_name,
            b.degree,'YES' PARTITIONED
            from user_ind_partitions a, user_indexes b
            where a.index_name = b.index_name and b.partitioned = 'YES'
            and index_type not in ('LOB')
            and b.table_name not like 'BIN$%' and b.table_name not like '%BAK%'
            and b.table_name not like '%TMP%' and b.table_name not like '%TEMP%' '''

    def indexes_pk(self):
        """get primary key indexes"""
        return '''select a.table_name,b.index_name
            from user_tables a ,user_constraints b
            where a.table_name=b.table_name and b.constraint_type='P'
            and index_name is not null
            and a.table_name not like 'BIN$%' and a.table_name not like '%BAK%'
            and a.table_name not like '%TMP%' and a.table_name not like '%TEMP%' '''

    def indexes_unnormal(self):
        """indexes of non-NORMAL types"""
        return '''select table_name,index_name,index_type from user_indexes
            where index_type not in ('NORMAL','LOB')
            and table_name not like 'BIN$%' and table_name not like '%BAK%'
            and table_name not like '%TMP%' and table_name not like '%TEMP%' '''

    def indexes_colcnt(self):
        """indexes column count"""
        return '''select table_name,index_name,count(*) from user_ind_columns
            where table_name not like 'BIN$%' and table_name not like '%BAK%'
            and table_name not like '%TMP%' and table_name not like '%TEMP%'
            group by table_name,index_name'''

    def indexes_locality(self):
        """locality of indexes on partitioned tables"""
        return '''select a.table_name, a.index_name, a.partitioned,
            nvl(b.locality,'GLOBAL') locality
            from (select i.table_name, i.index_name, i.partitioned
                  from user_indexes i, user_tables t
                  where i.table_name = t.table_name and t.partitioned = 'YES'
                  and i.index_type not in ('LOB')
                  and t.table_name not like 'BIN$%' and t.table_name not like '%BAK%'
                  and t.table_name not like '%TMP%' and t.table_name not like '%TEMP%') a
            left join user_part_indexes b
            on b.index_name = a.index_name and b.table_name=a.table_name'''

    def indexes_redundant(self):
        """redundant indexes"""
        return '''select b.table_name, b.index_name, b.column_name,
            a.index_name, a.column_name
            from (select table_name, index_name,
                         LISTAGG(column_name,',') within GROUP(order by column_position) as column_name
                  from user_ind_columns
                  where table_name not like 'BIN$%' and table_name not like '%BAK%'
                  and table_name not like '%TMP%' and table_name not like '%TEMP%'
                  group by table_name, index_name) a,
                 (select table_name, index_name,
                         LISTAGG(column_name,',') within GROUP(order by column_position) as column_name
                  from user_ind_columns
                  where table_name not like 'BIN$%' and table_name not like '%BAK%'
                  and table_name not like '%TMP%' and table_name not like '%TEMP%'
                  group by table_name, index_name) b
            where a.table_name = b.table_name and a.index_name != b.index_name
            and a.column_name like b.column_name||'%' '''

    # sequences
    def sequences_info(self):
        """sequences"""
        return "select sequence_name,cycle_flag,order_flag,cache_size,round(last_number/max_value,2)*100,max_value from user_sequences"

    # comments
    def comments_table(self):
        """tables without a table-level comment"""
        return '''select table_name from user_tab_comments
            where comments is null and table_type='TABLE'
            and table_name not like 'BIN$%' and table_name not like '%BAK%'
            and table_name not like '%TMP%' and table_name not like '%TEMP%' '''

    def comments_tablecol(self):
        """table columns and which of them are commented"""
        return '''select table_name,
            to_char(WMSYS.WM_CONCAT(column_name)) as column_name,
            to_char(WMSYS.WM_CONCAT(comment_column)) as comment_column
            from (select a.table_name,a.column_name,
                  (case when b.comments is null then '' else b.column_name end) COMMENT_COLUMN
                  from user_tab_cols a ,user_col_comments b
                  where a.table_name=b.table_name and a.column_name=b.column_name
                  and a.table_name not like 'BIN$%' and a.table_name not like '%BAK%'
                  and a.table_name not like '%TMP%' and a.table_name not like '%TEMP%')
            group by table_name '''

    def check_elements(self, list_elementcheck, tablespacecheck,
                       standardcolcheck):
        # list_elementcheck e.g. ['tables','indexes','comments','sequences']
        checklist = []
        for item in dir(check):  # dir/inspect/getattr are all useful here
            for element in list_elementcheck:
                if item.startswith(element):
                    checklist.append(item)
        dict_return = {}
        for item in checklist:
            sqlres = self.connection.execsql(getattr(self, item)())
            if isinstance(sqlres, list):
                dict_return[item] = copy.deepcopy(sqlres)
        self.connection.close_commit()
        # review logic starts here
        # tables
        dict_table = {}
        if "tables" in list_elementcheck:
            data_tbs = self.dict_config['data_tbs']
            # check table names and tablespaces -- tables_info
            for i in dict_return['tables_info']:
                dict_table[i[0]] = []
                if not i[0].startswith(self.dict_config['tabname']):
                    dict_table[i[0]].append(
                        "non-standard table name, should start with " +
                        str(self.dict_config['tabname']) + ";")
                if tablespacecheck == 'tablespacecheck':
                    if i[1] != data_tbs:
                        dict_table[i[0]].append(
                            "uses tablespace " + str(i[1]) +
                            ", should use tablespace " + str(data_tbs) + ";")
                degree = str(i[3]).strip()
                # test 'DEFAULT' first: int('DEFAULT') would raise ValueError
                if degree.upper() == 'DEFAULT' or int(degree) != 1:
                    dict_table[i[0]].append(
                        "table degree is " + degree +
                        ", please disable parallelism;")
            # big tables without partitioning -- tables_notpartition
            for i in dict_return['tables_notpartition']:
                dict_table[i[0]].append(
                    "non-partitioned table " + str(i[0]) + " exceeds " +
                    str(self.dict_config['max_row_eachtable']) +
                    " rows, consider partitioning it;")
            # range partitioning without intervals -- tables_notinterval
            for i in dict_return['tables_notinterval']:
                dict_table[i[0]].append(
                    "partitioned table " + str(i[0]) + " has " + str(i[1]) +
                    " partitions and is not interval partitioned, check whether more partitions are needed;")
            # column data type compliance -- tables_datatype
            for i in dict_return['tables_datatype']:
                dict_table[i[0]].append(
                    "column " + str(i[1]) + " uses non-compliant data type " +
                    str(i[2]) + ";")
            # total column count -- tables_colcnt
            for i in dict_return['tables_colcnt']:
                dict_table[i[0]].append(
                    "table has more than " +
                    str(self.dict_config['max_colnum_eachtable']) + " columns;")
            # presence of the standard columns -- tables_standard_cols
            if standardcolcheck == 'standardcolcheck':
                for i in dict_return['tables_standard_cols']:
                    dict_table[i[0]].append(
                        "table is missing some of the standard columns;")
            # index count per table -- tables_indexcnt
            for i in dict_return['tables_indexcnt']:
                if i[1] > int(self.dict_config['max_indnum_eachtable']):
                    dict_table[i[0]].append(
                        "table has " + str(i[1]) + " indexes, more than " +
                        str(self.dict_config['max_indnum_eachtable']) +
                        "; confirm they are all necessary;")
                elif i[1] == 0:
                    dict_table[i[0]].append(
                        "table has no index at all, please review;")
            # primary keys -- tables_primarykey
            for i in dict_return['tables_primarykey']:
                if i[1] is None:
                    dict_table[i[0]].append(
                        "table has no primary key constraint, please add one;")
                elif i[1] is not None and i[2] == 'DISABLED':
                    # index_name is null in this case
                    dict_table[i[0]].append(
                        "primary key constraint " + str(i[1]) + " is DISABLED;")
                elif i[1] is not None and i[2] == 'ENABLED' and i[3] == 'NOT VALIDATED':
                    dict_table[i[0]].append(
                        "primary key constraint " + str(i[1]) +
                        " was created NOT VALIDATED;")
        # indexes (including primary key indexes)
        dict_ind = {}  # records every index
        dict_pk = {}   # records primary key indexes
        if "indexes" in list_elementcheck:
            ind_tbs = self.dict_config['ind_tbs']
            # primary key indexes -- indexes_pk
            for i in dict_return['indexes_pk']:
                dict_pk[i[0] + "." + i[1]] = []
            # all indexes -- indexes_info
            for i in dict_return['indexes_info']:
                tab_ind_name = i[0] + "." + i[1]
                dict_ind[tab_ind_name] = []
                degree = str(i[4]).strip()
                if tab_ind_name not in dict_pk:
                    if tablespacecheck == 'tablespacecheck':
                        if i[3] != ind_tbs:
                            dict_ind[tab_ind_name].append(
                                "index uses tablespace " + str(i[3]) +
                                ", should use tablespace " + str(ind_tbs) + ";")
                    if i[2] == 'NONUNIQUE' and not i[1].startswith(
                            self.dict_config['indname']):
                        dict_ind[tab_ind_name].append(
                            "non-standard index name, should start with " +
                            str(self.dict_config['indname']) + ";")
                    if i[2] == 'UNIQUE' and not i[1].startswith(
                            self.dict_config['uniqindname']):
                        dict_ind[tab_ind_name].append(
                            "non-standard index name, unique indexes should start with " +
                            str(self.dict_config['uniqindname']) + ";")
                    if degree.upper() == 'DEFAULT' or int(degree) != 1:
                        dict_ind[tab_ind_name].append(
                            "index degree is " + degree +
                            ", please disable parallelism;")
                else:
                    if tablespacecheck == 'tablespacecheck':
                        if i[3] != ind_tbs:
                            dict_ind[tab_ind_name].append(
                                "primary key index uses tablespace " + str(i[3]) +
                                ", should use tablespace " + str(ind_tbs) + ";")
                    if not i[1].startswith(self.dict_config['pkname']):
                        dict_ind[tab_ind_name].append(
                            "non-standard primary key index name, should start with " +
                            str(self.dict_config['pkname']) + ";")
                    if degree.upper() == 'DEFAULT' or int(degree) != 1:
                        dict_ind[tab_ind_name].append(
                            "primary key index degree is " + degree +
                            ", please disable parallelism;")
                    if i[2] == 'NONUNIQUE':
                        dict_ind[tab_ind_name].append(
                            "primary key index is not a unique index")
            # non-NORMAL indexes -- indexes_unnormal
            for i in dict_return['indexes_unnormal']:
                tab_ind_name = i[0] + "." + i[1]
                dict_ind[tab_ind_name].append(
                    "this is a " + str(i[2]) +
                    " index; do not use non-NORMAL index types, please rebuild;")
            # indexes with too many columns -- indexes_colcnt
            for i in dict_return['indexes_colcnt']:
                tab_ind_name = i[0] + "." + i[1]
                if tab_ind_name not in dict_pk:
                    if i[2] > int(self.dict_config['ind_max_colnum']):
                        dict_ind[tab_ind_name].append(
                            "composite index has more than " +
                            str(self.dict_config['ind_max_colnum']) +
                            " columns, please review;")
                else:
                    if i[2] > int(self.dict_config['pk_max_colnum']):
                        dict_ind[tab_ind_name].append(
                            "primary key index has more than " +
                            str(self.dict_config['pk_max_colnum']) +
                            " columns, please review;")
            # non-local indexes on partitioned tables -- indexes_locality
            for i in dict_return['indexes_locality']:
                tab_ind_name = i[0] + "." + i[1]
                if not (i[2] == 'YES' and i[3] == 'LOCAL'):
                    dict_ind[tab_ind_name].append(
                        "do not use global indexes on partitioned tables, please rebuild as local;")
            # redundant indexes -- indexes_redundant
            for i in dict_return['indexes_redundant']:
                tab_ind_name = i[0] + "." + i[1]
                dict_ind[tab_ind_name].append(
                    "this index (" + str(i[2]) + ") is redundant; it is covered by index " +
                    str(i[3]) + " (" + str(i[4]) + "), consider dropping it;")
        # sequences
        dict_sequence = {}
        if "sequences" in list_elementcheck:
            # sequence info -- sequences_info
            for i in dict_return['sequences_info']:
                dict_sequence[i[0]] = []
                if not i[0].startswith(self.dict_config['seqname']):
                    dict_sequence[i[0]].append(
                        "non-standard sequence name, should start with " +
                        str(self.dict_config['seqname']) + ";")
                if i[1] == 'Y':  # the sequence has the CYCLE option
                    if len(str(i[5])) < int(self.dict_config['seq_len']):
                        dict_sequence[i[0]].append(
                            "cycling sequence max value has fewer than " +
                            str(self.dict_config['seq_len']) +
                            " digits, please review;")
                elif i[1] == 'N':
                    if i[4] > int(self.dict_config['seq_usedrate']):
                        dict_sequence[i[0]].append(
                            "sequence usage has reached " + str(i[4]) +
                            "%, please review;")
                    if len(str(i[5])) < int(self.dict_config['seq_len']):
                        dict_sequence[i[0]].append(
                            "non-cycling sequence max value has fewer than " +
                            str(self.dict_config['seq_len']) +
                            " digits, please review;")
                if i[2] == 'Y':
                    dict_sequence[i[0]].append(
                        "sequence uses the ORDER option, which is not allowed;")
                if i[3] < int(self.dict_config['seqcache']):
                    dict_sequence[i[0]].append(
                        "sequence CACHE is smaller than " +
                        str(self.dict_config['seqcache']) + ", please review;")
        # comments
        dict_comment = {}
        if "comments" in list_elementcheck:
            # table-level comments -- comments_table
            for i in dict_return['comments_table']:
                dict_comment[i[0]] = []
                dict_comment[i[0]].append("table has no table-level comment;")
            # column comments -- comments_tablecol
            for i in dict_return['comments_tablecol']:
                if i[0] not in dict_comment:
                    dict_comment[i[0]] = []
                if i[2] is None:
                    dict_comment[i[0]].append(
                        "table has no column comments at all;")
                elif i[2] is not None:
                    list_column = i[1].split(",")
                    list_comment_column = i[2].split(",")
                    ret = [ii for ii in list_column
                           if ii not in list_comment_column]
                    strret = ','.join(ret)
                    if strret:
                        dict_comment[i[0]].append(
                            "columns " + strret + " have no comment;")
        list_returninfo = []
        for key in dict_table:
            if dict_table[key]:
                list_returninfo.append({
                    'object': key,
                    'object_type': 'TABLE',
                    'problem': '\n'.join(dict_table[key])
                })
        for key in dict_ind:
            if dict_ind[key]:
                list_returninfo.append({
                    'object': key,
                    'object_type': 'INDEX',
                    'problem': '\n'.join(dict_ind[key])
                })
        for key in dict_sequence:
            if dict_sequence[key]:
                list_returninfo.append({
                    'object': key,
                    'object_type': 'SEQUENCE',
                    'problem': '\n'.join(dict_sequence[key])
                })
        for key in dict_comment:
            if dict_comment[key]:
                list_returninfo.append({
                    'object': key,
                    'object_type': 'COMMENT',
                    'problem': '\n'.join(dict_comment[key])
                })
        return list_returninfo
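# Illustrative configuration and run. Every value below is an assumption for
# demonstration only; real naming prefixes, tablespaces and thresholds come
# from the caller's configuration.
def _demo_schema_review():
    dict_config = {
        'tabname': 'T_', 'indname': 'IDX_', 'uniqindname': 'UIDX_',
        'pkname': 'PK_', 'seqname': 'SEQ_',
        'data_tbs': 'APP_DATA', 'ind_tbs': 'APP_IDX',
        'max_row_eachtable': 10000000, 'max_colnum_eachtable': 50,
        'max_indnum_eachtable': 5, 'ind_max_colnum': 3, 'pk_max_colnum': 2,
        'standard_datatype': "'VARCHAR2','NUMBER','DATE','TIMESTAMP(6)'",
        'standard_cols': "'CREATE_TIME,DATE','UPDATE_TIME,DATE'",
        'seq_len': 18, 'seq_usedrate': 80, 'seqcache': 100,
    }
    reviewer = check('app_user', 'app_pwd', '192.0.2.10', '1521', 'orcl',
                     dict_config)
    problems = reviewer.check_elements(
        ['tables', 'indexes', 'sequences', 'comments'],
        'tablespacecheck', 'standardcolcheck')
    for p in problems:
        print(p['object_type'] + " " + p['object'] + "\n" + p['problem'])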
class impdp_process(expdp_process):
    def __init__(self, dbausername, dbapassword, ipadress, port, servicename,
                 directory, skema, sysuser, syspassword, table_exists_action,
                 remap_table, expdp_job_name, expdp_skema, tables,
                 expdp_tablespace, expdp_dumpfilepath, expdp_parallel):
        self.connection = Oracle_Conn(dbausername, dbapassword, ipadress,
                                      port, servicename)
        self.ipadress = ipadress
        self.port = port
        self.servicename = servicename
        self.directory = directory
        self.skema = skema
        self.sysuser = sysuser
        self.syspassword = syspassword
        self.table_exists_action = table_exists_action
        self.remap_table = remap_table
        self.expdp_job_name = expdp_job_name
        self.expdp_skema = expdp_skema
        self.tables = tables
        self.expdp_tablespace = expdp_tablespace
        self.expdp_dumpfilepath = expdp_dumpfilepath
        self.expdp_parallel = expdp_parallel

    # get_dumpfilepath, check_tables, close and get_oracle_env are inherited

    def get_tablespace(self):
        tablespace_sql = '''select tablespace_name from DBA_TS_QUOTAS
            where username='{0}' '''.format(self.skema.upper())
        #tablespace_sql2 = '''select distinct tablespace_name from dba_segments where owner='{0}' '''.format(self.skema.upper())
        dict_return = {}.fromkeys(['idx', 'tab'])
        try:
            retablespace = self.connection.execsql(tablespace_sql)
        except:
            return dict_return
        if isinstance(retablespace, str):
            return dict_return
        if retablespace == []:
            return dict_return
        for item in retablespace:
            if item[0].endswith("IDX"):
                dict_return['idx'] = item[0]
            if item[0].endswith("DATA"):
                dict_return['tab'] = item[0]
        return dict_return

    def get_impdpcommand(self):
        impdp_job_name = "IMP_" + self.expdp_job_name
        dict_tablespace = self.get_tablespace()
        dict_expdp_tablespace = eval(self.expdp_tablespace)
        tmp_list = []
        if dict_tablespace['tab']:
            for item in dict_expdp_tablespace['tab'].split(","):
                tmp_list.append(item + ":" + dict_tablespace['tab'])
        if dict_tablespace['idx']:
            for item in dict_expdp_tablespace['idx'].split(","):
                tmp_list.append(item + ":" + dict_tablespace['idx'])
        remap_tablespace = ""
        remap_table = ""
        remap_schema = ""
        if tmp_list != []:
            remap_tablespace = "remap_tablespace=" + ','.join(tmp_list)
        if self.remap_table != "":
            remap_table = "remap_table=" + self.remap_table
        if self.expdp_skema != self.skema:
            remap_schema = "remap_schema=" + self.expdp_skema + ":" + self.skema
        remap_info = ""
        if remap_table:
            remap_info = remap_info + remap_table + " "
        if remap_schema:
            remap_info = remap_info + remap_schema + " "
        if remap_tablespace:
            remap_info = remap_info + remap_tablespace + " "
        impdpcommand = '''impdp "'/ as sysdba'" directory={0} dumpfile={1}.dmp logfile={2}.log tables={3} cluster=N job_name={4} parallel={5} table_exists_action={6} {7}'''\
            .format(self.directory, self.expdp_job_name + "%U", impdp_job_name,
                    self.tables, impdp_job_name, self.expdp_parallel,
                    self.table_exists_action, remap_info)
        return impdp_job_name, impdpcommand

    def start_impdp(self, impdp_job_name, impdpcommand):
        #impdp_job_name, impdpcommand = self.get_impdpcommand()
        pathname = self.get_dumpfilepath()
        oracle_env = self.get_oracle_env()
        if oracle_env == "":
            logger.info("import job_name:%s failed to start, could not read the Oracle environment", impdp_job_name)
            return 'fail', "import job_name:%s failed to start, could not read the Oracle environment" % impdp_job_name
        logger.info("about to start import job_name:%s", impdp_job_name)
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(self.ipadress, 22, self.sysuser, self.syspassword)
            logger.info(" starting import job_name:%s (%s:%s/%s)\n import command %s",
                        impdp_job_name, self.ipadress, self.port,
                        self.servicename, impdpcommand)
            stdin, stdout, stderr = ssh.exec_command(oracle_env + '\n' +
                                                     impdpcommand)
            list_logfile = stderr.readlines()  # impdp writes its log to stderr
            logger.info(" import job_name:%s finished", impdp_job_name)
        except:
            logger.info(" import job_name:%s raised an exception", impdp_job_name)
            return "fail", "import job_name:%s raised an exception" % impdp_job_name
        else:
            ssh.close()
            logger.info(" import job_name:%s log:%s", impdp_job_name,
                        ''.join(list_logfile))
            status = 'warning'  # default in case the job summary line is missing
            for i in list_logfile:
                if i.startswith('''Job "SYS"."''' + impdp_job_name):
                    if i.find("successfully completed") != -1:
                        status = "suc"
                    else:
                        status = 'warning'
            return status, ''.join(list_logfile)

    def trans_dmpfile(self, exp_dumpname, exp_path, imp_path):
        # note: source_ip/source_sysuser/source_syspassword, the target_*
        # attributes, local_path and local_logfile are expected to be set on
        # the instance by the caller; they are not assigned in __init__
        try:
            # download the dump file from the source host
            t = paramiko.Transport((self.source_ip, 22))
            t.connect(username=self.source_sysuser,
                      password=self.source_syspassword)
            sftp = paramiko.SFTPClient.from_transport(t)
            sftp.get(exp_path + "/" + exp_dumpname,
                     self.local_path + "/" + exp_dumpname)
            t.close()
            # upload it from the local host to the target database server
            t2 = paramiko.Transport((self.target_ip, 22))
            t2.connect(username=self.target_sysuser,
                       password=self.target_syspassword)
            sftp2 = paramiko.SFTPClient.from_transport(t2)
            sftp2.put(self.local_path + "/" + exp_dumpname,
                      imp_path + "/" + exp_dumpname)
            t2.close()
        except:
            log(self.local_logfile, 'dumpfile transfer fail')
            return "F"
        else:
            log(self.local_logfile,
                'dumpfile has been successfully transferred\n[source] ' +
                self.source_ip + ":" + exp_path + "/" + exp_dumpname +
                "\n[target] " + self.target_ip + ":" + imp_path + "/" + exp_dumpname)
            return "S"
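# End-to-end import sketch. All connection details are placeholders;
# expdp_tablespace is the repr of the dict that expdp_process.get_tablespace()
# returned on the source side, and expdp_job_name is the jobname produced by
# get_expdpcommand() there.
def _demo_impdp():
    imp = impdp_process('dba_user', 'dba_pwd', '192.0.2.20', '1521', 'orcl2',
                        'DUMP_DIR', 'appuser', 'oracle', 'os_pwd',
                        table_exists_action='replace', remap_table='',
                        expdp_job_name='APPUSER201703251030',
                        expdp_skema='appuser',
                        tables='appuser.t1,appuser.t2',
                        expdp_tablespace="{'tab': 'SRC_DATA', 'idx': 'SRC_IDX'}",
                        expdp_dumpfilepath='/dump', expdp_parallel='4')
    job, cmd = imp.get_impdpcommand()
    status, logtext = imp.start_impdp(job, cmd)
    imp.close()
    print(status)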
class expdp_process():
    def __init__(self, dbausername, dbapassword, ipadress, port, servicename,
                 directory, tables, parallel, content, additional, skema,
                 sysuser, syspassword):
        self.connection = Oracle_Conn(dbausername, dbapassword, ipadress,
                                      port, servicename)
        self.ipadress = ipadress
        self.port = port
        self.servicename = servicename
        self.directory = directory
        self.tables = tables
        self.parallel = parallel
        self.content = content
        self.additional = additional
        self.skema = skema
        self.sysuser = sysuser
        self.syspassword = syspassword

    def get_dumpfilepath(self):
        try:
            repath = self.connection.execsql(
                "select directory_path from dba_directories where directory_name='{0}'"
                .format(self.directory))
        except:
            return "error"
        if isinstance(repath, str):
            return "error"
        if repath == []:
            return "error"
        pathname = repath[0][0]
        if pathname.endswith('/'):
            return pathname[0:-1]
        return pathname

    def get_expdpcommand(self):
        today_value = datetime.datetime.now()
        date = today_value.strftime('%Y%m%d%H%M')
        jobname = self.skema + date
        expdpcommand = '''expdp "'/ as sysdba'" directory={0} dumpfile={1}.dmp logfile={2}.log tables={3} cluster=N compression=all reuse_dumpfiles=y job_name={4} parallel={5} content={6} {7}'''\
            .format(self.directory, jobname + "%U", jobname, self.tables,
                    jobname, self.parallel, self.content, self.additional)
        return jobname, expdpcommand

    # estimate the data volume
    def get_segments_bytes(self):
        list_tmp = []
        list_tables = self.tables.split(',')  # owner.t1,owner.t2
        for item in list_tables:
            list_tmp.append(item.split('.')[1])
        str_tablename = "','".join(list_tmp)
        str_tablename = "'" + str_tablename + "'"
        try:
            reseg = self.connection.execsql(
                "select sum(bytes/1024/1024) from dba_segments where owner='{0}' and segment_name in ({1})"
                .format(self.skema, str_tablename))
        except:
            return "error"
        if isinstance(reseg, str):
            return "error"
        if reseg == []:
            return "error"
        return reseg[0][0]

    # check that the requested tables exist
    def check_tables(self):
        list_tmp = []
        list_tables = self.tables.split(',')  # owner.t1,owner.t2
        for item in list_tables:
            list_tmp.append(item.split('.')[1])
        str_tablename = "','".join(list_tmp)
        str_tablename = "'" + str_tablename + "'"
        try:
            retables = self.connection.execsql(
                "select table_name from dba_tables where owner='{0}' and table_name in ({1})"
                .format(self.skema, str_tablename))
        except:
            return "error"
        if isinstance(retables, str):
            return "error"
        list_tabnames = []
        for item in retables:
            list_tabnames.append(item[0])
        # return the difference: requested tables that do not exist
        list_return = list(set(list_tmp).difference(set(list_tabnames)))
        return ','.join(list_return)

    # look up the tablespaces used by the tables and their indexes
    def get_tablespace(self):
        list_tmp = []
        list_tables = self.tables.split(',')  # owner.t1,owner.t2
        for item in list_tables:
            list_tmp.append(item.split('.')[1])
        str_tablename = "','".join(list_tmp)
        str_tablename = "'" + str_tablename + "'"
        tablespace_sql = '''select distinct tablespace_name from dba_segments
            where owner = '{0}' and segment_name in ({1})'''.format(
            self.skema.upper(), str_tablename)
        tablespace_idx_sql = '''select distinct tablespace_name from dba_segments
            where (owner, segment_name) in
            (select owner, index_name from dba_indexes
             where owner='{0}' and table_name in ({1}))'''.format(
            self.skema.upper(), str_tablename)
        dict_return = {}
        try:
            retablespace = self.connection.execsql(tablespace_sql)
        except:
            dict_return['tab'] = ""
        else:
            list_tmp = []
            for item in retablespace:
                list_tmp.append(item[0])
            dict_return['tab'] = ','.join(list_tmp)
        try:
            retablespace_idx = self.connection.execsql(tablespace_idx_sql)
        except:
            dict_return['idx'] = ""
        else:
            list_tmp = []
            for item in retablespace_idx:
                list_tmp.append(item[0])
            dict_return['idx'] = ','.join(list_tmp)
        return dict_return

    def close(self):
        self.connection.close_commit()

    def get_oracle_env(self):
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(self.ipadress, 22, self.sysuser, self.syspassword)
            stdin, stdout, stderr = ssh.exec_command(
                "cat .bash_profile|grep ORACLE_")
            oracle_env = stdout.read()  # the result comes back on stdout
        except:
            return ""
        else:
            ssh.close()
            return oracle_env

    def start_expdp(self, jobname, expdpcommand):
        pathname = self.get_dumpfilepath()
        #if pathname == "Error":
        #    logger.info("export job_name:%s failed to start, could not resolve the dump directory", jobname)
        #    return 'fail', "export job_name:%s failed to start, could not resolve the dump directory" % jobname
        oracle_env = self.get_oracle_env()
        if oracle_env == "":
            logger.info("export job_name:%s failed to start, could not read the Oracle environment", jobname)
            return 'fail', "export job_name:%s failed to start, could not read the Oracle environment" % jobname
        logger.info("about to start export job_name:%s", jobname)
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(self.ipadress, 22, self.sysuser, self.syspassword)
            logger.info(" starting export job_name:%s (%s:%s/%s)\n export command %s",
                        jobname, self.ipadress, self.port, self.servicename,
                        expdpcommand)
            stdin, stdout, stderr = ssh.exec_command(oracle_env + '\n' +
                                                     expdpcommand)
            list_logfile = stderr.readlines()  # expdp writes its log to stderr
            logger.info(" export job_name:%s finished", jobname)
            stdin, stdout, stderr = ssh.exec_command(
                'cd ' + pathname + ' \n' + 'ls |grep ' + jobname + '*dmp|wc -l')
            numdumpfile = stdout.read()  # the result comes back on stdout
        except:
            logger.info(" export job_name:%s raised an exception", jobname)
            return "fail", "export job_name:%s raised an exception" % jobname
        else:
            ssh.close()
            logger.info(" export job_name:%s log:%s", jobname,
                        ''.join(list_logfile))
            status = 'warning'  # default in case the job summary line is missing
            for i in list_logfile:
                if i.startswith('''Job "SYS"."''' + jobname):
                    if i.find("successfully completed") != -1:
                        status = "suc"
                    else:
                        status = 'warning'
            #if numdumpfile != self.parallel:
            #    status = 'warning'
            return status, ''.join(list_logfile)
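# Export flow sketch (placeholder host/credentials; `tables` uses the
# owner.table,owner.table format the class expects throughout).
def _demo_expdp():
    exp = expdp_process('dba_user', 'dba_pwd', '192.0.2.10', '1521', 'orcl',
                        'DUMP_DIR', 'appuser.t1,appuser.t2', '4', 'all', '',
                        'appuser', 'oracle', 'os_pwd')
    missing = exp.check_tables()
    if missing:  # "error" or a comma-separated list of absent tables
        print("cannot export, check failed or tables missing: " + missing)
        return
    print("estimated size (MB): %s" % exp.get_segments_bytes())
    jobname, cmd = exp.get_expdpcommand()
    status, logtext = exp.start_expdp(jobname, cmd)
    exp.close()
    print(status)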
class check():
    def __init__(self, username, password, ipadress, port, servicename):
        self.connection = Oracle_Conn(username, password, ipadress, port,
                                      servicename)

    def common_part(self, sql, tuple_colname):
        sqlres = self.connection.execsql(sql)
        if isinstance(sqlres, list):
            sqlres.insert(0, tuple_colname)
            return sqlres
        else:
            tmp_list = []
            tmp_list.append(tuple_colname)
            return tmp_list

    def eventmetric(self, name):
        sql = '''select N.name,
            round(E.TIME_WAITED * 10 /
                  (CASE WHEN WAIT_COUNT = 0 THEN 1 ELSE WAIT_COUNT END), 2) WAIT_MS
            FROM V$EVENTMETRIC E, V$EVENT_NAME N
            WHERE E.EVENT_ID = N.EVENT_ID and N.NAME = '{0}' '''.format(name)
        return self.common_part(sql, ("EVENTMETRIC_NAME", "WAIT_MS"))

    def sysmetric(self, name):
        # rownum is evaluated before ORDER BY, so the sort must happen in an
        # inline view for the filter to pick the latest interval
        sql = '''select METRIC_NAME, round(value,2) from
            (select METRIC_NAME, value from v$sysmetric
             where METRIC_NAME = '{0}' order by INTSIZE_CSEC desc)
            where rownum <= 1'''.format(name)
        return self.common_part(sql, ("SYSMETRIC_NAME", "VALUE"))

    def session(self):
        sql = '''select count(*) from v$session where type!='BACKGROUND' '''
        return self.common_part(sql, ("SESSCOUNT", ))

    def session_active(self):
        sql = '''select count(*) from v$session where type!='BACKGROUND' and status='ACTIVE' '''
        return self.common_part(sql, ("ACTIVE_SESSCOUNT", ))

    def size_datafiles(self):
        sql = '''select round(sum(bytes/1024/1024/1024),2) from dba_data_files'''
        return self.common_part(sql, ("DATAFILE_SIZE_G", ))

    def size_segments(self):
        sql = '''select round(sum(bytes/1024/1024/1024),2) from dba_segments'''
        return self.common_part(sql, ("SEGMENT_SIZE_G", ))

    def top(self, name):
        minute = "5"
        if name == "sql":
            sql = '''select * from
                (select SQL_ID, SQL_PLAN_HASH_VALUE plan_hash_value, sql_opname,
                        sum(decode(session_state,'ON CPU',1,0)) "CPU",
                        sum(decode(session_state,'WAITING',1,0)) -
                        sum(decode(session_state,'WAITING',
                            decode(wait_class,'User I/O',1,0),0)) "OTHER_WAIT",
                        sum(decode(session_state,'WAITING',
                            decode(wait_class,'User I/O',1,0),0)) "IO_WAIT",
                        sum(decode(session_state,'ON CPU',1,1)) "TOTAL"
                 from v$active_session_history
                 where SQL_ID is not NULL and sample_time > sysdate - {0}/24/60
                 group by sql_id, SQL_PLAN_HASH_VALUE, sql_opname
                 order by sum(decode(session_state,'ON CPU',1,1)) desc)
                where rownum < 10'''.format(minute)
            return self.common_part(sql,
                                    ("SQL_ID", "PLAN_HASH_VALUE", "SQL_OPNAME",
                                     "CPU", "OTHER_WAIT", "IO_WAIT", "TOTAL"))
        elif name == "session":
            sql = '''select * from
                (select ash.session_id, ash.session_serial#, ash.machine,
                        ash.sql_id, b.USERNAME,
                        sum(decode(ash.session_state,'ON CPU',1,0)) "CPU_WAIT",
                        sum(decode(ash.session_state,'WAITING',
                            decode(ash.wait_class,'User I/O',1,0),0)) "IO_WAIT",
                        sum(decode(ash.session_state,'WAITING',1,0)) -
                        sum(decode(ash.session_state,'WAITING',
                            decode(ash.wait_class,'User I/O',1,0),0)) "OTHER_WAIT",
                        sum(decode(ash.session_state,'ON CPU',1,1)) "TOTAL"
                 from V$ACTIVE_SESSION_HISTORY ash, dba_users b
                 where sample_time > sysdate - {0}/24/60 and ash.user_id=b.user_id
                 group by ash.session_id, ash.session_serial#, ash.machine,
                          ash.sql_id, b.username
                 order by sum(decode(ash.session_state,'ON CPU',1,1)) desc)
                where rownum < 10'''.format(minute)
            return self.common_part(sql,
                                    ("SESSION_ID", "SERIAL#", "MACHINE",
                                     "SQL_ID", "USERNAME", "CPU", "IO",
                                     "OTHER", "TOTAL"))
        elif name == "event":
            pass
        else:
            pass

    def longquery(self):
        pass

    def longtrans(self):
        pass

    # still to do: redo, temp tablespace, tablespace, instance info/uptime,
    # undo, lock, DG, wait event, pga/sga

    def parameter(self):
        sql = '''select name,value,ISSES_MODIFIABLE,ISSYS_MODIFIABLE from v$parameter
            where name in
            ('db_name','db_files','db_writer_processes','db_block_size','db_file_multiblock_read_count',
             'processes','sessions','open_cursors','session_cached_cursors','undo_management',
             'undo_retention','job_queue_processes','fast_start_mttr_target','log_checkpoints_to_alert',
             'log_checkpoint_timeout','log_checkpoint_interval','statistics_level','timed_statistics',
             'log_archive_max_processes','control_file_record_keep_time','cursor_sharing',
             'deferred_segment_creation','optimizer_features_enable','optimizer_mode',
             'parallel_max_servers','remote_login_passwordfile','archive_lag_target',
             'audit_trail','filesystemio_options','disk_asynch_io','memory_max_target',
             'memory_target','workarea_size_policy','pga_aggregate_target','sga_max_size',
             'sga_target','lock_sga','db_cache_size','db_cache_advice','shared_pool_size',
             'large_pool_size','java_pool_size','streams_pool_size','log_buffer',
             'db_block_checking','db_block_checksum','db_lost_write_protect','db_ultra_safe')'''
        sqlres = self.connection.execsql(sql)
        if isinstance(sqlres, list):
            sqlres.insert(0, ("PARAMETER_NAME", "VALUE", "ISSES_MODIFIABLE",
                              "ISSYS_MODIFIABLE"))
            return sqlres
        else:
            return [("PARAMETER_NAME", "VALUE", "ISSES_MODIFIABLE",
                     "ISSYS_MODIFIABLE"), (sqlres, "", "", "")]

    def close(self):
        self.connection.close_commit()
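# Quick health-snapshot sketch (placeholder credentials; the metric and event
# names are standard Oracle names used here only as examples). Each call
# returns a list whose first element is the column-name tuple.
def _demo_healthcheck():
    c = check('dba_user', 'dba_pwd', '192.0.2.10', '1521', 'orcl')
    print(c.sysmetric('Host CPU Utilization (%)'))
    print(c.eventmetric('log file sync'))
    print(c.session_active())
    print(c.top('sql'))  # top SQL by ASH samples over the last 5 minutes
    c.close()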
def getawr(ipadress, port, servicename, dbausername, dbapassword, date,
           beginhr, endhr, instance_number):
    try:
        connection = Oracle_Conn(dbausername, dbapassword, ipadress, port,
                                 servicename)
    except:
        return "Error: database connection failed"
    # both RAC nodes share the same dbid
    re_dbid = connection.execsql(
        "select dbid from v$database")  # returns an error str or a result list
    snaptime_begin = str(date) + str(beginhr)  # e.g. 2017032522
    snaptime_end = str(date) + str(endhr)
    snapid_sql = '''select distinct snap_id from dba_hist_snapshot s
        where to_char(s.end_interval_time,'yyyymmddhh24') in ('{0}','{1}')
        order by 1 '''.format(snaptime_begin, snaptime_end)
    re_snapid = connection.execsql(snapid_sql)
    if isinstance(re_dbid, str) or isinstance(re_snapid, str):
        connection.close_commit()
        return "Error: failed to fetch database info"
    elif len(re_snapid) != 2:
        # exactly 2 snap_ids are required: with only 1 the AWR call below
        # fails because snap_time_begin and snap_time_end must differ
        connection.close_commit()
        return "Error: failed to fetch SNAPID info"
    else:
        dbid = re_dbid[0][0]
        snapid_begin = re_snapid[0][0]
        snapid_end = re_snapid[1][0]
        # last argument: 0 = no ADDM, 8 = with ADDM
        awr_sql = '''select output from table(dbms_workload_repository.awr_report_html(
            {0},{1},{2},{3},0))'''.format(dbid, instance_number, snapid_begin,
                                          snapid_end)
        re_awr = connection.execsql(awr_sql)
        if isinstance(re_awr, str):
            connection.close_commit()
            return "Error: failed to fetch the AWR report: " + re_awr
        else:
            awrrpt_name = 'awrrpt_' + servicename + '_' + str(instance_number) + \
                '_' + str(snaptime_begin) + '_' + str(endhr) + '.html'
            try:
                makehtml(awrrpt_name, re_awr)
                connection.close_commit()
            except:
                return "Error: failed to generate the AWR report file"
            else:
                return "Success: please download the report"
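# Usage sketch (placeholder connection details): AWR report for instance 1
# between the snapshots taken at 10:00 and 11:00 on 2017-03-25.
def _demo_getawr():
    msg = getawr('192.0.2.10', '1521', 'orcl', 'dba_user', 'dba_pwd',
                 date='20170325', beginhr='10', endhr='11', instance_number=1)
    print(msg)  # "Success:..." on success, "Error:..." otherwise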
class DmlAudit():
    sql_species = {
        "(^INSERT\s*INTO.*SELECT.*)": "insert_select",
        "(^INSERT\s*INTO.*)": "insert",  # \s matches any whitespace character
        "(^UPDATE.*)": "update",  # . matches any character except a newline
        "(^DELETE.*)": "delete"  # * matches 0 or more of the expression
    }

    def __init__(self, username, password, ip, port, servicename):
        self.connection = Oracle_Conn(username, password, ip, port,
                                      servicename)

    def audit(self, sqltext):
        # the incoming sqltext may contain many line breaks plus block and
        # line comments, which makes it hard to classify, so strip comments
        # first; the original sqltext is still what gets stored
        lexerSplitor = splitor.LexerSplitor()
        uncomment_sqltext = lexerSplitor.remove_sqlcomment(sqltext)
        if uncomment_sqltext == '':
            return None  # comment-only statement, nothing to audit
        # statements outside sql_species are not allowed to run
        for key in DmlAudit.sql_species:
            # re.IGNORECASE, and re.DOTALL so '.' also matches newlines
            res = re.search(key, uncomment_sqltext, re.IGNORECASE | re.DOTALL)
            if res:
                sqltype = DmlAudit.sql_species[key]
                if sqltype == 'insert':
                    # dict order is arbitrary, so double-check for insert..select
                    if re.search("(^INSERT\s*INTO.*SELECT.*)",
                                 uncomment_sqltext,
                                 re.IGNORECASE | re.DOTALL):
                        sqltype = 'insert_select'
                break
        else:
            sqltype = "other"
        # grammar check
        if sqltype == 'other':
            grammar = 'invalid'
            gra_failreason = "non-DML statements are not allowed on this platform"
        else:
            gra = self.connection.execsql("explain plan for " + sqltext)
            # returns an error str, or None on success
            if gra:
                grammar = 'invalid'
                gra_failreason = gra
            else:
                grammar = 'valid'
                gra_failreason = ''
        # plan details: estimated row count and execution time
        sqlplan = []
        rowaffact = 0
        exetime = ''
        if grammar == 'valid':
            re_sqlplan = self.connection.execsql(
                "select * from table(dbms_xplan.display)")
            #select * from table(dbms_xplan.display('','','OUTLINE'));
            for item in re_sqlplan:
                sqlplan.append(item[0])
            if sqltype == 'insert':
                # first data row; a plain insert usually has no plan hash value
                tmp_exeplan = re_sqlplan[4][0].split('|')
                # an insert driven by a sequence does get a plan hash value;
                # tmp_exeplan then usually has length 1, so fall through
                if len(tmp_exeplan) < 8:
                    tmp_exeplan = re_sqlplan[5][0].split('|')  # first data row
            else:
                tmp_exeplan = re_sqlplan[5][0].split('|')  # first data row
            # estimated execution time
            exetime = tmp_exeplan[7].strip()
            if exetime == '':
                # the Bytes column is sometimes absent, so index 7 is empty
                # and the time sits at index 6
                exetime = tmp_exeplan[6].strip()
            # estimated affected rows
            evaluate_rows = tmp_exeplan[4]
            rows_value = re.search('\d+', evaluate_rows.strip(), 0).group(0)
            rows_unit = ''
            if re.search('\D+', evaluate_rows.strip(), 0):
                rows_unit = re.search('\D+', evaluate_rows.strip(), 0).group(0)
            if rows_unit == 'K':
                evaluate_rows = int(rows_value) * 1000
            elif rows_unit == 'M':
                evaluate_rows = int(rows_value) * 1000 * 1000
            else:
                evaluate_rows = int(rows_value)
            # affected rows: if the estimate is small, run the rewritten
            # statement to get an exact count
            if evaluate_rows >= 1000:
                rowaffact = evaluate_rows
            else:
                func_name = sqltype + "_change"
                # rewrite the de-commented statement into a count query
                sqlchange = getattr(DmlAudit, func_name)(uncomment_sqltext)
                try:
                    # may come back as a non-numeric error str
                    rowaffact = int(self.connection.execsql(sqlchange)[0][0])
                except:
                    rowaffact = evaluate_rows
        # final verdict: more than 10 minutes or more than 1000 rows needs
        # review (the string compare works because exetime is fixed-width
        # HH:MM:SS)
        if grammar == "invalid":
            audit_status = "unqualified"
        elif grammar == "valid" and rowaffact <= 1000 and exetime <= '00:10:00':
            audit_status = "qualified"
        else:
            audit_status = "semi-qualified"
        #audit_status = "semi-qualified"  # force everything to review, for testing
        re_dict = {}
        for i in ['sqltext', 'grammar', 'gra_failreason', 'sqlplan',
                  'rowaffact', 'audit_status', 'sqltype', 'exetime']:
            re_dict[i] = locals()[i]
        return re_dict

    def close_commit(self):
        self.connection.close_commit()

    def close_rollback(self):
        self.connection.close_rollback()

    @staticmethod  # static method, no self needed
    def update_change(sqltext, backupflag=0):
        # strip newlines and tabs, which would break the word splitting; if
        # the table data itself contains \n the worst case is an inaccurate
        # row estimate
        sqltext = sqltext.replace("\n", " ")
        sqltext = sqltext.replace("\t", " ")
        list_uppersqltext = sqltext.upper().split(" ")
        position_update = list_uppersqltext.index("UPDATE")
        position_set = list_uppersqltext.index("SET")
        try:
            position_where = list_uppersqltext.index("WHERE")
        except:
            position_where = None
        else:
            # one or more WHERE keywords: record every position
            list_position_where = []
            for i in range(len(list_uppersqltext)):
                if list_uppersqltext[i] == 'WHERE':
                    list_position_where.append(i)
            # the top-level WHERE is the first one preceded by balanced
            # parentheses
            position_where = None
            for i in list_position_where:
                tmpstr = ' '.join(list_uppersqltext[0:i])
                if tmpstr.count("(") == tmpstr.count(")"):
                    position_where = i
                    break
        list_sqltext = sqltext.split(" ")
        if position_where:
            str1 = " ".join(list_sqltext[position_update + 1:position_set])
            str2 = " ".join(list_sqltext[position_where:])
            if backupflag == 1:
                return "select * from " + str1 + " " + str2
            elif backupflag == 0:
                return "select count(*) from " + str1 + " " + str2
        else:
            str1 = " ".join(list_sqltext[position_update + 1:position_set])
            if backupflag == 1:
                return "select * from " + str1
            elif backupflag == 0:
                return "select count(*) from " + str1

    @staticmethod
    def delete_change(sqltext, backupflag=0):
        sqltext = sqltext.replace("\n", " ")
        sqltext = sqltext.replace("\t", " ")
        list_uppersqltext = sqltext.upper().split(" ")
        list_sqltext = sqltext.split(" ")
        delete_position = list_uppersqltext.index("DELETE")
        # the FROM keyword is optional in an Oracle DELETE statement
        position_from = None
        for item in list_uppersqltext[delete_position:]:
            if item == "FROM":
                position_from = list_uppersqltext.index("FROM")
                break
            elif item == 'DELETE' or item == '':
                continue
            else:
                position_from = None
                break
        if position_from:
            str1 = " ".join(list_sqltext[position_from + 1:])
            if backupflag == 0:
                return "select count(*) from " + str1
            elif backupflag == 1:
                return "select * from " + str1
        else:
            position_delete = list_uppersqltext.index("DELETE")
            str1 = " ".join(list_sqltext[position_delete + 1:])
            if backupflag == 0:
                return "select count(*) from " + str1
            elif backupflag == 1:
                return "select * from " + str1

    @staticmethod
    def insert_select_change(sqltext):
        sqltext = sqltext.replace("\n", " ")
        sqltext = sqltext.replace("\t", " ")
        list_uppersqltext = sqltext.upper().split(" ")
        list_sqltext = sqltext.split(" ")
        position_from = list_uppersqltext.index("FROM")
        str1 = " ".join(list_sqltext[position_from:])
        return "select count(*) " + str1

    @staticmethod
    def insert_change(sqltext):
        # a plain single-row insert always affects exactly one row
        return "select 1 from dual"

    def execsql(self, sqltext):
        return self.connection.execsql(sqltext)
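# Audit sketch (placeholder credentials). The *_change helpers are also
# usable on their own: update_change rewrites an UPDATE into a count(*)
# query (backupflag=0) or a select-everything backup query (backupflag=1)
# over the same rows.
def _demo_dmlaudit():
    print(DmlAudit.update_change("update t set a=1 where b=2"))
    # -> select count(*) from t where b=2
    auditor = DmlAudit('app_user', 'app_pwd', '192.0.2.10', '1521', 'orcl')
    result = auditor.audit("delete from t where b=2")
    if result:
        print("%s %s %s" % (result['audit_status'], result['rowaffact'],
                            result['exetime']))
    auditor.close_rollback()  # roll back; the audit must not commit anything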