def execute_workflow(self, workflow, close_conn=True):
    """Execute an online workflow statement by statement; return a ReviewSet."""
    full_sql = workflow.sqlworkflowcontent.sql_content
    execute_result = ReviewSet(full_sql=full_sql)
    # Strip comments, then split the script into standalone statements.
    stripped = sqlparse.format(full_sql, strip_comments=True)
    statements = sqlparse.split(stripped)
    idx = 1
    stmt = None
    try:
        conn = self.get_connection(db_name=workflow.db_name)
        cursor = conn.cursor()
        # Run each statement in order, committing and timing one at a time.
        for stmt in statements:
            stmt = stmt.rstrip(';')
            with FuncTimer() as t:
                cursor.execute(stmt)
                conn.commit()
            execute_result.rows.append(ReviewResult(
                id=idx,
                errlevel=0,
                stagestatus='Execute Successfully',
                errormessage='None',
                sql=stmt,
                affected_rows=cursor.rowcount,
                execute_time=t.cost,
            ))
            idx += 1
    except Exception as e:
        logger.warning(f"PGSQL命令执行报错,语句:{stmt or stripped}, 错误信息:{traceback.format_exc()}")
        execute_result.error = str(e)
        # Record the statement that failed ...
        execute_result.rows.append(ReviewResult(
            id=idx,
            errlevel=2,
            stagestatus='Execute Failed',
            errormessage=f'异常信息:{e}',
            sql=stmt or stripped,
            affected_rows=0,
            execute_time=0,
        ))
        idx += 1
        # ... then mark everything after it as audited but not executed.
        for stmt in statements[idx - 1:]:
            execute_result.rows.append(ReviewResult(
                id=idx,
                errlevel=0,
                stagestatus='Audit completed',
                errormessage=f'前序语句失败, 未执行',
                sql=stmt,
                affected_rows=0,
                execute_time=0,
            ))
            idx += 1
    finally:
        if close_conn:
            self.close()
    return execute_result
def execute_workflow(self, workflow):
    """Execute an online workflow one statement at a time; return a ReviewSet."""
    full_sql = workflow.sqlworkflowcontent.sql_content
    review_set = ReviewSet(full_sql=full_sql)
    # Remove comments, then split the script into standalone statements.
    stripped = sqlparse.format(full_sql, strip_comments=True)
    statements = sqlparse.split(stripped)
    row_id = 1
    for stmt in statements:
        # Each statement gets its own short-lived connection.
        with FuncTimer() as t:
            outcome = self.execute(db_name=workflow.db_name, sql=stmt, close_conn=True)
        if outcome.error:
            # Record the failing statement ...
            review_set.error = outcome.error
            review_set.rows.append(
                ReviewResult(
                    id=row_id,
                    errlevel=2,
                    stagestatus='Execute Failed',
                    errormessage=f'异常信息:{outcome.error}',
                    sql=stmt,
                    affected_rows=0,
                    execute_time=0,
                ))
            row_id += 1
            # ... then flag everything after it as audited but skipped.
            for pending in statements[row_id - 1:]:
                review_set.rows.append(
                    ReviewResult(
                        id=row_id,
                        errlevel=0,
                        stagestatus='Audit completed',
                        errormessage=f'前序语句失败, 未执行',
                        sql=pending,
                        affected_rows=0,
                        execute_time=0,
                    ))
                row_id += 1
            break
        review_set.rows.append(
            ReviewResult(
                id=row_id,
                errlevel=0,
                stagestatus='Execute Successfully',
                errormessage='None',
                sql=stmt,
                affected_rows=0,
                execute_time=t.cost,
            ))
        row_id += 1
    return review_set
def execute_workflow(self, workflow):
    """Run an online workflow of Redis commands; return a ReviewSet."""
    full_sql = workflow.sqlworkflowcontent.sql_content
    # One command per non-blank line.
    commands = [c.strip() for c in full_sql.split('\n') if c.strip()]
    result_set = ReviewSet(full_sql=full_sql)
    row_id = 1
    command = None
    try:
        conn = self.get_connection(db_name=workflow.db_name)
        for command in commands:
            # shlex keeps quoted arguments together when tokenizing.
            with FuncTimer() as t:
                conn.execute_command(*shlex.split(command))
            result_set.rows.append(
                ReviewResult(
                    id=row_id,
                    errlevel=0,
                    stagestatus='Execute Successfully',
                    errormessage='None',
                    sql=command,
                    affected_rows=0,
                    execute_time=t.cost,
                ))
            row_id += 1
    except Exception as e:
        logger.warning(
            f"Redis命令执行报错,语句:{command or full_sql}, 错误信息:{traceback.format_exc()}")
        result_set.error = str(e)
        # Record the failing command ...
        result_set.rows.append(
            ReviewResult(
                id=row_id,
                errlevel=2,
                stagestatus='Execute Failed',
                errormessage=f'异常信息:{e}',
                sql=command,
                affected_rows=0,
                execute_time=0,
            ))
        row_id += 1
        # ... then mark the remaining commands as audited but not executed.
        for pending in commands[row_id - 1:]:
            result_set.rows.append(
                ReviewResult(
                    id=row_id,
                    errlevel=0,
                    stagestatus='Audit completed',
                    errormessage=f'前序语句失败, 未执行',
                    sql=pending,
                    affected_rows=0,
                    execute_time=0,
                ))
            row_id += 1
    return result_set
def query(request):
    """Run a SQL query against an instance and return the result set as JSON.

    Pipeline: parameter validation -> engine-side query check -> privilege
    check -> LIMIT rewriting -> timed execution (with a scheduled
    kill-connection task as timeout guard) -> optional data masking ->
    query logging.
    :param request: POST with instance_name / sql_content / db_name /
        limit_num, plus an optional schema_name (used for pgsql only).
    :return: HttpResponse wrapping {'status', 'msg', 'data'} as JSON.
    """
    instance_name = request.POST.get('instance_name')
    sql_content = request.POST.get('sql_content')
    db_name = request.POST.get('db_name')
    limit_num = int(request.POST.get('limit_num', 0))
    schema_name = request.POST.get('schema_name', None)
    user = request.user
    result = {'status': 0, 'msg': 'ok', 'data': {}}
    try:
        instance = Instance.objects.get(instance_name=instance_name)
    except Instance.DoesNotExist:
        result['status'] = 1
        result['msg'] = '实例不存在'
        return HttpResponse(json.dumps(result), content_type='application/json')
    # Server-side parameter validation.
    if None in [sql_content, db_name, instance_name, limit_num]:
        result['status'] = 1
        result['msg'] = '页面提交参数可能为空'
        return HttpResponse(json.dumps(result), content_type='application/json')
    try:
        config = SysConfig()
        # Pre-query checks: forbidden-statement detection, statement splitting.
        query_engine = get_engine(instance=instance)
        query_check_info = query_engine.query_check(db_name=db_name, sql=sql_content)
        if query_check_info.get('bad_query'):
            # The engine flagged this as a bad query.
            result['status'] = 1
            result['msg'] = query_check_info.get('msg')
            return HttpResponse(json.dumps(result), content_type='application/json')
        if query_check_info.get(
                'has_star') and config.get('disable_star') is True:
            # SELECT * is present and the disable_star option is enabled.
            result['status'] = 1
            result['msg'] = query_check_info.get('msg')
            return HttpResponse(json.dumps(result), content_type='application/json')
        sql_content = query_check_info['filtered_sql']
        # Privilege check; on success it also yields the effective limit_num.
        priv_check_info = query_priv_check(user, instance, db_name, sql_content, limit_num)
        if priv_check_info['status'] == 0:
            limit_num = priv_check_info['data']['limit_num']
            priv_check = priv_check_info['data']['priv_check']
        else:
            result['status'] = 1
            result['msg'] = priv_check_info['msg']
            return HttpResponse(json.dumps(result), content_type='application/json')
        # EXPLAIN statements run with limit_num forced to 0.
        limit_num = 0 if re.match(r"^explain", sql_content.lower()) else limit_num
        # Add a LIMIT clause to (or otherwise rewrite) the query.
        sql_content = query_engine.filter_sql(sql=sql_content,
                                              limit_num=limit_num)
        # Grab the connection up front so it can be reused below and so its
        # session can be killed on timeout.
        query_engine.get_connection(db_name=db_name)
        thread_id = query_engine.thread_id
        max_execution_time = int(config.get('max_execution_time', 60))
        # Schedule a kill-connection task as a timeout guard
        # (timeout = max_execution_time seconds).
        if thread_id:
            schedule_name = f'query-{time.time()}'
            run_date = (datetime.datetime.now() + datetime.timedelta(seconds=max_execution_time))
            add_kill_conn_schedule(schedule_name, run_date, instance.id, thread_id)
        with FuncTimer() as t:
            # Capture replication lag alongside the query itself.
            seconds_behind_master = query_engine.seconds_behind_master
            if instance.db_type == 'pgsql':
                # TODO: remove this branch once parameter passing is reworked.
                query_result = query_engine.query(db_name, sql_content, limit_num, schema_name=schema_name)
            else:
                query_result = query_engine.query(db_name, sql_content, limit_num)
        query_result.query_time = t.cost
        # Query returned; drop the pending kill schedule.
        if thread_id:
            del_schedule(schedule_name)
        # Query failed.
        if query_result.error:
            result['status'] = 1
            result['msg'] = query_result.error
        # Data masking: applied only to error-free result sets; the failure
        # policy is driven by the query_check setting.
        elif config.get('data_masking'):
            try:
                with FuncTimer() as t:
                    masking_result = query_engine.query_masking(
                        db_name, sql_content, query_result)
                masking_result.mask_time = t.cost
                # Masking failed.
                if masking_result.error:
                    # query_check on: refuse to return unmasked data.
                    if config.get('query_check'):
                        result['status'] = 1
                        result['msg'] = f'数据脱敏异常:{masking_result.error}'
                    # query_check off: log, clear the error and return the
                    # unmasked result (privilege check marked as bypassed).
                    else:
                        logger.warning(
                            f'数据脱敏异常,按照配置放行,查询语句:{sql_content},错误信息:{masking_result.error}'
                        )
                        query_result.error = None
                        result['data'] = query_result.__dict__
                # Masking succeeded.
                else:
                    result['data'] = masking_result.__dict__
            except Exception as msg:
                # Unexpected masking failure: same query_check-driven policy.
                if config.get('query_check'):
                    result['status'] = 1
                    result['msg'] = f'数据脱敏异常,请联系管理员,错误信息:{msg}'
                else:
                    logger.warning(
                        f'数据脱敏异常,按照配置放行,查询语句:{sql_content},错误信息:{msg}')
                    query_result.error = None
                    result['data'] = query_result.__dict__
        # Masking disabled: return the raw result.
        else:
            result['data'] = query_result.__dict__
        # Persist a QueryLog row only for successful queries.
        if not query_result.error:
            result['data']['seconds_behind_master'] = seconds_behind_master
            if int(limit_num) == 0:
                limit_num = int(query_result.affected_rows)
            else:
                limit_num = min(int(limit_num), int(query_result.affected_rows))
            query_log = QueryLog(username=user.username,
                                 user_display=user.display,
                                 db_name=db_name,
                                 instance_name=instance.instance_name,
                                 sqllog=sql_content,
                                 effect_row=limit_num,
                                 cost_time=query_result.query_time,
                                 priv_check=priv_check,
                                 hit_rule=query_result.mask_rule_hit,
                                 masking=query_result.is_masked)
            # The DB connection may have gone stale during a long query:
            # retry the save once on a fresh connection.
            try:
                query_log.save()
            except OperationalError:
                connection.close()
                query_log.save()
    except Exception as e:
        logger.error(
            f'查询异常报错,查询语句:{sql_content}\n,错误信息:{traceback.format_exc()}')
        result['status'] = 1
        result['msg'] = f'查询异常报错,错误信息:{e}'
        return HttpResponse(json.dumps(result), content_type='application/json')
    # Serialize and return the query result.
    try:
        return HttpResponse(json.dumps(result, cls=ExtendJSONEncoderFTime, bigint_as_string=True),
                            content_type='application/json')
    # Returns normally on this path, but the payload may still be garbled.
    except UnicodeDecodeError:
        return HttpResponse(json.dumps(result, default=str, bigint_as_string=True, encoding='latin1'),
                            content_type='application/json')
def execute_workflow(self, workflow, close_conn=True):
    """Execute an online workflow and return a ReviewSet.

    Historically the SQL was naively split from sql_content and executed.
    The logic now executes the statements recorded in the audit (review)
    result instead; for PL/SQL object definitions (stored procedures etc.)
    the newly created object is additionally checked for successful
    compilation.
    """
    review_content = workflow.sqlworkflowcontent.review_content
    review_result = json.loads(review_content)
    sqlitemList = get_exec_sqlitem_list(review_result, workflow.db_name)
    sql = workflow.sqlworkflowcontent.sql_content
    execute_result = ReviewSet(full_sql=sql)
    line = 1  # 1-based row id within the result set
    statement = None
    try:
        conn = self.get_connection()
        cursor = conn.cursor()
        # Execute each audited statement in turn, appending to the result.
        for sqlitem in sqlitemList:
            statement = sqlitem.statement
            if sqlitem.stmt_type == "SQL":
                statement = statement.rstrip(';')
            with FuncTimer() as t:
                cursor.execute(statement)
                conn.commit()
            rowcount = cursor.rowcount
            stagestatus = "Execute Successfully"
            # For named PL/SQL objects (anything except anonymous blocks),
            # verify the object exists and compiled cleanly.
            if sqlitem.stmt_type == "PLSQL" and sqlitem.object_name and sqlitem.object_name != 'ANONYMOUS' and sqlitem.object_name != '':
                # NOTE(review): object_owner/object_name are interpolated
                # directly into SQL — confirm they are trusted upstream
                # (they come from the parsed review result).
                query_obj_sql = f"""SELECT OBJECT_NAME, STATUS, TO_CHAR(LAST_DDL_TIME, 'YYYY-MM-DD HH24:MI:SS') FROM ALL_OBJECTS WHERE OWNER = '{sqlitem.object_owner}' AND OBJECT_NAME = '{sqlitem.object_name}' """
                cursor.execute(query_obj_sql)
                row = cursor.fetchone()
                if row:
                    status = row[1]
                    if status and status == "INVALID":
                        stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " is invalid."
                else:
                    stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " doesn't exist."
                # Treat a failed compilation like an execution failure.
                if stagestatus != "Execute Successfully":
                    raise Exception(stagestatus)
            execute_result.rows.append(ReviewResult(
                id=line,
                errlevel=0,
                stagestatus=stagestatus,
                errormessage='None',
                sql=statement,
                affected_rows=rowcount,
                execute_time=t.cost,
            ))
            line += 1
    except Exception as e:
        logger.warning(f"Oracle命令执行报错,语句:{statement or sql}, 错误信息:{traceback.format_exc()}")
        execute_result.error = str(e)
        # Append the failing statement to the result set.
        execute_result.rows.append(ReviewResult(
            id=line,
            errlevel=2,
            stagestatus='Execute Failed',
            errormessage=f'异常信息:{e}',
            sql=statement or sql,
            affected_rows=0,
            execute_time=0,
        ))
        line += 1
        # Mark the statements after the failure as audited but not executed.
        for sqlitem in sqlitemList[line - 1:]:
            execute_result.rows.append(ReviewResult(
                id=line,
                errlevel=0,
                stagestatus='Audit completed',
                errormessage=f'前序语句失败, 未执行',
                sql=sqlitem.statement,
                affected_rows=0,
                execute_time=0,
            ))
            line += 1
    finally:
        if close_conn:
            self.close()
    return execute_result
def archive(archive_id):
    """Run a pt-archiver job for the given ArchiveConfig.

    Builds the pt-archiver arguments from the config (dest / file / purge
    modes), executes the command, scrapes the SELECT/INSERT/DELETE counters
    from its statistics output, validates the counters against the chosen
    mode, and persists an ArchiveLog row with passwords redacted.

    :param archive_id: primary key of the ArchiveConfig to run.
    :raises Exception: when the run failed or the counters are inconsistent.
    """
    archive_info = ArchiveConfig.objects.get(id=archive_id)
    s_ins = archive_info.src_instance
    src_db_name = archive_info.src_db_name
    src_table_name = archive_info.src_table_name
    condition = archive_info.condition
    no_delete = archive_info.no_delete
    sleep = archive_info.sleep
    mode = archive_info.mode
    # Charset of the archived table; fall back to the database default.
    s_engine = get_engine(s_ins)
    db = s_engine.schema_object.databases[src_db_name]
    tb = db.tables[src_table_name]
    charset = tb.options['charset'].value
    if charset is None:
        charset = db.options['charset'].value
    pt_archiver = PtArchiver()
    # Assemble the pt-archiver arguments.
    source = fr"h={s_ins.host},u={s_ins.user},p={s_ins.password},P={s_ins.port},D={src_db_name},t={src_table_name}"
    args = {
        "no-version-check": True,
        "source": source,
        "where": condition,
        "progress": 5000,
        "statistics": True,
        "charset": charset,
        "limit": 10000,
        "txn-size": 1000,
        "sleep": sleep
    }
    # Archive into a destination instance.
    if mode == 'dest':
        d_ins = archive_info.dest_instance
        dest_db_name = archive_info.dest_db_name
        dest_table_name = archive_info.dest_table_name
        dest = fr"h={d_ins.host},u={d_ins.user},p={d_ins.password},P={d_ins.port},D={dest_db_name},t={dest_table_name}"
        args['dest'] = dest
        args['bulk-insert'] = True
        if no_delete:
            args['no-delete'] = True
        else:
            args['bulk-delete'] = True
    # Archive into a local file under the downloads directory.
    elif mode == 'file':
        output_directory = os.path.join(settings.BASE_DIR, 'downloads/archiver')
        os.makedirs(output_directory, exist_ok=True)
        args['file'] = f'{output_directory}/{s_ins.instance_name}-{src_db_name}-{src_table_name}.txt'
        if no_delete:
            args['no-delete'] = True
        else:
            args['bulk-delete'] = True
    # Pure purge: delete matching rows without copying them anywhere.
    elif mode == 'purge':
        args['purge'] = True
    # Validate the arguments.
    args_check_result = pt_archiver.check_args(args)
    if args_check_result['status'] == 1:
        return JsonResponse(args_check_result)
    # Convert the argument dict into a command line.
    cmd_args = pt_archiver.generate_args2cmd(args, shell=True)
    # Run the command and scrape the statistics counters from its output.
    # One compiled case-insensitive pattern, matched once per line.
    # (The previous code matched each pattern twice per line, and the
    # second match lacked re.I, silently zeroing lowercase counter lines.)
    stat_pattern = re.compile(r'^(SELECT|INSERT|DELETE)\s(\d+)$', re.I)
    counters = {'SELECT': 0, 'INSERT': 0, 'DELETE': 0}
    with FuncTimer() as t:
        p = pt_archiver.execute_cmd(cmd_args, shell=True)
        stdout = ''
        for output_line in iter(p.stdout.readline, ''):
            matched = stat_pattern.match(output_line)
            if matched:
                counters[matched.group(1).upper()] = int(matched.group(2))
            stdout += f'{output_line}\n'
    statistics = stdout
    # Collect stderr, if any, into the statistics text.
    stderr = p.stderr.read()
    if stderr:
        statistics = stdout + stderr
    # Judge the archive outcome.
    select_cnt = counters['SELECT']
    insert_cnt = counters['INSERT']
    delete_cnt = counters['DELETE']
    error_info = ''
    success = True
    if stderr:
        error_info = f'命令执行报错:{stderr}'
        success = False
    if mode == 'dest':
        # Source rows are deleted: inserted and deleted counts must agree.
        if not no_delete and (insert_cnt != delete_cnt):
            error_info = f"删除和写入数量不一致:{insert_cnt}!={delete_cnt}"
            success = False
    elif mode == 'file':
        # Source rows are deleted: selected and deleted counts must agree.
        if not no_delete and (select_cnt != delete_cnt):
            error_info = f"查询和删除数量不一致:{select_cnt}!={delete_cnt}"
            success = False
    elif mode == 'purge':
        # Plain delete: selected and deleted counts must agree.
        if select_cnt != delete_cnt:
            error_info = f"查询和删除数量不一致:{select_cnt}!={delete_cnt}"
            success = False
    # Persist the run; refresh the DB connection first if it went stale.
    if connection.connection and not connection.is_usable():
        close_old_connections()
    # Update the last archive time.
    ArchiveConfig(
        id=archive_id,
        last_archive_time=t.end).save(update_fields=['last_archive_time'])
    # Save the log with passwords redacted from the command line.
    ArchiveLog.objects.create(
        archive=archive_info,
        cmd=cmd_args.replace(s_ins.password, '***').replace(
            d_ins.password, '***') if mode == 'dest' else cmd_args.replace(
                s_ins.password, '***'),
        condition=condition,
        mode=mode,
        no_delete=no_delete,
        sleep=sleep,
        select_cnt=select_cnt,
        insert_cnt=insert_cnt,
        delete_cnt=delete_cnt,
        statistics=statistics,
        success=success,
        error_info=error_info,
        start_time=t.start,
        end_time=t.end)
    if not success:
        raise Exception(f'{error_info}\n{statistics}')
def execute_workflow(self, workflow, close_conn=True):
    """Execute an online workflow and return a ReviewSet.

    Historically the SQL was naively split from sql_content and executed.
    The logic now executes the statements recorded in the audit (review)
    result instead; for PL/SQL object definitions (stored procedures etc.)
    the newly created object is additionally checked for successful
    compilation. When workflow.is_backup is set, the session start/end
    timestamps are captured so the executed SQL can be backed up via log
    mining afterwards.
    """
    review_content = workflow.sqlworkflowcontent.review_content
    review_result = json.loads(review_content)
    sqlitemList = get_exec_sqlitem_list(review_result, workflow.db_name)
    sql = workflow.sqlworkflowcontent.sql_content
    execute_result = ReviewSet(full_sql=sql)
    line = 1          # 1-based row id within the result set
    failed_line = 0   # row id at the point of failure; 0 when all succeeded
    statement = None
    try:
        conn = self.get_connection()
        cursor = conn.cursor()
        # Capture the workflow start time: it is the starting point for the
        # log-mining window used by the SQL backup.
        cursor.execute(
            f"alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss'")
        cursor.execute(f"select sysdate from dual")
        rows = cursor.fetchone()
        begin_time = rows[0]
        # Execute each audited statement in turn, appending to the result.
        for sqlitem in sqlitemList:
            statement = sqlitem.statement
            if sqlitem.stmt_type == "SQL":
                statement = statement.rstrip(';')
            with FuncTimer() as t:
                if statement != '':
                    cursor.execute(statement)
                    # conn.commit() -- deferred: a single commit runs in the
                    # finally block below.
            rowcount = cursor.rowcount
            stagestatus = "Execute Successfully"
            # For named PL/SQL objects (anything except anonymous blocks),
            # verify the object exists and compiled cleanly.
            if sqlitem.stmt_type == "PLSQL" and sqlitem.object_name and sqlitem.object_name != 'ANONYMOUS' and sqlitem.object_name != '':
                # NOTE(review): object_owner/object_name are interpolated
                # directly into SQL — confirm they are trusted upstream.
                query_obj_sql = f"""SELECT OBJECT_NAME, STATUS, TO_CHAR(LAST_DDL_TIME, 'YYYY-MM-DD HH24:MI:SS') FROM ALL_OBJECTS WHERE OWNER = '{sqlitem.object_owner}' AND OBJECT_NAME = '{sqlitem.object_name}' """
                cursor.execute(query_obj_sql)
                row = cursor.fetchone()
                if row:
                    status = row[1]
                    if status and status == "INVALID":
                        stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " is invalid."
                else:
                    stagestatus = "Compile Failed. Object " + sqlitem.object_owner + "." + sqlitem.object_name + " doesn't exist."
                # Treat a failed compilation like an execution failure.
                if stagestatus != "Execute Successfully":
                    raise Exception(stagestatus)
            execute_result.rows.append(
                ReviewResult(
                    id=line,
                    errlevel=0,
                    stagestatus=stagestatus,
                    errormessage='None',
                    sql=statement,
                    affected_rows=cursor.rowcount,
                    execute_time=t.cost,
                ))
            line += 1
    except Exception as e:
        logger.warning(
            f"Oracle命令执行报错,工单id:{workflow.id},语句:{statement or sql}, 错误信息:{traceback.format_exc()}"
        )
        execute_result.error = str(e)
        # On failure: roll back the uncommitted work and remember where the
        # failure occurred (used for backup annotation below).
        conn.rollback()
        failed_line = line
        # Append the failing statement to the result set.
        execute_result.rows.append(
            ReviewResult(
                id=line,
                errlevel=2,
                stagestatus='Execute Failed',
                errormessage=f'异常信息:{e}',
                sql=statement or sql,
                affected_rows=0,
                execute_time=0,
            ))
        line += 1
        # Mark the statements after the failure as audited but not executed.
        for sqlitem in sqlitemList[line - 1:]:
            execute_result.rows.append(
                ReviewResult(
                    id=line,
                    errlevel=0,
                    stagestatus='Audit completed',
                    errormessage=f'前序语句失败, 未执行',
                    sql=sqlitem.statement,
                    affected_rows=0,
                    execute_time=0,
                ))
            line += 1
    finally:
        # Commit whatever completed in this session.
        conn.commit()
        # Back up the executed SQL via log mining between begin/end times.
        if workflow.is_backup:
            try:
                cursor.execute(f"select sysdate from dual")
                rows = cursor.fetchone()
                end_time = rows[0]
                self.backup(workflow, cursor=cursor, begin_time=begin_time, end_time=end_time)
            except Exception as e:
                logger.warning(
                    f"Oracle工单备份异常,工单id:{workflow.id}, 错误信息:{traceback.format_exc()}"
                )
                execute_result.error = str(e)
                # Annotate the result rows with the backup failure.
                # NOTE(review): execute_result.rows is a 0-based list while
                # line/failed_line are 1-based row ids; the mixed use of
                # [failed_line], [line] and [line - 2] in these loops looks
                # inconsistent — verify the intended rows are the ones
                # actually being annotated.
                if failed_line > 0:
                    while failed_line > 0:
                        execute_result.rows[failed_line].errlevel = 1
                        execute_result.rows[
                            failed_line].stagestatus = execute_result.rows[
                                failed_line].stagestatus + "\n Backup failed"
                        execute_result.rows[
                            failed_line].errormessage = f"备份失败:{str(e)}"
                        failed_line -= 1
                else:
                    line = line - 2
                    while line > 0:
                        execute_result.rows[line].errlevel = 1
                        execute_result.rows[
                            line].stagestatus = execute_result.rows[
                                line].stagestatus + "\n Backup failed"
                        execute_result.rows[
                            line].errormessage = f"备份失败:{str(e)}"
                        line -= 1
            else:
                # Backup succeeded: annotate the rows accordingly.
                # NOTE(review): same 0-based vs 1-based indexing concern as
                # above applies to the [line - 2] accesses here.
                if failed_line > 0:
                    while failed_line > 0:
                        execute_result.rows[
                            line - 2].stagestatus = execute_result.rows[
                                line - 2].stagestatus + "\n Backup successfully"
                        failed_line -= 1
                else:
                    line = line - 2
                    while line > 0:
                        execute_result.rows[
                            line - 2].stagestatus = execute_result.rows[
                                line - 2].stagestatus + "\n Backup successfully"
                        line -= 1
        if close_conn:
            self.close()
    return execute_result
def execute_workflow(self, workflow, close_conn=True):
    """Execute an online workflow against Oracle; return a ReviewSet."""
    sql = workflow.sqlworkflowcontent.sql_content
    result_set = ReviewSet(full_sql=sql)
    row_id = 1
    stmt = None
    try:
        conn = self.get_connection()
        cursor = conn.cursor()
        # Switch CURRENT_SCHEMA first and record that step as its own row.
        if workflow.db_name:
            schema_sql = f"ALTER SESSION SET CURRENT_SCHEMA = {workflow.db_name}"
            with FuncTimer() as t:
                cursor.execute(schema_sql)
            result_set.rows.append(
                ReviewResult(
                    id=row_id,
                    errlevel=0,
                    stagestatus='Execute Successfully',
                    errormessage='None',
                    sql=schema_sql,
                    affected_rows=cursor.rowcount,
                    execute_time=t.cost,
                ))
            row_id += 1
        # Strip comments, split, then run each statement with its own commit.
        sql = sqlparse.format(sql, strip_comments=True)
        for stmt in sqlparse.split(sql):
            stmt = stmt.rstrip(';')
            with FuncTimer() as t:
                cursor.execute(stmt)
                conn.commit()
            result_set.rows.append(
                ReviewResult(
                    id=row_id,
                    errlevel=0,
                    stagestatus='Execute Successfully',
                    errormessage='None',
                    sql=stmt,
                    affected_rows=cursor.rowcount,
                    execute_time=t.cost,
                ))
            row_id += 1
    except Exception as e:
        logger.error(
            f"Oracle命令执行报错,语句:{stmt or sql}, 错误信息:{traceback.format_exc()}"
        )
        result_set.error = str(e)
        # Append the failing statement to the result set.
        result_set.rows.append(
            ReviewResult(
                id=row_id,
                errlevel=2,
                stagestatus='Execute Failed',
                errormessage=f'异常信息:{e}',
                sql=stmt or sql,
                affected_rows=0,
                execute_time=0,
            ))
    finally:
        if close_conn:
            self.close()
    return result_set