def get_type_count():
    """Return the number of occurrences of each attack type.

    :return: dict mapping attack-type name to its count; types with no DB
        rows keep a count of 0. Returns None when the query fails.
    """
    conn = None
    cursor = None
    # Seed every known type with 0 so the result always contains all keys
    # and every count is >= 0.
    default = {
        "http-defense": 0,
        "dos-attack": 0,
        "web-attack": 0,
        "sensitive-data-tracking": 0,
        "identification-error": 0,
    }
    try:
        conn = get_connection()
        cursor = conn.cursor()
        sql = "SELECT attack_type, count(*) count FROM attack_ip " \
              "GROUP BY attack_type"
        # logger.info(sql)
        cursor.execute(sql)
        rows = cursor.fetchall()  # renamed from `all` to avoid shadowing the builtin
        # Overlay DB counts onto the defaults.
        for attack_type, count in rows:
            default[attack_type] = count
        return default
    except Exception as e:
        logger.info(e)
    finally:
        # Guard against get_connection() failing before cursor/conn existed;
        # the original unconditionally called .close() on None here.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
def invoke_rpc_repair(contents):
    """Repair tampered files through the blackbox RPC service.

    :param contents: dict whose values are lists of items; each item is a
        dict with 'content', 'host', 'ip' and 'id' (TrustLog primary key).
    :return: True when every repair command was sent and logged as repaired,
        False on any error.
    """
    try:
        with BlackBoxRPCClient(BLACKBOX_RPC_IP, BLACKB0X_RPC_PORT) as client:
            # Iterate values directly; the keys themselves are not used.
            for content in contents.values():
                for tmp in content:
                    repair_pack = RepairMessage(tmp['content'], tmp['host'],
                                                tmp['ip'])
                    logger.debug(vars(repair_pack))
                    result = client.command(json.dumps(vars(repair_pack)))
                    logger.debug(result)
                    # Mark the corresponding trust-log row as repaired.
                    error_log = TrustLog.objects.get(id=tmp['id'])
                    error_log.state = 1
                    error_log.save()
        logger.info("Invoking RPC to repair.")
        return True
    except Exception as e:
        logger.error(e)
        return False
def __new__(cls, *args, **kwargs):
    """Create (or return) the singleton RabbitMQ connection wrapper."""
    # NOTE(review): this Lock is created fresh on every call, so each thread
    # locks its own private object — the "double-checked locking" below does
    # NOT actually serialize concurrent initializers. The lock should live
    # at class/module level. TODO confirm and fix.
    lock = Lock()
    logger.debug("init rabbitMQ instance")
    if cls._instance is None:
        try:
            lock.acquire()  # lock, intended to guarantee thread safety
            # second (double-checked) test under the lock
            if cls._instance is None:
                cls._instance = super(RabbitMQConnection, cls).__new__(cls)
                credentials = pika.PlainCredentials('8lab', '8lab')
                cls._connection = pika.BlockingConnection(
                    # heartbeat_interval=0 disables the server heartbeat so
                    # the connection is not closed on idle timeout.
                    # NOTE(review): pika >= 1.0 renamed this parameter to
                    # `heartbeat` — verify the pinned pika version.
                    pika.ConnectionParameters(
                        host=LOCAL_CONFIG.rabbitmq_server,
                        port=LOCAL_CONFIG.rabbitmq_port,
                        virtual_host='/',
                        credentials=credentials,
                        heartbeat_interval=0
                    )
                )
                logger.info("Message server(%s:%s) started."
                            % (LOCAL_CONFIG.rabbitmq_server,
                               LOCAL_CONFIG.rabbitmq_port))
        except Exception as e:
            logger.error(e)
        finally:
            lock.release()  # unlock
    return cls._instance
def suspendScan(self):
    """Suspend the scan that is currently in progress.

    :return: result of the remote suspendScan call
    """
    logger.info("Invoke Clamav-RPC suspendScan().")
    outcome = self.client.suspendScan()
    return outcome
def switch_waf_status(request):
    """Toggle the WAF rule status (alarm/defense) of a given host.

    :param request: POST request carrying 'switch' ('alarm' or 'defense'),
        'option' (rule category) and 'ip' (target host)
    :return: JSON result — a BIZ_CODE payload, or an empty JsonResponse
        when the status-table update fails
    """
    # NOTE(review): reconstructed from collapsed source; verify the exact
    # placement of the `return JsonResponse({})` statements against VCS.
    switch = request.POST["switch"]
    switch_checks = ['alarm', 'defense']
    logger.debug(switch)
    if switch not in switch_checks:
        # Original comment said the only supported ops are 'on','off' —
        # the actual check accepts 'alarm'/'defense'. TODO reconcile.
        logger.error("%s not supported", switch)
        ret = BIZ_CODE['ERROR']
        return HttpResponse(json.dumps(ret))
    options = ['http', 'web', 'dataTrack', 'errorCheck', 'dos', 'whole']
    option = request.POST["option"]
    if option not in options:
        logger.error("%s not supported", option)
        ret = BIZ_CODE['ERROR']
        return HttpResponse(json.dumps(ret))
    ip = request.POST["ip"]
    result = 0
    # TODO only "toggle everything" works for now, so whatever option the
    # caller passed is coerced to 'whole'.
    option = 'whole'
    if option == 'whole':
        # 'whole' means opening/closing all five switch items together.
        # for i in range(5):
        #     _option = options[i]
        message_dict = {'option': option, 'switch': switch}
        # _single_result = switch_waf(message_dict, ip)
        _single_result = switch_alerm_or_defense(switch, ip)
        if _single_result == 0:
            # Legacy raw-SQL version (update's fetchall() returned an empty
            # tuple, so _res was ()):
            # _res = mysql_base_api.sql_execute(
            #     conn, cursor, "update octa_waf_host_status set "
            #     + option + " = %s where ip = %s", [switch, ip])
            try:
                OctaWafHostStatus.objects.filter(ip=ip).update(whole=switch)
            except Exception as e:
                logger.error(e)
                return JsonResponse({})
            # logger.debug(_res)
        result += _single_result
    else:
        # message_dict = {'option': option, 'switch': switch}
        # result = switch_waf(message_dict, ip)
        # logger.debug(result)
        result = 0
    if result == 0:
        try:
            OctaWafHostStatus.objects.filter(ip=ip).update(whole=switch)
        except Exception as e:
            logger.error(e)
            return JsonResponse({})
        ret = BIZ_CODE['SUCCESS']
    else:
        ret = BIZ_CODE['ERROR']
    logger.info(ret)
    return HttpResponse(json.dumps(ret))
def getClamavLog(self):
    """Fetch the scan log from the Clamav RPC service.

    :return: result of the remote getClamavLog call
    """
    logger.info("Invoke Clamav_RPC getClamavLog()")
    scan_log = self.client.getClamavLog()
    return scan_log
def checkVersion(self):
    """Check the Clamav version via RPC.

    :return: result of the remote checkVersion call
    """
    # The log message previously said "checkoutVersion", which does not
    # match the actual RPC method name.
    logger.info("Invoke Clamav-RPC checkVersion().")
    return self.client.checkVersion()
def get_dimension(self, ip):
    """Look up the longitude/latitude for an IP via the GeoIP reader.

    :param ip: IP address string to geolocate
    :return: dict (Dimension.__dict__) with string 'longitude'/'latitude';
        falls back to the default coordinates (-97.822, 37.751) when the
        IP is malformed or absent from the database.
    """
    logger.info("searching" + ip)
    # Default coordinates used when the lookup cannot resolve the IP.
    default_longitude, default_latitude = "-97.822", "37.751"
    try:
        ip_dimension = self.reader.get(ip)
    except ValueError:
        # reader.get raises ValueError on a malformed IP address.
        logger.error("wrong ip")
        return Dimension(default_longitude, default_latitude).__dict__
    if ip_dimension is None:  # was `== None`; identity test is correct here
        longitude, latitude = default_longitude, default_latitude
    else:
        location = ip_dimension['location']
        longitude = str(location['longitude'])
        latitude = str(location['latitude'])
    dimension = Dimension(longitude, latitude).__dict__
    logger.info(dimension)
    return dimension
def getSummary(self):
    """Fetch the scan summary from the Clamav RPC service.

    :return: result of the remote getSummary call
    """
    logger.info("Invoke Clamav_RPC getSummary()")
    summary = self.client.getSummary()
    return summary
def resumeScan(self):
    """Resume a suspended scan.

    :return: result of the remote resumeScan call
    """
    # Fixed the stray trailing "n" that was in this log message.
    logger.info("Invoke Clamav-RPC resumeScan().")
    return self.client.resumeScan()
def query_current_version_tree(service_type, host_name, root_path):
    """Fetch the directory-tree structure of the current version.

    :param service_type: backing service type (e.g. "svn" or "web")
    :param host_name: host name
    :param root_path: protected root directory
    :return: (success: bool, tree: dict, message: str)
    """
    current_version_dict = None
    try:
        is_success, current_version_obj, msg = query_current_version_obj(
            service_type, host_name, root_path)
        if is_success:
            current_version_json = current_version_obj.version_tree
            current_version_dict = json.loads(current_version_json)
        else:
            # The original built this tuple without `return`, so lookup
            # failures fell through and were reported as success with a
            # None tree. Propagate the failure instead.
            return False, {}, str(msg)
    # json -> dict conversion error
    except ValueError as json_error:
        error_message = traceback.format_exc()
        logger.error(error_message)
        return False, {}, str(json_error)
    except Exception as error:
        error_message = traceback.format_exc()
        logger.error(error_message)
        return False, {}, str(error)
    success_message = 'Success. Finish fetching Current version tree of host name %s, root_path %s: ' % (
        host_name, root_path)
    logger.info(success_message)
    return True, current_version_dict, success_message
def __enter__(self):
    """Open the RPC transport and return the client.

    :return: the connected RPC client
    :raises Exception: re-raises connection failures; previously they were
        swallowed, so the `with` block silently received None as the client.
    """
    try:
        self.transport.open()
        logger.info("Nisa RPC Client connected.")
        return self.client
    except Exception as e:
        logger.error(e)
        raise
def _get_from_redis_to_bigchain_mysql(key):
    """Drain one redis log list into bigchaindb + mysql.

    Process:
    1. Fetch the latest logs for the given key; if there are more than
       MAX_LOG_COUNT entries, store them in bigchaindb in batches of
       MAX_LOG_COUNT, then delete the logs from redis.
    2. After storing into bigchaindb, save the transaction id and private
       key into mysql (done inside _save_2_bdp_mysql).
    :return: None
    """
    global client
    # try:
    log_len = client.llen(key)
    if log_len == 0:
        logger.debug('the key %s length is 0 .' % key)
        return
    if log_len > MAX_LOG_COUNT:
        logger.info(log_len)
        # Number of full MAX_LOG_COUNT-sized batches.
        i = int(log_len / MAX_LOG_COUNT)
        for x in range(i):
            array_list = client.lrange(key, x * MAX_LOG_COUNT,
                                       (x + 1) * MAX_LOG_COUNT - 1)
            if array_list:
                _save_2_bdp_mysql(array_list)
                logger.debug("===========================")
        # Remaining tail after the full batches.
        array_list = client.lrange(key, i * MAX_LOG_COUNT, log_len)
        if array_list:
            _save_2_bdp_mysql(array_list)
        # For testing, the delete below is commented out so the data is not
        # removed and test data remains available.
        # NOTE(review): collapsed source makes it ambiguous whether this
        # delete is live or commented; the comment above claims it is
        # commented out — confirm against version control.
        # client.delete(key)
    else:
        # Pop everything.
        # NOTE(review): lpop() pops only a SINGLE element, not the whole
        # list — this branch likely loses the remaining entries. Confirm
        # intent (lrange + delete would drain the list).
        array_list = client.lpop(key)
        if array_list:
            _save_2_bdp_mysql(array_list)
def isRunning(self):
    """Ask the Clamav RPC service whether a scan is running.

    :return: result of the remote isRunning call
    """
    logger.info("Invoke Clamav-RPC isRunning().")
    running = self.client.isRunning()
    return running
def publish_confirmed(self, channel, message, message_confirmed_key, task_id):
    """Publish a task, then poll redis for its confirmation.

    :param channel: channel name to publish on
    :param message: message payload
    :param message_confirmed_key: redis key polled for the 'confirmed' flag
    :param task_id: task id (used in log/return messages)
    :return: (True, msg) once confirmed, (False, msg) after ~3s timeout
    """
    # Publish the task.
    self.redis.rdb.publish(channel, message)
    # Poll every 10 ms, up to 300 attempts (~3 seconds total).
    for _ in range(300):
        ack = self.redis.rdb.get(message_confirmed_key)
        # Task acknowledged by the consumer.
        if ack and ack == 'confirmed':
            msg = 'GET Task %s confirmed.' % str(task_id)
            logger.info(msg)
            return True, msg
        time.sleep(0.01)
    msg = 'Task %s unconfirmed' % str(task_id)
    logger.error(msg)
    return False, msg
def sayHello(self):
    """Ping the Clamav RPC service to check connectivity.

    :return: result of the remote sayHello call
    """
    logger.info("Invoke Clamav_RPC sayHello()")
    reply = self.client.sayHello()
    return reply
def stopScan(self):
    """Abort the scan that is currently in progress.

    :return: result of the remote stopScan call
    """
    logger.info("Invoke Clamav-RPC stopScan().")
    outcome = self.client.stopScan()
    return outcome
def freshClam(self):
    """Update the virus signature database via RPC.

    :return: result of the remote freshClam call
    """
    logger.info("Invoke Clamav-RPC freshClam().")
    outcome = self.client.freshClam()
    return outcome
def clamScan(self, conf_str):
    """Scan the configured list of file paths via RPC.

    :param conf_str: scan configuration passed through to the service
    :return: result of the remote clamScan call
    """
    logger.info("Invoke Clamav-RPC clamScan().")
    outcome = self.client.clamScan(conf_str)
    return outcome
def begin(self):
    """Entry point: initialize the database, then start processing IPs.

    :return: None
    """
    logger.info("main_ip start")
    # Database first, then the long-running query loop.
    self.init_db()
    self.queryip()
def close(self):
    """Close the RPC transport, logging (never raising) any failure."""
    try:
        self.transport.close()
    except Exception as e:
        logger.error(e)
    else:
        # Only log success when close() actually completed.
        logger.info("Clamav-RPC client has closed.")
def update_auto(checks):
    """Start the freshclam auto-update daemon.

    :param checks: number of signature-update checks per day
    :return: None
    """
    try:
        # -d runs freshclam in daemon mode.
        command = 'freshclam -d --checks=%s' % checks
        os.system(command)
    except Exception as e:
        logger.info(e)
def test_alarm(request):
    """Verify alarming when a transaction queried by id has been tampered
    with (e.g. mongodb data modified underneath bigchaindb).

    :param request: GET request carrying 'tx_id'
    :return: JSON with the asset info, or an 'alarm success' payload after
        alarm + recovery handling
    """
    tx_id = request.GET.get('tx_id')
    try:
        # Query the transaction's asset info by transaction id.
        re = query_by_tx_id(tx_id, CONFIG.bdb_host, CONFIG.bdb_port)
        return HttpResponse(json.dumps({'code': '200', 'results': re}))
    except bigchaindb_driver.exceptions.TransportError as e:
        insert_alarm(1, 5)  # insert the alarm record into the alarm table
        error_info = ''
        error_info += "出现异常:资产信息被破坏, 资产ID: " + tx_id
        try:
            # Enrich the alarm text with user/asset/time details if present.
            cu = ChainUserTran.objects.get(tx_id=tx_id)
            error_info += ' 用户ID:' + cu.chain_user_id + '资产类型: ' +\
                cu.asset_type + ' 交易时间: ' +\
                cu.create_time.strftime('%Y-%m-%d %H:%M:%S %f')
        except ChainUserTran.DoesNotExist:
            logger.error('the tx_id is error,'
                         ' no transaction in table: chain_user_tran' + tx_id)
        # Perform data recovery from the backup mongo instance.
        logger.info('find error in tx_id' + tx_id + ', and in reverting now:------')
        revert_re = recover_transaction_by_tx_id(CONFIG.bdb_host,
                                                 CONFIG.mongo_port,
                                                 CONFIG.bak_mongo_host,
                                                 CONFIG.bak_mongo_port,
                                                 tx_id)
        if revert_re == 0:
            error_info += ' ,and recover the transaction successfully.'
            logger.info('The tx_id' + tx_id + ' reverted successfully.')
        else:
            logger.error('The tx_id' + tx_id + ' reverted failed.')
        # If the alarm switch is on, notify by mail and wechat.
        if CONFIG.alarm_enable == 1:
            now = datetime.datetime.now()
            now = now.strftime('%Y-%m-%d %H:%M:%S')
            send_mail(mail_to_list(), "警告信息",
                      "异常信息(" + now + "):" + error_info)
            send_wechat("区块链异常(" + now + "):" + error_info)
        return HttpResponse(
            json.dumps({
                'code': '200',
                'results': 'alarm success'
            }))
def handle_found(result):
    """Delete files flagged as infected by the scan.

    :param result: dict {filename: (status, virus_name)}; files whose
        status is "FOUND" are removed from disk.
    :return: None
    """
    logger.info("Attempt to remove found virus.")
    # dict.iteritems() is Python-2-only; items() works on both 2 and 3.
    for filename, verdict in result.items():
        if verdict[0] == "FOUND":
            os.remove(filename)
def read_update(self, content):
    """Parse new log lines, extract source/destination IP info and attack
    type, queue the results for sending, and persist the attack IPs.

    :param content: newly appended log text (newline-separated records)
    :return: None
    """
    i = 0
    # command = 'tail -n ' + str(dif_line_num) + ' ' + self.ip_log_path
    # (status, output) = subprocess.getstatusoutput(command)
    output = content.split('\n')
    logLength = len(output)
    for i in range(0, logLength):
        line = output[i]
        # A "head" line marks the start of one log record; the IP pair is
        # expected on the following line.
        head_match = self.head.findall(line)
        if head_match and i + 1 < logLength:
            tmp_ip = output[i + 1]
            tmp_ip = tmp_ip.split()
            self.get_ip(tmp_ip)
            # Intranet addresses cannot be geolocated, so skip the
            # dimension lookup for them.
            source_match = self.intranet_ip.findall(self.source_ip)
            des_match = self.intranet_ip.findall(self.des_ip)
            if source_match or des_match:
                assert isinstance(logger, object)
                logger.error(self.source_ip + " " + self.des_ip
                             + "是内网IP地址,无法解析")
            else:
                self.get_dimension_info()
            # Walk forward until the attack type (or record end) is found.
            while i + 1 < logLength:
                temp_head = i + 1
                temp_line = output[temp_head]
                type_match = self.type.findall(temp_line)
                end_match = self.end.findall(temp_line)
                # A type line followed by a '[file "/' line carries the
                # attack-type details.
                if type_match and temp_head + 1 < logLength and output[
                        temp_head + 1].find("[file \"/") != -1:
                    cont = output[temp_head + 1].split()
                    self.get_attack_type(cont)
                    logger.info(self.attack_type)
                if end_match:
                    # Record ended without a file line: classify as
                    # http-defense.
                    self.attack_type = "http-defense"
                    logger.debug("no file so http")
                if self.attack_type != 0:
                    break
                else:
                    i += 1
            # NOTE(review): the record is queued even when intranet IPs
            # skipped the dimension lookup — presumably the previous
            # dimensions are reused; confirm this is intended.
            self.ip_send_list.append(
                IPContent(self.source, self.source_dimension, self.des,
                          self.des_dimension, self.attack_type,
                          self.attack_time).__dict__)
            # Reset for the next record.
            self.attack_type = 0
    # Save the attack IP addresses to the database.
    save_attack_ip(self.ip_send_list)
def create_tables(cursor, create_scripts):
    """Execute a list of CREATE TABLE scripts on the given cursor.

    :param cursor: open MySQLdb cursor
    :param create_scripts: list of SQL DDL statements
    :return: -1 when the list is empty, 0 on a MySQL error,
        1 when every script executed successfully
    """
    if len(create_scripts) == 0:  # no sql scripts to run
        logger.info("None table to create.")
        return -1
    try:
        for script in create_scripts:
            # Execute one table-create script at a time.
            cursor.execute(script)
            # Success trace — was mistakenly logged at error level.
            logger.info("SQL Result: %s", cursor.fetchone())
    except MySQLdb.Error as e:
        logger.error("Error %d: %s" % (e.args[0], e.args[1]))
        return 0
    return 1
def queryip(self):
    """Continuously parse the attack-IP log file, polling every 2 seconds.

    Exits the process when the log file does not exist.
    :return: None
    """
    logger.info("begin to queryip attack ip info...")
    # Guard clause: bail out immediately when there is nothing to read.
    if not os.path.exists(self.ip_log_path):
        logger.error("ip_log file doesn't exist")
        exit(0)
    while True:
        self.read_file()
        time.sleep(2)
def query_version_history(service_type, host_name, root_path):
    """Fetch the version history of a protected root directory on a host.

    :param service_type: "svn" or "web"
    :param host_name: host name
    :param root_path: protected root directory
    :return: (success: bool, histories: list[dict], message: str)
    """
    version_history_list = []
    version_history_objs = None
    try:
        if service_type == "svn":
            version_history_objs = SVNVersionHistory.objects.using("dtamper_svn_mysql") \
                .filter(protect_host_name=host_name, protect_root_path=root_path).order_by("-timestamp").all()
        elif service_type == "web":
            version_history_objs = WebVersionHistory.objects.using("dtamper_web_mysql") \
                .filter(protect_host_name=host_name, protect_root_path=root_path).order_by("-timestamp").all()
        else:
            # Previously an unknown service_type left objs = None and the
            # loop below raised TypeError; fail explicitly instead.
            return False, [], 'unsupported service_type: %s' % service_type
        # Build the dict representation for every history entry.
        for obj in version_history_objs:
            obj_dict = obj.toDict()
            # Drop the (large) directory-tree field from the listing.
            # Bug fix: membership was tested on the queryset instead of
            # obj_dict, so the field was never removed.
            if 'version_tree' in obj_dict:
                del obj_dict['version_tree']
            obj_dict['protect_host_addr'] = str(obj.protect_host_addr)
            obj_dict['timestamp'] = obj.timestamp.strftime("%Y-%m-%d %H:%M:%S")
            obj_dict['changed_objects'] = obj.changed_objects.split(",")
            # svn only: convert the commit info to a list.
            if 'commits_info' in obj_dict and obj.commits_info:
                try:
                    obj_dict['commits_info'] = json.loads(obj.commits_info)
                except Exception as error:
                    obj_dict['commits_info'] = []
                    logger.error(error)
            else:
                obj_dict['commits_info'] = []
            version_history_list.append(obj_dict)
    except Exception as error:
        error_message = traceback.format_exc()
        logger.error(error_message)
        return False, [], str(error)
    success_message = 'Success. Finish fetching version histories of hostname: %s, root path: %s' % (
        host_name, root_path)
    logger.info(success_message)
    return True, version_history_list, success_message
def upload_file(request):
    """Handle an upload request and store the posted file.

    :param request: Django request carrying FILES['filename']
    :return: HttpResponse with the JSON-serialized Response object
    """
    response = Response()
    file_count = len(request.FILES)
    if file_count > 0:
        logger.info(file_count)
        try:
            uploaded = request.FILES['filename']
            # Stored under a random name with the original suffix appended.
            handle_upload(uploaded)
        except Exception as e:
            logger.error(e)
    else:
        logger.error("no files")
    return HttpResponse(json.dumps(response.__dict__))
def query_user_section(start, end, field, condition):
    """Query a slice of users, with optional fuzzy filter and ordering.

    :param start: slice start (inclusive)
    :param end: slice end (exclusive)
    :param field: "{column}-0|1" ordering spec (0 ascending, 1 descending),
        may be empty
    :param condition: fuzzy-match text for username/department/feature
    :return: {"total": n, "data": [...]} on success; None for an invalid
        range; an empty list when the query raises
    """
    logger.info("start: %s, end: %s" % (start, end))
    _update_user_list()  # refresh the user table in the database first
    if not 0 <= start < end:
        return
    try:
        qs = UserInfo.objects.all()
        total = len(qs)
        if condition:
            # Fuzzy match on username, department or feature.
            qs = qs.filter(
                Q(username__contains=condition)
                | Q(department__contains=condition)
                | Q(feature__contains=condition))
            total = len(qs)
        if field:
            parts = field.split("-")
            column = parts[0]  # column to order by
            descending = int(parts[1])  # 0 = ascending, 1 = descending
            qs = qs.order_by('-' + column if descending else column)
        rows = []
        # Convert each PO to a VO dict and attach its max feature.
        for index, po in enumerate(list(qs[start:end])):
            vo = vars(User.po2vo(po))
            vo['max_feature'] = _calc_feature(vo['user_id'])
            rows.append(vo)
        return {"total": total, "data": rows}
    except Exception as e:
        logger.error(e)
        return list()