def switch_waf_status(request):
    """Toggle the WAF rule switch for a host (turn it on or off).

    POST params: 'switch' (must be 'alarm' or 'defense'), 'option' (one of
    the rule groups; currently every option is treated as 'whole'), 'ip'.

    :param request: django request
    :return: HttpResponse with a JSON BIZ_CODE result, or an empty
        JsonResponse when updating the status table fails.
    """
    switch = request.POST["switch"]
    logger.debug(switch)
    if switch not in ('alarm', 'defense'):  # only these operations are supported
        logger.error("%s not supported", switch)
        return HttpResponse(json.dumps(BIZ_CODE['ERROR']))
    option = request.POST["option"]
    if option not in ('http', 'web', 'dataTrack', 'errorCheck', 'dos', 'whole'):
        logger.error("%s not supported", option)
        return HttpResponse(json.dumps(BIZ_CODE['ERROR']))
    ip = request.POST["ip"]
    # TODO: only switching everything at once is implemented for now, so
    # whatever option was posted is treated as 'whole'.
    result = switch_alerm_or_defense(switch, ip)
    if result == 0:
        # Fix: the original code updated octa_waf_host_status twice on the
        # success path; a single update is sufficient.
        try:
            OctaWafHostStatus.objects.filter(ip=ip).update(whole=switch)
        except Exception as e:
            logger.error(e)
            return JsonResponse({})
        ret = BIZ_CODE['SUCCESS']
    else:
        ret = BIZ_CODE['ERROR']
    logger.info(ret)
    return HttpResponse(json.dumps(ret))
def _get_from_redis_to_bigchain_mysql(key):
    """Move the log entries stored under *key* in redis into bigchaindb.

    Flow (translated from the original docstring):
    1. Fetch the latest logs for the key; when there are more than
       MAX_LOG_COUNT entries, save them in MAX_LOG_COUNT-sized chunks,
       then save the remainder, and finally delete the redis key.
    2. After storing into bigchaindb, the transaction id and private key
       are recorded in mysql (done inside ``_save_2_bdp_mysql``).

    :param key: redis list key holding the log entries
    :return: None
    """
    global client  # module-level redis client -- TODO confirm where it is set up
    # try:
    log_len = client.llen(key)
    if log_len == 0:
        logger.debug('the key %s length is 0 .' % key)
        return
    if log_len > MAX_LOG_COUNT:
        logger.info(log_len)
        # number of full MAX_LOG_COUNT-sized chunks
        i = int(log_len / MAX_LOG_COUNT)
        for x in range(i):
            array_list = client.lrange(key, x * MAX_LOG_COUNT,
                                       (x + 1) * MAX_LOG_COUNT - 1)
            if array_list:
                _save_2_bdp_mysql(array_list)
            logger.debug("===========================")
        # trailing partial chunk after the last full chunk
        array_list = client.lrange(key, i * MAX_LOG_COUNT, log_len)
        if array_list:
            _save_2_bdp_mysql(array_list)
        # Original (translated) note said this delete was commented out for
        # testing so test data would not be removed, yet the call below is
        # active -- NOTE(review): confirm whether the key should be deleted.
        client.delete(key)
    else:
        # NOTE(review): lpop returns a single element, but the original
        # comment says "pop all"; small keys may only ever drain one entry
        # per call -- verify intended behaviour.
        array_list = client.lpop(key)
        if array_list:
            _save_2_bdp_mysql(array_list)
def invoke_rpc_repair(contents):
    """Replay every repair item in *contents* through the blackbox RPC.

    :param contents: mapping of key -> list of dicts carrying 'content',
        'host', 'ip' and 'id' entries
    :return: True when everything was dispatched, False on any error
    """
    try:
        """
        result = TrustLog.objects.filter(id=log_id, state=0)
        if not result:
            return False
        confirm_item = result[0]
        """
        with BlackBoxRPCClient(BLACKBOX_RPC_IP, BLACKB0X_RPC_PORT) as client:
            for items in contents.values():
                for item in items:
                    pack = RepairMessage(item['content'], item['host'],
                                         item['ip'])
                    logger.debug(vars(pack))
                    rpc_result = client.command(json.dumps(vars(pack)))
                    logger.debug(rpc_result)
                    # mark the matching trustlog row as repaired
                    fixed = TrustLog.objects.get(id=item['id'])
                    fixed.state = 1
                    fixed.save()
            logger.info("Invoking RPC to repair.")
            return True
    except Exception as e:
        logger.error(e)
        return False
def db_user_create(conn, cursor, user_info):
    """Create a MySQL user from ``user_info`` ('user' and 'password' keys).

    :return: result of ``sql_execute`` for the CREATE USER statement
    """
    # bind user name and password as SQL arguments
    args = [user_info["user"], user_info["password"]]
    ret = sql_execute(conn, cursor, "CREATE USER %s IDENTIFIED BY %s", args)
    logger.debug("User: %s is created." % user_info["user"])
    return ret
def __new__(cls, *args, _lock=Lock(), **kwargs):
    """Create (or return) the process-wide RabbitMQ connection singleton.

    ``_lock`` is deliberately a mutable default: it is evaluated once at
    definition time, so every thread synchronises on the SAME lock.  The
    original code created a fresh ``Lock()`` per call, which made the
    double-checked locking ineffective.

    :return: the shared RabbitMQConnection instance (``cls._instance``)
    """
    logger.debug("init rabbitMQ instance")
    if cls._instance is None:
        with _lock:  # double-checked locking for thread safety
            if cls._instance is None:
                try:
                    cls._instance = super(RabbitMQConnection, cls).__new__(cls)
                    credentials = pika.PlainCredentials('8lab', '8lab')
                    cls._connection = pika.BlockingConnection(
                        # heartbeat_interval=0 disables the server heartbeat
                        # so idle connections are not closed by timeout
                        pika.ConnectionParameters(
                            host=LOCAL_CONFIG.rabbitmq_server,
                            port=LOCAL_CONFIG.rabbitmq_port,
                            virtual_host='/',
                            credentials=credentials,
                            heartbeat_interval=0
                        )
                    )
                    logger.info("Message server(%s:%s) started." %
                                (LOCAL_CONFIG.rabbitmq_server,
                                 LOCAL_CONFIG.rabbitmq_port))
                except Exception as e:
                    logger.error(e)
    return cls._instance
def update_warninglist(request):
    """Delete, disable or enable warning-list entries.

    POST params: 'data' is a '#'-separated id string with a trailing '#';
    'type' selects the action: "0" delete, "1" disable, "2" enable.

    :return: HttpResponse with JSON {'code': '1'} on success, '0' on error
    """
    response_result = {}
    try:
        ids = request.POST.get("data")
        action_type = request.POST.get("type")
        id_list = ids[:-1].split('#')  # strip the trailing '#'
        logger.debug(id_list)
        rows = WarningList.objects.filter(id__in=id_list)
        if action_type == "0":
            rows.delete()
        elif action_type == "1":
            rows.update(enabled=0)
        elif action_type == "2":
            rows.update(enabled=1)
        clear_mail_to_list_cache()  # drop the cached copy in redis
        response_result = {'code': '1'}
    except Exception as e:
        response_result = {'code': '0'}
        logger.error(e)
    finally:
        return HttpResponse(json.dumps(response_result))
def audit_add_old(request): """ 添加数据库审计用户或者行为 该方案暂用,见新 audit_add 方法 :param request:req :return:json """ # return_dict = {} return_dict = {"code": "201"} # 设置默认值为"201",表示本次操作失败 _type = request.POST.get("type") ip = request.POST.get("ip") value = request.POST.get("value") db_type = request.POST.get("db_type").lower() # 目前,前端传过来的是 空字符串 if db_type == "": # 设置,mysql为默认数据库类型 db_type = "mysql" if _type == "2" and value not in ACTION_LIST: # 如果不合法,直接返回 return HttpResponse(json.dumps({'code': '202'})) conn, cursor = mysql_base_api.sql_init(jc.mysql_host, jc.mysql_user, jc.mysql_pass, jc.mysql_database, jc.mysql_port) sql = "select count(id) as count from white_user_action WHERE type ='" \ + _type + "' and ip = '" + ip + "' and value = '" + value + "' and db_type = '" + db_type + "'" # 当添加的已经存在,返回给前端300值 # 查询之指定的白名单用户或行为,是否已经添加 result = mysql_base_api.select_onesql(cursor, sql) # result= ({'count': 1},) if result[0]['count'] > 0: # 如果>0,则表明相应的白名单用户或行为已经添加 # mysql_base_api.sql_close(conn, cursor) return_dict['code'] = "300" # 已经存在,用户白名单添加重复 return HttpResponse(json.dumps(return_dict)) try: if db_type == 0: # 0 mysql audit_rpc = AuditRpcClient(ip, AUDIT_RPC_PORT) rpc_result = audit_rpc.add(value, _type) logger.debug(rpc_result) if rpc_result == 0: return_dict['code'] = "200" # insert_action_name = {'white_user_action': [ # {"value": value, "ip": ip, 'type': _type, 'db_type': db_type}]} # mysql_base_api.insert_row(conn, cursor, insert_action_name) # mysql_base_api.sql_close(conn, cursor) else: return_dict['code'] = "201" elif db_type == 1: # 1 postgresql insert_action_name = {'white_user_action': [ {"value": value, "ip": ip, 'type': _type, 'db_type': db_type}]} mysql_base_api.insert_row(conn, cursor, insert_action_name) mysql_base_api.sql_close(conn, cursor) return_dict['code'] = "200" except Exception as e: logger.error(e) return_dict['code'] = "201" mysql_base_api.sql_close(conn, cursor) return HttpResponse(json.dumps(return_dict)) return HttpResponse(json.dumps(return_dict))
def invoke_rpc_writelog():
    """Ask the blackbox service to start writing the eth0 log.

    :return: the RPC reply, or None when the call fails
    """
    try:
        with BlackBoxRPCClient(BLACKBOX_RPC_IP, BLACKB0X_RPC_PORT) as rpc:
            reply = rpc.command("/8lab/blackbox -w -n eth0")
            logger.debug(reply)
            return reply
    except Exception as e:
        logger.error(e)
def delete_user(id):
    """Remove the UserInfo row with the given primary key.

    Note: the parameter shadows the builtin ``id``; the name is kept so
    keyword callers keep working.

    :return: Status(True) on success, Status(False) on error, None when
        ``id`` is falsy
    """
    try:
        if id:
            UserInfo.objects.filter(id=id).delete()
            logger.debug(connection.queries)
            return Status(True)
    except Exception as e:
        logger.error(e)
        return Status(False)
def add_user(user):
    """Persist a new User as a UserInfo row.

    :param user: a User instance; other types are ignored (returns None)
    :return: Status(True) on success, Status(False) on database error,
        None when ``user`` is not a User
    """
    if isinstance(user, User):
        try:
            # Fix: expand the attribute dict as field kwargs. The original
            # ``UserInfo(vars(user))`` passed the whole dict positionally,
            # assigning it to the first model field instead of the columns.
            UserInfo(**vars(user)).save()
            logger.debug(connection.queries)
            return Status(True)
        except Exception as e:
            logger.error(e)
            return Status(False)
def clear_mail_to_list_cache():
    """Delete the cached mail-to list key from redis (best effort).

    Failures are logged and swallowed -- the cache simply stays stale.
    """
    try:
        # local renamed from ``re`` which shadowed the regex module name
        cache = redis.Redis(CONF.redis4bigchanidb_host,
                            CONF.redis4bigchanidb_port)
        delete_result = cache.delete(MAIL_TO_KEY)
        logger.debug(delete_result)
    except Exception as e:
        # lazy %-formatting; also fixes the original concatenated message
        # which ran the key into "error" with no separating space
        logger.error("clear redis key %s error:--", MAIL_TO_KEY)
        logger.error(e)
def _get_watcherlab_count_info():
    """Collect situational-awareness statistics from elasticsearch.

    Samples up to 5000 random 'watcherlab*' documents, extracts the distinct
    location prefixes, then counts the documents per location.

    :return: list of {'name': location, 'value': count-as-string} dicts
        (empty on error)
    """
    sample_size = 5000
    stats = []
    try:
        es = Elasticsearch(LOCAL_CONFIG.es_server_ip_port)
        sample_query = {
            "query": {},
            "size": sample_size,
            # random sort so the sample is not biased toward recent documents
            "sort": {
                "_script": {
                    "script": "Math.random()",
                    "type": "number",
                    "order": "asc"
                }
            }
        }
        response = es.search(index='watcherlab*', body=sample_query,
                             ignore_unavailable=True)
        hits = response['hits']['hits']
        locations = set()
        if response['hits']['total']:
            for hit in hits:
                # keep only the part of the location before the first '-'
                locations.add(hit['_source']['location'].split('-')[0])
        for place in locations:
            count_query = {
                "query": {
                    "bool": {
                        "must": {
                            "match_phrase": {
                                "location": "[" + place + "]"
                            }
                        }
                    }
                }
            }
            response = es.search(index='watcherlab*', body=count_query,
                                 ignore_unavailable=True)
            stats.append({'name': place,
                          'value': str(response['hits']['total'])})
    except Exception as e:
        logger.error(e)
        logger.error(traceback.format_exc())
    finally:
        logger.debug(stats)
        return stats
def update_user(user):
    """Persist the writable fields of *user* to its UserInfo row.

    :param user: a User instance with a set ``id``
    :return: Status(True) on success, Status(False) on error, None when
        the argument is not an updatable User
    """
    logger.debug(vars(user))
    if not (isinstance(user, User) and user.id):
        return None
    try:
        UserInfo.objects.filter(id=user.id).update(**user.writable_fields())
        return Status(True)
    except Exception as e:
        logger.error(e)
        return Status(False)
def read_update(self, content):
    """Parse updated WAF log text, queue attack-IP records, and save them.

    Scans *content* line by line; a ``self.head`` match marks the start of a
    record whose next line holds the IP pair.  Intranet IPs are rejected;
    otherwise dimension info is resolved and the following lines are scanned
    for the attack type before appending an IPContent dict to
    ``self.ip_send_list``.

    :param content: raw log text (newline separated); replaces the old
        tail-of-file reading (the docstring's ``dif_line_num`` parameter no
        longer exists)
    :return: None
    """
    i = 0
    # command = 'tail -n ' + str(dif_line_num) + ' ' + self.ip_log_path
    # (status, output) = subprocess.getstatusoutput(command)
    output = content.split('\n')
    logLength = len(output)
    for i in range(0, logLength):
        line = output[i]
        head_match = self.head.findall(line)
        if head_match and i + 1 < logLength:
            # the line after the header holds the IP information
            tmp_ip = output[i + 1]
            tmp_ip = tmp_ip.split()
            self.get_ip(tmp_ip)
            source_match = self.intranet_ip.findall(self.source_ip)
            des_match = self.intranet_ip.findall(self.des_ip)
            if source_match or des_match:
                # intranet addresses cannot be resolved -- skip this record
                assert isinstance(logger, object)
                logger.error(self.source_ip + " " + self.des_ip
                             + "是内网IP地址,无法解析")
            else:
                self.get_dimension_info()
                # walk forward until the attack type is identified
                while i + 1 < logLength:
                    temp_head = i + 1
                    temp_line = output[temp_head]
                    type_match = self.type.findall(temp_line)
                    end_match = self.end.findall(temp_line)
                    if type_match and temp_head + 1 < logLength and output[
                            temp_head + 1].find("[file \"/") != -1:
                        cont = output[temp_head + 1].split()
                        self.get_attack_type(cont)
                        logger.info(self.attack_type)
                    if end_match:
                        # no [file "/...] marker before the end tag
                        self.attack_type = "http-defense"
                        logger.debug("no file so http")
                    if self.attack_type != 0:
                        break
                    else:
                        # NOTE(review): this advances the loop variable of
                        # the enclosing ``for``, which reassigns ``i`` on its
                        # next iteration anyway -- confirm lines are meant to
                        # be re-scanned.
                        i += 1
                self.ip_send_list.append(
                    IPContent(self.source, self.source_dimension, self.des,
                              self.des_dimension, self.attack_type,
                              self.attack_time).__dict__)
                # reset for the next record
                self.attack_type = 0
    # save the attack ip addresses to the database
    save_attack_ip(self.ip_send_list)
def insert_row_nocommit(cursor, insert_data):
    """Insert rows without committing and return their primary keys.

    :param insert_data: mapping of table name -> list of {column: value}
        dicts, one dict per row
    :return: list of primary keys of the inserted rows
    """
    if not insert_data:  # nothing to insert
        logger.debug("None data to insert.")
        return []
    primary_keys = []
    for table, rows in insert_data.items():
        for row in rows:
            statement = build_insertsql(table, row)
            # execute with the row's values as bound arguments
            primary_keys.append(
                sql_execute_keyreturn(None, cursor, statement, row.values()))
    return primary_keys
def drop_tables(cursor, tables_list):
    """Drop every table named in ``tables_list``.

    :return: -1 when the list is empty, otherwise 1 (even when a MySQL
        error occurred -- errors are only logged)
    """
    if not tables_list:  # nothing to drop
        logger.error("None table to drop.")
        return -1
    try:
        for table in tables_list:
            # NOTE(review): the table name is interpolated, not bound --
            # callers must not pass untrusted names
            cursor.execute("DROP TABLE %s" % table)
            logger.debug("SQL Result: %s", cursor.fetchone())
    except MySQLdb.Error as e:
        logger.error("Error %d: %s" % (e.args[0], e.args[1]))
    return 1
def _init_tables(self):
    """Seed the WAF status tables on first use.

    octa_waf_host_status gets one row per zmq-client host from the config
    file (all switches default to 'on'); octa_global_setting gets one 'on'
    row per WAF rule parameter.  Existing rows are left untouched.
    """
    conn, cursor = mysql_base_api.sql_init(LOCAL_CONFIG.mysql_host,
                                           LOCAL_CONFIG.mysql_user,
                                           LOCAL_CONFIG.mysql_pass,
                                           LOCAL_CONFIG.mysql_database,
                                           LOCAL_CONFIG.mysql_port)
    for host in LOCAL_CONFIG.client_audit_hosts:
        host_ip = host['ip']
        host_name = host['name']
        existing = mysql_base_api.sql_execute(
            conn, cursor,
            "select count(id) as count from octa_waf_host_status WHERE ip = %s ",
            [host_ip])
        if existing[0]['count'] == 0:
            mysql_base_api.sql_execute(
                conn, cursor,
                "insert into octa_waf_host_status(ip, name, http, web, "
                "dataTrack, errorCheck, dos, whole) VALUES (%s, %s, 'on', "
                "'on', 'on', 'on', 'on', 'on')", [host_ip, host_name])
            logger.debug(
                "the ip :%s ,the name %s insert to table: octa_waf_host_status successfully",
                host_ip, host_name)
    for param in ('http', 'web', 'dataTrack', 'errorCheck', 'dos', 'all'):
        existing = mysql_base_api.sql_execute(
            conn, cursor,
            "select count(id) as count from octa_global_setting WHERE param = %s",
            [param])
        if existing[0]['count'] == 0:
            mysql_base_api.sql_execute(
                conn, cursor,
                "insert into octa_global_setting (param,value) VALUES (%s,'on')",
                [param])
            logger.debug(
                "the key :%s not exist, insert to table:octa_global_setting successfully",
                param)
    # release the database connection
    mysql_base_api.sql_close(conn, cursor)
def get_detail(cls):
    """Drain and return the queued clamav scan details as JSON.

    :return: JSON string of a FileScanDetail; an empty detail object when
        no messages are queued
    """
    pending = len(cls.detail_queue)
    if pending > 0:
        files = cls.detail_queue.copy()
        cls.detail_queue.clear()
        logger.debug("Fetch %s clamav scan messages." % pending)
        return json.dumps(FileScanDetail(file=files).__dict__)
    # queue is empty right now
    logger.debug('No clamav scan message to fetch.')
    return json.dumps(FileScanDetail().__dict__)
def manual_train(arg, ttype="normal"):
    """Kick off a training task over RPC.

    :param arg: training payload
    :param ttype: "normal" full training, "later" incremental training,
        "status" to query the current task state
    :return: the RPC result, or an empty dict on failure
    """
    try:
        logger.debug(arg)
        with NisaRpcClient() as rpc:
            return rpc.train(arg, ttype)
    except Exception as e:
        logger.error(e)
        return dict()
def db_restore(restore_info):
    """Restore one or more databases from a mysqldump backup file.

    ``restore_info`` keys: 'user', 'password', 'file', 'dblist'.
    :return: None (nothing to do when 'dblist' is empty)
    """
    user = restore_info["user"]
    password = restore_info["password"]
    backup_file = restore_info["file"]
    databases = restore_info["dblist"]
    if not databases:
        logger.debug("None database to resotre.")
        return
    if len(databases) == 1:
        # a single database must be named explicitly on the command line
        cmd = "mysql -u %s --password=%s %s < %s" % (user, password,
                                                     databases[0], backup_file)
    else:
        # a multi-database dump already carries its own USE statements
        cmd = "mysql -u %s --password=%s < %s" % (user, password, backup_file)
    # NOTE(review): shell command assembled from dict values -- make sure
    # they are trusted / shell-safe before this is called
    os.system(cmd)
def app_pass_check(cursor, userinfo):
    """Verify an application user's password against the stored hash.

    :param cursor: database cursor passed through to ``sql_execute``
    :param userinfo: dict with 'table', 'username' and 'password' keys
    :return: True when the password matches the stored pbkdf2 hash,
        False when it does not match or the user does not exist
    """
    table = userinfo["table"]
    user = userinfo["username"]
    passwd = userinfo["password"]
    # The table name cannot be a bound parameter, so it is interpolated;
    # the username IS bound.  This also fixes the original format string,
    # which had two arguments for a single '%s' and always raised TypeError.
    sql = "SELECT password FROM %s WHERE username=%%s" % table
    # fetch the stored password hash for this user
    pass_result = sql_execute(None, cursor, sql, [user])
    if len(pass_result) > 0:
        pass_hash = pass_result[0]["password"]
        # return whether the supplied password matches the stored hash
        return pbkdf2_sha256.verify(passwd, pass_hash)
    logger.debug("username: %s not exists.", user)
    return False
def switch(request):
    """Enable or disable a chain user ('is_active' "1" enables, "0" disables).

    :return: HttpResponse JSON with code 200 on success, 201 on failure
    """
    user_id = request.POST.get('user_id')
    is_active = request.POST.get('is_active')  # "1" enable, "0" disable
    logger.debug(is_active)
    try:
        target = ChainUser.objects.filter(id=user_id)
        target.update(is_active=is_active)
        return HttpResponse(json.dumps({'code': 200, 'results': 'success'}))
    except Exception as e:
        logger.error(e)
        return HttpResponse(json.dumps({'code': 201, 'results': '操作失败'}))
def delete_login_session(request):
    """Drop the tamper-proof session bound to the request's cookie.

    :param request: django request carrying the session cookie
    :return: '' when no session cookie is present, otherwise None
    """
    if COOKIE_NAME not in request.COOKIES:  # no cookie, nothing to remove
        return ''
    key = request.COOKIES.get(COOKIE_NAME)
    try:
        red.delete("tamper_proof_session:" + key)
        logger.debug('remove key %s successfully' % key)
    except Exception as e:
        logger.error(e)
def get_model(user_id):
    """Fetch the user-profile model over RPC.

    :param user_id: user ID; a falsy value yields an empty dict
    :return: the model returned by the RPC, or an empty dict on failure
    """
    try:
        if user_id:
            with NisaRpcClient() as rpc_client:
                result = rpc_client.getmodel(user_id)
                logger.debug(result)
                return result
    except Exception as e:
        logger.error(e)
    # Fix: a falsy user_id previously fell through returning None while the
    # error path returned dict(); callers now always get a mapping.
    return dict()
def db_backup(backup_info):
    """Dump the listed databases to a backup file with mysqldump.

    ``backup_info`` keys: 'user', 'password', 'file', 'dblist'.
    :return: 1 when the dump was dispatched, -1 when there is nothing to do
    """
    user = backup_info["user"]
    password = backup_info["password"]
    backup_file = backup_info["file"]
    databases = backup_info["dblist"]
    if not databases:
        logger.debug("None db to backup.")
        return -1
    names = ' '.join(databases)  # database list as "db1 db2 ..."
    logger.debug(names)
    # --password on the command line avoids mysqldump's interactive prompt
    cmd = "mysqldump -u %s --password=%s --databases %s > %s" % (
        user, password, names, backup_file)
    os.system(cmd)
    return 1
def _upload_photo(request):
    """Store an uploaded avatar and return its media URL.

    :return: the media URL of the stored file, or None when nothing was
        posted or the file fails the type/size check
    """
    from app_fuzhou.api.handlers.fileupload import handle_upload
    upload = request.FILES.get("file")
    if not upload:
        return
    extension = upload.name.split(".")[-1]
    # enforce allowed extensions and the maximum photo size
    if extension in LEGAL_PHOTO_TYPE and upload.size < MAX_PHOTO_SIZE:
        saved_name = handle_upload(upload, CHAIN_USER_PHOTO_DIR)
        if saved_name:
            logger.debug(saved_name)
            return os.path.join(MEDIA_URL, CHAIN_USER_PHOTO_DIR, saved_name)
def insert_row(conn, cursor, insert_data):
    """Insert rows, commit, and return the new primary keys.

    :param insert_data: mapping of table name -> list of {column: value}
        dicts, one dict per row
    :return: list of primary keys of the inserted rows
    """
    if not insert_data:  # nothing to insert
        logger.debug("None data to insert.")
        return []
    primary_keys = []
    for table, rows in insert_data.items():
        for row in rows:
            # build "INSERT INTO table (cols...) VALUES (%s, ...)" and run it
            # with the row's values as bound arguments
            statement = build_insertsql(table, row)
            primary_keys.append(
                sql_execute_keyreturn(None, cursor, statement, row.values()))
        conn.commit()
    return primary_keys
def get_peak_trans_count(request):
    """Estimate the peak transaction count of the chain.

    Steps (translated from the original docstring):
    1. Read the newest block timestamp.
    2. Read the oldest block timestamp, skipping the first ``skip`` seed /
       test transactions (a crude data-cleaning step, since those few rows
       badly skew the result).
    3. Scale the average rate by the empirical FACTOR.

    :return: HttpResponse JSON {'count': value}; 0 when there is no data
        or on any error
    """
    client = MongoClient(CONFIG.bdb_host, CONFIG.mongo_port)
    return_key = 'count'
    skip = 50  # number of initial test transactions to ignore
    try:
        collection = client.get_database('octachain').get_collection('bigchain')
        first_time = 0
        last_time = 1
        oldest = collection.find().sort([('block.timestamp', ASCENDING)
                                         ]).skip(skip).limit(1)
        for doc in oldest:
            first_time = int(doc['block']['timestamp'])
        newest = collection.find().sort([('block.timestamp', DESCENDING)
                                         ]).limit(1)
        for doc in newest:
            last_time = int(doc['block']['timestamp'])
        total = collection.find().count()
        if total <= skip:  # effectively no real transactions yet
            return HttpResponse(json.dumps({return_key: 0}))
        total -= skip
        logger.debug(total)
        logger.debug(last_time - first_time)
        peak = round(total / (last_time - first_time) * FACTOR, 2)
        client.close()
        return HttpResponse(json.dumps({return_key: peak}))
    except Exception as e:
        client.close()
        logger.error(e)
        return HttpResponse(json.dumps({return_key: 0}))
def insert_many(cursor, insert_data):
    """Bulk-insert rows per table via executemany.

    :param insert_data: mapping of table name -> list of {column: value}
        dicts; all rows of a table must share the column layout of the
        first row, since one INSERT statement is built from it
    :return: list with one ``sql_executemany`` result per table; [] when
        there is nothing to insert or a table has an empty row list
    """
    if len(insert_data) == 0:  # nothing to insert
        logger.debug("None data to insert.")
        return []
    pmkey = []
    for table, data in insert_data.items():
        if len(data) == 0:
            return []
        sql = build_insertsql(table, data[0])
        # Fix: build a fresh argument list per table -- the original kept
        # appending across tables, sending earlier tables' rows to the wrong
        # INSERT statement.  Also materialize dict_values into lists.
        args = [list(row.values()) for row in data]
        pmkey.append(sql_executemany(None, cursor, sql, args))
    return pmkey
def stop_s_block(ips):
    """Turn off senior (advanced) blocking on a group of hosts.

    :param ips: iterable of host IPs to act on
    :return: the last RPC result, or None on error / when no host matched
    """
    try:
        result = None
        for host in BlackboxHost.objects.filter(hostip__in=ips):
            msg = StopSeniorBlock(host.hostname, host.hostip)
            with BlackBoxRPCClient(BLACKBOX_RPC_IP,
                                   BLACKB0X_RPC_PORT) as client:
                result = client.command(json.dumps(vars(msg)))
                logger.debug(result)
        logger.info("Invoking RPC to stop senior block.")
        return result
    except Exception as e:
        logger.error(e)