def main():
    """Scheduled task: delete raw-data ES indices that have passed their expiry.

    Reads per-category retention (days) from ``main_config.raw_data_expire``
    and removes, for each write-index prefix, the daily index that is exactly
    that many days old.
    """
    logger.info("run scheduled task: delete expired index")
    es = ElasticHelper()
    raw_data_expire = main_config.raw_data_expire
    # Map each write-index prefix to its configured retention in days.
    delete_map = {
        ElasticConfig.event_log_write_index_prefix: raw_data_expire["dc_log"],
        ElasticConfig.traffic_write_index_prefix: raw_data_expire["dc_krb5"],
        ElasticConfig.krb5_ticket_write_index_prefix: raw_data_expire["krb5_ticket"],
        ElasticConfig.user_activity_write_index_prefix: raw_data_expire["user_activity"]
    }
    for index_prefix, expire in delete_map.items():
        # Daily indices are suffixed with a log date; the one `expire` days
        # old is the next to fall outside the retention window.
        ago = get_n_days_ago(expire)
        date = datetime_to_log_date(ago)
        index_name = index_prefix + date
        try:
            es.delete_index(index_name)
            logger.info(
                "delete index {name} successfully.".format(name=index_name))
        except NotFoundError:
            # Index may already be gone (e.g. never created for that day).
            # Fix: Logger.warn is a deprecated alias; use warning().
            logger.warning("index {name} not found.".format(name=index_name))
def check_mq_connection() -> bool:
    """Return True when the message queue is reachable, else log and return False."""
    consumer = Consumer()
    if consumer.check_connection():
        logger.info("Connect to the MQ successfully, OK.")
        return True
    logger.error("Can't connect to the MQ, please reconfirm the settings.")
    return False
def delay_run(self):
    """Delayed-detection loop.  ** Run in a single process only! **

    Polls every 5 seconds for records whose delayed check is due, re-runs
    the matching detect modules on them, then clears the confirmed records.
    """
    self.load()
    logger.info("status: delay process running")
    while True:
        time.sleep(5)
        pending = self._get_delay_data()
        for record in pending:
            code = record["_delay_info"]["alert_code"]
            if record["type"] == "wineventlog":
                log = Log(record)
                self._run_analyze(data=log,
                                  data_type=log.event_id,
                                  modules_map=self.event_log_modules_map,
                                  alert_code=code)
        # Drop the records whose delayed detection has completed.
        self._clear_confirmed_data(pending)
def init_sensitive_groups(domain):
    """Resolve SIDs for the default sensitive groups via LDAP and persist them.

    Groups shipped with a known SID are kept as-is; the rest are looked up in
    the directory and skipped when not found.  The result is merged into the
    ``sensitive_entry`` setting in Mongo and mirrored into Redis.
    """
    logger.info("init sensitive groups.")
    domain = get_netbios_domain(domain)
    ldap_search = LDAPSearch(domain)
    redis = RedisHelper()
    mongo = MongoHelper(uri=MongoConfig.uri,
                        db=MongoConfig.db,
                        collection=MongoConfig.settings_collection)
    sensitive_groups = []
    for group in default_sensitive_groups(domain):
        if len(group["sid"]) > 0:
            # SID already known; no directory lookup needed.
            sensitive_groups.append(group)
            continue
        entry = ldap_search.search_by_name(group["name"],
                                           attributes=["objectSid"])
        if not entry or len(entry.entry_attributes_as_dict["objectSid"]) == 0:
            # Group absent from the directory — skip it.
            continue
        group["sid"] = entry.entry_attributes_as_dict["objectSid"][0]
        sensitive_groups.append(group)
    logger.info(",".join(g["name"] for g in sensitive_groups))
    sensitive_entry = mongo.find_one({"name": "sensitive_entry"})["value"]
    sensitive_entry["group"] = sensitive_groups
    mongo.update_one({"name": "sensitive_entry"},
                     {"$set": {"value": sensitive_entry}},
                     upsert=True)
    redis.set_str_value("sensitive_entry" + REDIS_KEY_SUFFIX,
                        simplejson.dumps(sensitive_entry))
def _load_module(self, name: str, data_type: str) -> dict:
    """Import every detect module under ``modules/detect/<name>``.

    Each module declares a list attribute named *data_type* (e.g. EVENT_ID);
    the returned dict maps each declared value to the list of handler entries
    ``{"code": ALERT_CODE or None, "object": <instance>}`` registered for it.
    """
    modules_map = {}
    base_dir = project_dir + "/modules/detect/" + name
    for file_path in get_walk_files(base_dir):
        rel_path = file_path.replace(project_dir, ".")
        module_path, cls_name = format_module_path(rel_path)
        module = __import__(module_path, fromlist=[cls_name])
        logger.info("loaded module: " + module_path)
        handled_types = getattr(module, data_type)
        assert isinstance(handled_types, list)
        for d_type in handled_types:
            # One fresh entry (and module-class instance) per registration,
            # matching the original behavior.
            modules_map.setdefault(d_type, []).append({
                "code": getattr(module, "ALERT_CODE", None),
                "object": getattr(module, cls_name)()
            })
    return modules_map
def check_mongo_connection() -> bool:
    """Return True when MongoDB is reachable, else log and return False."""
    mongo = MongoHelper(MongoConfig.uri)
    if mongo.check_connection():
        logger.info("Connect to the MongoDB successfully, OK.")
        return True
    logger.error("Can't connect to the MongoDB, please reconfirm the settings.")
    return False
def stop():
    """Shut down the supervisord instance running the WatchAD detect engine."""
    logger.info("Stopping the WatchAD detect engine ...")
    cmd = "supervisorctl -c {root_dir}/supervisor.conf shutdown".format(
        root_dir=project_dir)
    env = {"WATCHAD_ENGINE_DIR": project_dir, "WATCHAD_ENGINE_NUM": "5"}
    if subprocess.call(cmd, shell=True, env=env) == 0:
        logger.info("Stopped!")
    else:
        logger.error("Stop failed.")
def start():
    """Run the environment checks, then launch supervisord for the engine."""
    if not check():
        sys.exit(-1)
    logger.info("Starting the WatchAD detect engine ...")
    cmd = "supervisord -c {root_dir}/supervisor.conf".format(
        root_dir=project_dir)
    env = {"WATCHAD_ENGINE_DIR": project_dir, "WATCHAD_ENGINE_NUM": "5"}
    if subprocess.call(cmd, shell=True, env=env) == 0:
        logger.info("Started!")
    else:
        logger.error("Start failed.")
def start(self):
    """Engine main entry point: load detect modules, then consume the MQ."""
    self.load()
    consumer = Consumer()
    # do_analyze is invoked for every message the consumer receives.
    logger.info("start MQ consumer and register callback func.")
    logger.info("status: main process running")
    consumer.run(self.do_analyze)
def set_learning_end_time_setting():
    """Persist the learning-phase end time (now + 10 days, UTC) to Mongo and Redis."""
    name = "learning_end_time"
    value = move_n_days(datetime_utc_now_obj(), 10)
    logger.info("set learning end time: " + str(value))
    mongo = MongoHelper(uri=MongoConfig.uri,
                        db=MongoConfig.db,
                        collection=MongoConfig.settings_collection)
    mongo.update_one(filter={"name": name},
                     doc={"$set": {"value": value}},
                     upsert=True)
    redis = RedisHelper()
    redis.set_str_value(name + REDIS_KEY_SUFFIX,
                        datetime_to_common_str(value))
def install(domain, server, user, password):
    """One-shot WatchAD installation: seed ES templates, settings and crontab."""
    logger.info("Install the WatchAD ...")
    # Initialise the Elasticsearch index templates.
    init_es_template()
    # Store the LDAP connection settings.
    init_ldap_settings(domain, server, user, password)
    # Look up the domain controller names and persist them.
    get_all_dc_names(domain)
    # Initialise the remaining default settings.
    init_default_settings(domain)
    # Seed the default sensitive groups.
    init_sensitive_groups(domain)
    # Learning phase ends a fixed number of days after install time.
    set_learning_end_time_setting()
    # Register the scheduled maintenance tasks.
    set_crontab_tasks()
def init_ldap_settings(domain, server, user, password):
    """Save the LDAP connection settings for *domain* to Mongo and Redis."""
    logger.info("init the ldap configuration.")
    # Normalise the server address to an ldap:// URL.
    if not server.startswith("ldap://"):
        server = "ldap://" + server
    doc = {
        domain: {
            "server": server,
            "user": user,
            "password": password,
            "dn": get_dn_domain_name(domain)
        }
    }
    mongo = MongoHelper(uri=MongoConfig.uri,
                        db=MongoConfig.db,
                        collection=MongoConfig.settings_collection)
    mongo.update_one(filter={"name": "ldap"},
                     doc={"$set": {"value": doc}},
                     upsert=True)
    redis = RedisHelper()
    redis.set_str_value("ldap" + REDIS_KEY_SUFFIX, simplejson.dumps(doc))
def set_crontab_tasks():
    """Register the scheduled jobs in the current user's crontab:

    1. skeleton-key scan — every 2 minutes
    2. expired-index cleanup — daily at 00:00
    """
    logger.info("set crontab tasks.")
    cron = CronTab(user=True)

    # Skeleton-key scan, every 2 minutes.
    scan_cmd = ('/usr/bin/python3 {project_dir}/scripts/skeleton_key_scan.py '
                '>/dev/null 2>&1').format(project_dir=project_dir)
    scan_job = cron.new(command=scan_cmd)
    scan_job.minute.every(2)
    scan_job.set_comment("skeleton_job")
    logger.info("set skeleton_key_scan every 2 min.")

    # Expired-index cleanup, once a day at midnight.
    delete_cmd = ('/usr/bin/python3 {project_dir}/scripts/delete_index.py '
                  '>/dev/null 2>&1').format(project_dir=project_dir)
    delete_job = cron.new(command=delete_cmd)
    delete_job.day.every(1)
    delete_job.hour.on(0)
    delete_job.minute.on(0)
    delete_job.set_comment("delete_index_job")
    logger.info("set delete_index_job every day.")

    cron.write()
def init_default_settings(domain):
    """Write every default setting to MongoDB and mirror it into Redis.

    Mongo is the durable store (one document per setting name); Redis holds a
    hot copy whose serialization depends on the setting.  NOTE(review): the
    branch order below matters — the name-specific branches must run before
    the generic isinstance() fallbacks, so do not reorder them.
    """
    logger.info("init other settings.")
    redis = RedisHelper()
    mongo = MongoHelper(uri=MongoConfig.uri,
                        db=MongoConfig.db,
                        collection=MongoConfig.settings_collection)
    for name, value in default_settings.items():
        # The installed domain replaces the placeholder domain list.
        if name == "domain_list":
            value = [domain]
        mongo.update_one(filter={"name": name},
                         doc={"$set": {
                             "value": value
                         }},
                         upsert=True)
        key = name + REDIS_KEY_SUFFIX
        # Name-specific handling first: these settings have a fixed Redis form.
        if name in [
                "domain_list", "VPN_ip_part",
                "detail_file_share_white_list_setting"
        ]:
            # Stored as a Redis list; set_list requires at least one element.
            if len(value) != 0:
                redis.set_list(key, *value)
        elif name in [
                "raw_data_expire", "honeypot_account", "alarms_merge",
                "sensitive_entry", "kerberos"
        ]:
            # Stored as a JSON string.
            redis.set_str_value(key, simplejson.dumps(value))
        elif name in ["brute_force_max"]:
            # Stored as a plain stringified number.
            redis.set_str_value(key, str(value))
        # Generic fallbacks keyed on the value's Python type.
        elif isinstance(value, list):
            if len(value) > 0 and isinstance(value[0], dict):
                # List of dicts cannot go into a Redis list — JSON-encode it.
                redis.set_str_value(key, simplejson.dumps(value))
            else:
                if len(value) != 0:
                    redis.set_list(key, *value)
        elif isinstance(value, str):
            redis.set_str_value(key, value)
        elif isinstance(value, dict):
            redis.set_str_value(key, simplejson.dumps(value))
        elif isinstance(value, int):
            redis.set_str_value(key, str(value))
def stop():
    """Stop all detection processes, then shut down supervisord itself."""
    logger.info("Stopping the WatchAD detect engine ...")
    env = {
        "WATCHAD_ENGINE_DIR": project_dir,
        "WATCHAD_ENGINE_NUM": str(ENGINE_PROCESS_NUM)
    }
    conf_path = "{root_dir}/supervisor.conf".format(root_dir=project_dir)

    # First stop the managed detection processes ...
    stop_rsp = subprocess.call("supervisorctl -c " + conf_path + " stop all",
                               shell=True,
                               env=env)
    if stop_rsp == 0:
        logger.info("Stopped detection processes.")
    else:
        logger.error("Stop failed.")

    # ... then shut down the supervisord daemon itself.
    shutdown_rsp = subprocess.call("supervisorctl -c " + conf_path + " shutdown",
                                   shell=True,
                                   env=env)
    if shutdown_rsp == 0:
        logger.info("Shutdown WatchAD.")
    else:
        logger.error("Shutdown WatchAD failed.")
def check_es_template() -> bool:
    """Check that every expected index template exists in Elasticsearch."""
    logger.info("Check the elasticsearch index template.")
    es = ElasticHelper()
    for name in template_map:
        # Fail fast on the first missing template.
        if not es.exists_template(name=name):
            logger.info(
                "template \"{name}\" ---> not exist.".format(name=name))
            logger.error("Check the elasticsearch template fail.")
            return False
        logger.info("template \"{name}\" ---> exist.".format(name=name))
    logger.info("Check the elasticsearch template successfully, OK.")
    return True
def check() -> bool:
    """Run all pre-start environment checks; True only when every one passes."""
    logger.info("Checking the WatchAD environment ...")
    # Order matters for log readability: ES templates, then Mongo, then MQ.
    for probe in (check_es_template, check_mongo_connection,
                  check_mq_connection):
        if not probe():
            return False
    logger.info("OK!")
    logger.info("Check the WatchAD environment successfully!")
    return True
def init_es_template():
    """(Re)install the Elasticsearch index templates, replacing any existing ones."""
    logger.info("init the elasticsearch index template.")
    es = ElasticHelper()
    for name, body in template_map.items():
        # Drop any stale copy so the put below installs a fresh template.
        if es.exists_template(name=name):
            logger.info(
                "template \"{name}\" already exists, delete it.".format(
                    name=name))
            es.delete_template(name=name)
        logger.info("put template \"{name}\" ...".format(name=name))
        es.put_template(name=name, body=body)
        logger.debug(es.get_template(name))
def get_all_dc_names(domain: str):
    """Find all domain controllers via LDAP and persist the name list."""
    domain = get_netbios_domain(domain)
    logger.info("Search all domain controllers using LDAP.")
    ldap_search = LDAPSearch(domain)
    dc_name_list = [
        str(entry["cn"]) for entry in ldap_search.search_domain_controller()
    ]
    mongo = MongoHelper(MongoConfig.uri, MongoConfig.db,
                        MongoConfig.settings_collection)
    doc = {domain: dc_name_list}
    logger.info(",".join(dc_name_list))
    logger.info(
        "domain controller count: {count}".format(count=len(dc_name_list)))
    logger.info("Save all domain controllers to settings.")
    mongo.update_one({"name": "dc_name_list"}, {"$set": {"value": doc}}, True)
    redis = RedisHelper()
    redis.set_str_value("dc_name_list" + REDIS_KEY_SUFFIX,
                        simplejson.dumps(doc))
def load(self):
    """Load the event-log detect modules, keyed by Windows event ID."""
    logger.info("loading detect modules based on event_log")
    modules_map = self._load_module("event_log", "EVENT_ID")
    self.event_log_modules_map = modules_map