def process_vulnerability(self, request, module_name, module_info, scan_result):
    """Record a confirmed vulnerability to redis (and CSV if enabled), then log it.

    :param request: the originating request object (provides id, url, raw text)
    :param module_name: name of the POC module that reported the finding
    :param module_info: the module's info dict with "poc" and "vul" sections
    :param scan_result: value returned by the POC; an optional "Ret" key carries
                        extra detail about the finding
    """
    request_id = request.id  # renamed from `id` to avoid shadowing the builtin
    url = request.url
    raw_request = request.raw
    poc_details = module_info["poc"]
    vul_info = module_info["vul"]
    vulnerability_name = poc_details["Name"]
    vulnerability_product = vul_info["Product"]
    vulnerability_severity = vul_info["Severity"].strip().upper()
    # "Ret" is optional and scan_result may not even be a mapping; default to ""
    # explicitly instead of swallowing arbitrary exceptions with a bare pass.
    if isinstance(scan_result, dict):
        vulnerability_extra_info = scan_result.get("Ret", "")
    else:
        vulnerability_extra_info = ""
    self.redis_record(request_id, vulnerability_severity, vulnerability_name, url,
                      vulnerability_product, vulnerability_extra_info, module_name)
    if self.output_csv:
        self.csv_record([
            vulnerability_severity, vulnerability_name, url, vulnerability_product,
            vulnerability_extra_info, raw_request, module_name
        ])
    msg = 'Disclose a vulnerability %s at %s' % (vulnerability_name, url)
    logger.success(msg)
def precheck_list():
    """Sanitize the redis "waiting"/"running" lists before the scanner starts.

    1. Drop ids from both lists whose request payload no longer exists.
    2. Re-schedule the first `tasks_limitation` entries of "running"; push any
       overflow back onto "waiting" and trim "running" to the limit.
    """
    def _existence_check(listname):
        # Snapshot the list first, then remove stale ids. The original removed
        # with lrem while iterating by lindex position, which shifts indices
        # and can skip stale entries.
        stale_ids = [rid for rid in redis.conn.lrange(listname, 0, -1)
                     if redis.retrieve_request(rid) is None]
        for rid in stale_ids:
            redis.conn.lrem(listname, 0, rid)

    _existence_check("waiting")
    _existence_check("running")

    # Pre-process the running list: anything beyond the task limit goes back
    # to "waiting"; the rest is resumed immediately.
    threshold = scanner.task_manager.tasks_limitation
    running_list_len = redis.conn.llen("running")
    if running_list_len == 0:
        return
    for request_id in redis.conn.lrange("running", 0, threshold - 1):
        request = redis.retrieve_request(request_id)
        new_task = Task(Request(request))
        scanner.task_manager.add(new_task, False)
    if running_list_len > threshold:
        for request_id in redis.conn.lrange("running", threshold, -1):
            redis.conn.lpush("waiting", request_id)
        redis.conn.ltrim("running", 0, threshold - 1)
    logger.success("Finish Pre-check.")
def update_config(conf, path):
    """Serialize *conf* as JSON to *path* and reload the global configuration.

    The previous implementation pretty-printed by chaining str.replace() over
    the json.dumps() output (inserting newlines at braces/commas and swapping
    quote characters). That corrupts any value that itself contains "{", "}",
    ", " or "'", and can emit invalid JSON. json.dump with indent is safe.

    :param conf: configuration mapping to persist
    :param path: destination file path
    """
    with open(path, 'w') as con:
        json.dump(conf, con, indent=4)
    logger.success("Update the configuration %s" % path)
    load_config()
def result_parser():
    """Consume POC results from the shared queue until the scanner stops."""
    parser = ResultParser()
    logger.success("Initialize the Result Parser.")
    while common.scanner_status:
        # Poll the queue; back off briefly when it is empty.
        if poc_result.queue.qsize() == 0:
            time.sleep(2)
            continue
        parser.parse(poc_result.queue.get(timeout=1.0))
def get_request(self):
    """Block (up to 10s) for the next request id on 'waiting'.

    :return: the stored request payload, or None on timeout / unexpected key.
    """
    popped = self.conn.blpop("waiting", 10)
    # blpop yields a (list_name, value) pair, or None on timeout.
    if not popped or popped[0] != b"waiting":
        return None
    result = self.retrieve_request(popped[1])
    logger.success("Retrieve one request from 'waiting'.")
    return result
def redis_connection_check():
    """Try to establish the redis connection.

    :return: True on success, False on failure (the error is logged, not raised).
    """
    try:
        redis.build_connection()
    except Exception as e:
        # Include the underlying exception so the operator can tell a wrong
        # address from a server that is simply down (the original bound `e`
        # but never reported it).
        msg = "Fail to build connection with Redis. Please modify the configure file, " \
              "check the redis status and restart. %s" % str(e)
        logger.error(msg)
        return False
    else:
        logger.success("Build connection with redis")
        return True
def __init__(self):
    """Wire up the task manager and shared queues; bail out if scanning is halted."""
    if not common.scanner_status:
        logger.warning(
            "Controller Initialization Stopped due to halted scanning status."
        )
        # Neutralize run(): bind an arbitrary no-op callable in its place so a
        # halted controller does nothing when started.
        self.run = bool
        return None
    logger.success('Initialize controller...')
    scanner.task_manager = TaskManager()
    # Shared queues: one feeds the POC workers, one collects their results.
    th.queue = Queue.Queue()
    poc_result.queue = Queue.Queue()
    self.precheck_list()
def add(self, task, modify_redis):
    """Register *task*, start its scan, and optionally move its id to 'running'.

    :param task: Task object to schedule
    :param modify_redis: when True, move the request id from "waiting" to
                         "running" in redis after scheduling
    :return: False for a duplicate task id, otherwise None
    """
    if task.id in self.tasks:
        logger.info("Duplicate task received:%s" % task.id)
        return False
    # Throttle: block until a slot is free under the configured limit.
    while self.running_task_num >= self.tasks_limitation:
        sleep(1)
    logger.success("Add new task into the TaskManager %s" % task.id)
    self.tasks[task.id] = task
    task.scan()
    self.running_task_num += 1
    if modify_redis:
        # Move the request id from "waiting" to "running".
        redis.run_task(task.id)
def request_producer():
    """Pull requests from redis and feed them to the task manager until stopped."""
    logger.success("Initialize the Request Producer.")
    while common.scanner_status:
        # Fetch the next queued request; idle briefly when none is available.
        request = redis.get_request()
        if request is None:
            sleep(1)
            continue
        request_obj = Request(request)
        # Requests rejected by the filter are purged from redis and skipped.
        if filter_request(request_obj) == "filtered":
            redis.delete_request(request_obj.id)
            continue
        scanner.task_manager.add(Task(request_obj), True)
def _load_module(_name, _path):
    """Import the custom script *_name* from directory *_path* and register it.

    On import/lookup failure the process exits via sys.exit(logger.error(...)).

    :param _name: module name (file name without .py)
    :param _path: directory containing the script
    """
    try:
        spec = imp.find_spec(_name, [_path])
        module = imp.module_from_spec(spec)
        spec.loader.exec_module(module)
        module_check(module)
        scanner.module_obj.append(module)
        # Report success only once the module actually loaded and passed
        # validation — the original logged success before even trying,
        # so failed loads were announced as successes.
        logger.success('Load custom script: %s at %s' % (_name, _path))
    except ImportError as e:
        error_msg = "Fail to import [%s.py] at %s\n%s" \
                    % (_name, _path, '[Error Msg]: ' + str(e))
        sys.exit(logger.error(error_msg))
    except AttributeError as e:
        error_msg = "Fail to find [%s.py] at %s\n%s" \
                    % (_name, _path, '[Error Msg]: ' + str(e))
        sys.exit(logger.error(error_msg))
def scan():
    """Worker loop: take (module, request) pairs off th.queue and run the POC."""
    while common.scanner_status:
        if th.queue.qsize() == 0:
            gevent.sleep(1)
            continue
        task = th.queue.get(timeout=1.0)
        # If an error raised inside a POC were left unhandled, the thread
        # framework would stop and exit.
        try:
            module, request = task
            module_info = module.poc_info
            module_name = module.__name__
            logger.info("Start poc: %s at %s" % (module_name, request.url))
            scan_result = module.poc(request)
            logger.success("Finish poc: %s at %s" % (module_name, request.url))
            poc_result.queue.put(
                [request, module_name, module_info, scan_result])
        except Exception as e:
            th.errmsg = traceback.format_exc()
            logger.error(str(e))
def get(self):
    """Render the scan-config page; with ?restore, reset the config to defaults."""
    if "restore" in self.request.arguments:
        try:
            with open(DEFAULT_CONF_FILE, 'r') as handler:
                default_configuration = json.loads(handler.read())
            update_config(default_configuration, CHECK_CONF_FILE)
        except Exception as e:
            logger.error("Fail to restore the default configuration.%s" % str(e))
            # Fall back to re-writing the current in-memory configuration.
            update_config(common.conf, CHECK_CONF_FILE)
        else:
            common.conf = default_configuration
            logger.success("Restored default configuration.")
    # Mark the HTTP methods currently enabled so the template can pre-check them.
    scan_methods = {"GET": "", "POST": "", "DELETE": "", "PUT": ""}
    options = common.conf["scan_methods"].split(",")
    for m in options:
        key = m.strip().upper()
        # Set the flag on the normalized key: the original did
        # scan_methods[m] = "checked", so a lowercase or space-padded entry
        # created a stray key and the real method stayed unchecked.
        if key in scan_methods:
            scan_methods[key] = "checked"
    return self.render("config.html", config=common.conf, scan_methods=scan_methods)
def web_init():
    """Configure and start the Tornado web application (blocks in the IO loop)."""
    define("port", default=int(common.conf["port"]), type=int)
    define("address", default=common.conf["ip"])
    tornado.options.parse_command_line()
    path = lambda root, *a: os.path.join(root, *a)
    # Web application settings
    settings = {}
    settings['static_path'] = path(RUNNING_PATH, "web", "static")
    settings['template_loader'] = tornado.template.Loader(path(RUNNING_PATH, "web", "templates"))
    settings['login_url'] = "/login"
    settings['debug'] = False
    site.addsitedir(path(RUNNING_PATH, 'handlers'))
    # Start the web service
    app = make_app(settings)
    app.listen(port=options.port, address=options.address)
    logger.success("Web app start at: http://%s:%s" % (options.address, options.port))
    # Silence tornado's own loggers BEFORE entering the IO loop. The original
    # placed these assignments after start(), which blocks until the loop is
    # stopped, so they never took effect.
    logging.getLogger("tornado.application").disabled = True
    logging.getLogger("tornado.general").disabled = True
    logging.getLogger("tornado").disabled = True
    # Blocks here until the IO loop is stopped.
    tornado.ioloop.IOLoop.current().start()
def get(self):
    """Toggle the scanner on/off based on the sanitized ?stat argument, then redirect."""
    stat = secure.clear(self.get_argument("stat"))
    if stat == "False":
        common.scanner_status = False
        logger.success("Stop the scanner.")
    else:
        common.scanner_status = True
        # Run the scanner in a daemon thread so this handler returns immediately.
        thread = threading.Thread(target=create_scanner, args=())
        thread.setDaemon(True)
        thread.start()
        # Log once — the original emitted "Start the scanner." twice.
        logger.success("Start the scanner.")
    return self.write(out.jump("/scan_config"))
def scan(self):
    """Queue this task's request against every loaded POC module."""
    for poc_module in scanner.module_obj:
        th.queue.put([poc_module, self.request])
    logger.success("Adding new scan for %s" % self.url)
def init_engine():
    """Reset the shared engine counters and record the scan start time."""
    th.threads_num = int(common.conf["threads_num"])
    th.scan_count = 0
    th.found_count = 0
    th.start_time = time.time()
    logger.success('Initialize the Engine.')