def bruter(url):
    '''
    @description: scan plugin entry point — builds the task queue for one target
                  and drives the gevent worker pool until it is drained
    @param {url: target to scan}
    @return: None (results are reported through the shared outputscreen/tasks state)
    '''
    # Parse the target URL.
    conf.parsed_url = urllib.parse.urlparse(url)

    # Prepend a scheme when none was given, then re-parse.
    if conf.parsed_url.scheme != 'http' and conf.parsed_url.scheme != 'https':
        url = 'http://' + url
        conf.parsed_url = urllib.parse.urlparse(url)

    # Global target url used by the crawl/fuzz modules.
    # XXX: must be set BEFORE the trailing-slash fixup, otherwise fuzz mode
    # would build urls like https://target.com/phpinfo.{dir}/
    conf.url = url

    # Ensure the url ends with a slash so payloads concatenate cleanly.
    if not url.endswith('/'):
        url = url + '/'

    # Print the current target, padding with spaces to overwrite the progress bar line.
    msg = '[+] Current target: {}'.format(url)
    outputscreen.success('\r' + msg + ' ' * (th.console_width - len(msg) + 1))

    # Auto 404 detection: probe the target first to learn its not-found signature.
    if conf.auto_check_404_page:
        outputscreen.warning("[*] Launching auto check 404")
        # Autodiscriminator (probably deprecated by future diagnostic subsystem)
        inspector = Inspector(url)
        (result, notfound_type) = inspector.check_this()
        if notfound_type == Inspector.TEST404_MD5 or notfound_type == Inspector.TEST404_OK:
            conf.autodiscriminator_md5.add(result)

    # Load payloads for the configured scan mode.
    payloads.all_payloads = scanModeHandler()
    # Guard against a mode handler that failed to produce payloads; iterating
    # None below would otherwise raise an opaque TypeError.
    if payloads.all_payloads is None:
        outputscreen.error('[x] load payloads error! plz check scan mode config!')
        sys.exit()

    # FIXME: file extension is applied by simple concatenation over every payload.
    try:
        if conf.file_extension:
            outputscreen.warning('[+] Use file extentsion: {}'.format(
                conf.file_extension))
            payloads.all_payloads = [
                payload + conf.file_extension for payload in payloads.all_payloads
            ]
    except Exception:
        # Narrowed from a bare except: we still abort, but no longer mask
        # KeyboardInterrupt/SystemExit.
        outputscreen.error('[+] plz check extension!')
        sys.exit()

    # Debug mode: dump all payloads and exit.
    if conf.debug:
        outputscreen.blue('[+] all payloads:{}'.format(payloads.all_payloads))
        sys.exit()

    # Enqueue one url per payload.
    for payload in payloads.all_payloads:
        # FIXME: fuzz mode builds the url from scheme+netloc so the payload
        # itself may contain path components.
        if conf.fuzz_mode:
            url_payload = conf.parsed_url.scheme + '://' + conf.parsed_url.netloc + payload
        else:
            url_payload = url + payload
        tasks.all_task.put(url_payload)

    # Set the progress-bar length. In recursive/crawl mode the total task count
    # is unknown, so only elapsed time is shown.
    if not conf.recursive_scan:
        # NOTE: total = payload count * target count, fixes issue#2
        tasks.task_length = len(payloads.all_payloads) * conf.target_nums
        bar.log.start(tasks.task_length)

    # FIXME: workers cannot drain the whole queue in one batch; spawn
    # request_limit greenlets per round. This also works around the
    # gevent hub.LoopExit bug.
    while not tasks.all_task.empty():
        workers = [gevent.spawn(boss) for _ in range(conf.request_limit)]
        gevent.joinall(workers)
def bruter(url):
    '''
    @description: scan plugin entry point — builds the task queue for one target
                  and drives the gevent worker pool until it is drained
    @param {url: target to scan}
    @return: None (results are reported through the shared outputscreen/tasks state)
    '''
    # Global target url used by the crawl/fuzz modules. FIXME
    conf.url = url

    # Parse the target URL.
    conf.parsed_url = urllib.parse.urlparse(url)

    # Prepend a scheme when none was given, then re-parse.
    if conf.parsed_url.scheme != 'http' and conf.parsed_url.scheme != 'https':
        url = 'http://' + url
        conf.parsed_url = urllib.parse.urlparse(url)

    # Ensure the url ends with a slash so payloads concatenate cleanly.
    if not url.endswith('/'):
        url = url + '/'

    # Auto 404 detection: probe the target first to learn its not-found signature.
    if conf.auto_check_404_page:
        outputscreen.warning("[*] Launching auto check 404")
        # Autodiscriminator (probably deprecated by future diagnostic subsystem)
        inspector = Inspector(url)
        (result, notfound_type) = inspector.check_this()
        if notfound_type == Inspector.TEST404_URL:
            # 404 responses redirect; remember the redirect location.
            conf.autodiscriminator_location = result
            outputscreen.success("[+] 404 ---> 302 ----> {}".format(
                conf.autodiscriminator_location))
        elif notfound_type == Inspector.TEST404_MD5:
            # 404 responses share a page body; remember its md5.
            conf.autodiscriminator_md5 = result
            outputscreen.success("[+] 404 ---> PAGE_MD5 ----> {}".format(
                conf.autodiscriminator_md5))

    # Load payloads for the configured scan mode and verify the load succeeded.
    payloads.all_payloads = scanModeHandler()
    if payloads.all_payloads is None:  # was `== None`; identity test is correct for None
        outputscreen.error('[x] load payloads error!')
        if conf.dict_mode:
            outputscreen.error('[x] plz check dict mode config!')
        if conf.blast_mode:
            outputscreen.error('[x] plz check blast mode config!')
        if conf.crawl_mode:
            outputscreen.error('[x] plz check crawl mode config!')
        if conf.fuzz_mode:
            outputscreen.error('[x] plz check fuzz mode config!')
        sys.exit()

    # FIXME: file extension is applied by simple concatenation over every payload.
    try:
        if conf.file_extension:
            outputscreen.warning('[+] Use file extentsion: {}'.format(
                conf.file_extension))
            payloads.all_payloads = [
                payload + conf.file_extension for payload in payloads.all_payloads
            ]
    except Exception:
        # Narrowed from a bare except. Deliberately best-effort: the scan
        # continues without the extension applied, matching original behavior.
        outputscreen.error('[+] plz check extension!')

    # Debug mode: dump all payloads and exit.
    if conf.debug:
        outputscreen.blue('[+] all payloads:{}'.format(payloads.all_payloads))
        sys.exit()

    # Enqueue one url per payload.
    for payload in payloads.all_payloads:
        # FIXME: fuzz mode builds the url from scheme+netloc so the payload
        # itself may contain path components.
        if conf.fuzz_mode:
            url_payload = conf.parsed_url.scheme + '://' + conf.parsed_url.netloc + payload
        else:
            url_payload = url + payload
        tasks.all_task.put(url_payload)

    # Set the progress-bar length. In recursive mode the total task count is
    # unknown, so only elapsed time is shown.
    if not conf.recursive_scan:
        tasks.task_length = tasks.all_task.qsize()
        bar.log.start(tasks.task_length)

    # FIXME: workers cannot drain the whole queue in one batch; spawn
    # request_limit greenlets per round. This also works around the
    # gevent hub.LoopExit bug.
    while not tasks.all_task.empty():
        workers = [gevent.spawn(boss) for _ in range(conf.request_limit)]
        gevent.joinall(workers)