import os
import sys
import urllib.parse

import requests
from lxml import etree

# conf, paths, payloads, outputscreen, th and the loadSingleDict / loadMultDict /
# loadSuffix / generate* / urlSimilarCheck helpers come from the project's own
# core modules and are imported elsewhere in the package.


def ScanModeHandler():
    '''
    @description: Select the scan mode and load the payloads. Only one mode
                  can be loaded at a time. TODO: allow running several modes
                  in one pass.
    @param {type}
    @return:
    '''
    if conf.recursive_scan:
        outputscreen.warning('[*] Use recursive scan: Yes')
    else:
        outputscreen.warning('[*] Use recursive scan: No')
    if conf.dict_mode:
        outputscreen.warning('[*] Use dict mode')
        if conf.dict_mode == 1:
            return loadSingleDict(conf.dict_mode_load_single_dict)
        elif conf.dict_mode == 2:
            return loadMultDict(conf.dict_mode_load_mult_dict)
        else:
            outputscreen.error("[-] You must select a dict")
            sys.exit()
    elif conf.blast_mode:
        outputscreen.warning('[*] Use blast mode')
        outputscreen.warning('[*] Use char set: {}'.format(
            conf.blast_mode_custom_charset))
        outputscreen.warning('[*] Use payload min length: {}'.format(
            conf.blast_mode_min))
        outputscreen.warning('[*] Use payload max length: {}'.format(
            conf.blast_mode_max))
        return generateBlastDict()
    # TODO: crawl URLs recursively
    elif conf.crawl_mode:
        outputscreen.warning('[*] Use crawl mode')
        headers = {}
        headers["User-Agent"] = ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) "
                                 "AppleWebKit/537.36 (KHTML, like Gecko) "
                                 "Chrome/56.0.2924.87 Safari/537.36")
        response = requests.get(conf.url, headers=headers, timeout=5)
        if response.status_code in conf.response_status_code:
            html = etree.HTML(response.text)
            # parse the HTML with the user-defined XPath expression
            urls = html.xpath(conf.crawl_mode_parse_html)
            for url in urls:
                # skip URLs that are too similar to ones already collected
                if urlSimilarCheck(url):
                    # check: 1. same domain as the target, or 2. empty netloc
                    # (an empty netloc means a relative link, i.e. same domain);
                    # if either holds, add the URL to the temp payloads
                    if (urllib.parse.urlparse(url).netloc
                            == urllib.parse.urlparse(conf.url).netloc
                        ) or urllib.parse.urlparse(url).netloc == '':
                        payloads.crawl_mode_dynamic_fuzz_temp_dict.add(url)
            payloads.crawl_mode_dynamic_fuzz_temp_dict = \
                payloads.crawl_mode_dynamic_fuzz_temp_dict - {'#', ''}
            # load the suffixes; TODO: split dynamic dict generation into its
            # own module and consider moving this path into a config file
            loadSuffix(os.path.join(paths.DATA_PATH, 'crawl_mode_suffix.txt'))
            # generate the new URLs
            for i in payloads.crawl_mode_dynamic_fuzz_temp_dict:
                payloads.crawl_mode_dynamic_fuzz_dict.extend(generateCrawlDict(i))
            return payloads.crawl_mode_dynamic_fuzz_dict
    elif conf.fuzz_mode:
        outputscreen.warning('[*] Use fuzz mode')
        if conf.fuzz_mode == 1:
            return generateSingleFuzzDict(conf.fuzz_mode_load_single_dict)
        if conf.fuzz_mode == 2:
            return generateMultFuzzDict(conf.fuzz_mode_load_mult_dict)
    else:
        outputscreen.error("[-] You must select a scan mode")
        sys.exit()
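
# A minimal sketch (not part of the original module) of the same-domain test
# that crawl mode performs above: urlparse().netloc is empty for relative
# links, so they count as same-domain, while absolute links must match the
# target's netloc exactly.
def _demo_same_domain_check():
    base = urllib.parse.urlparse('http://example.com/index.php')
    for link in ('http://example.com/admin/', '/login.php', 'http://evil.com/x'):
        netloc = urllib.parse.urlparse(link).netloc
        print(link, netloc == base.netloc or netloc == '')
    # -> True, True, False: only the cross-domain link is rejected
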
def scanModeHandler():
    '''
    @description: Handle the scan modes and load the payloads.
    @param {type}
    @return:
    '''
    if conf.recursive_scan:
        msg = '[*] Use recursive scan: Yes'
        outputscreen.warning('\r' + msg + ' ' * (th.console_width - len(msg) + 1))
    else:
        msg = '[*] Use recursive scan: No'
        outputscreen.warning('\r' + msg + ' ' * (th.console_width - len(msg) + 1))
    payloadlists = []
    # fuzz mode: can only be loaded on its own
    if conf.fuzz_mode:
        outputscreen.warning('[*] Use fuzz mode')
        if conf.fuzz_mode == 1:
            return generateSingleFuzzDict(conf.fuzz_mode_load_single_dict)
        if conf.fuzz_mode == 2:
            return generateMultFuzzDict(conf.fuzz_mode_load_mult_dict)
    # the other modes can be combined with each other
    else:
        if conf.dict_mode:
            outputscreen.warning('[*] Use dict mode')
            if conf.dict_mode == 1:
                payloadlists.extend(loadSingleDict(conf.dict_mode_load_single_dict))
            elif conf.dict_mode == 2:
                payloadlists.extend(loadMultDict(conf.dict_mode_load_mult_dict))
            else:
                outputscreen.error("[-] You must select a dict")
                sys.exit()
        if conf.blast_mode:
            outputscreen.warning('[*] Use blast mode')
            outputscreen.warning('[*] Use char set: {}'.format(
                conf.blast_mode_custom_charset))
            outputscreen.warning('[*] Use payload min length: {}'.format(
                conf.blast_mode_min))
            outputscreen.warning('[*] Use payload max length: {}'.format(
                conf.blast_mode_max))
            payloadlists.extend(generateBlastDict())
        # TODO: crawl URLs recursively
        if conf.crawl_mode:
            outputscreen.warning('[*] Use crawl mode')
            # custom headers, given as 'header=value,header=value'
            headers = {}
            if conf.request_headers:
                try:
                    for header in conf.request_headers.split(','):
                        k, v = header.split('=')
                        headers[k] = v
                except Exception as e:
                    outputscreen.error(
                        "[x] Check personalized headers format: "
                        "header=value,header=value.\n[x] error:{}".format(e))
                    sys.exit()
            # custom User-Agent
            if conf.request_header_ua:
                headers['User-Agent'] = conf.request_header_ua
            # custom Cookie
            if conf.request_header_cookie:
                headers['Cookie'] = conf.request_header_cookie
            try:
                response = requests.get(conf.url,
                                        headers=headers,
                                        timeout=conf.request_timeout,
                                        verify=False,
                                        allow_redirects=conf.redirection_302,
                                        proxies=conf.proxy_server)
            except requests.exceptions.ConnectionError:
                outputscreen.error(
                    "[x] Crawler network connection error! Please check "
                    "whether the target is accessible")
                sys.exit()
            # collect the URLs on the page
            if response.status_code in conf.response_status_code:
                try:
                    contentDecode = response.content.decode('utf-8')
                except UnicodeDecodeError:
                    try:
                        contentDecode = response.content.decode('gbk')
                    except UnicodeDecodeError:
                        outputscreen.error("[x] Unrecognized page encoding")
                        sys.exit()
                html = etree.HTML(contentDecode)
                # parse the HTML with the user-defined XPath expression
                urls = html.xpath(conf.crawl_mode_parse_html)
                for url in urls:
                    # skip URLs that are too similar to ones already collected
                    if urlSimilarCheck(url):
                        # check: 1. same domain as the target, or 2. empty
                        # netloc (an empty netloc means a relative link, i.e.
                        # same domain); if either holds, add it to the temp
                        # payloads
                        if (urllib.parse.urlparse(url).netloc
                                == urllib.parse.urlparse(conf.url).netloc
                            ) or urllib.parse.urlparse(url).netloc == '':
                            payloads.crawl_mode_dynamic_fuzz_temp_dict.add(url)
                payloads.crawl_mode_dynamic_fuzz_temp_dict = \
                    payloads.crawl_mode_dynamic_fuzz_temp_dict - {'#', ''}
                if conf.crawl_mode_dynamic_fuzz:
                    # load the dynamic fuzz suffixes; TODO: split dynamic dict
                    # generation into its own module
                    loadSuffix(os.path.join(paths.DATA_PATH,
                                            conf.crawl_mode_dynamic_fuzz_suffix))
                    # generate the crawler's dynamic dictionary
                    for i in payloads.crawl_mode_dynamic_fuzz_temp_dict:
                        payloads.crawl_mode_dynamic_fuzz_dict.extend(
                            generateCrawlDict(i))
                    for i in payloads.crawl_mode_dynamic_fuzz_temp_dict:
                        payloads.crawl_mode_dynamic_fuzz_dict.append(
                            urllib.parse.urlparse(i).path)
                    payloadlists.extend(set(payloads.crawl_mode_dynamic_fuzz_dict))
                else:
                    for i in payloads.crawl_mode_dynamic_fuzz_temp_dict:
                        payloads.crawl_mode_dynamic_fuzz_dict.append(
                            urllib.parse.urlparse(i).path)
                    payloadlists.extend(set(payloads.crawl_mode_dynamic_fuzz_dict))
    if payloadlists:
        return payloadlists
    else:
        outputscreen.error(
            "[-] You have to select at least one mode, please check the mode config")
        sys.exit()
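
# A hedged sketch of what a charset blast generator can look like. This is an
# assumption for illustration, not the project's actual generateBlastDict():
# it expands every combination of the charset between the min and max payload
# lengths with itertools.product, matching the char set / min length / max
# length options logged in blast mode above.
def _demo_generate_blast_dict(charset='abc', min_len=1, max_len=2):
    import itertools
    out = []
    for length in range(min_len, max_len + 1):
        out.extend(''.join(p) for p in itertools.product(charset, repeat=length))
    return out

# e.g. _demo_generate_blast_dict() ->
# ['a', 'b', 'c', 'aa', 'ab', 'ac', 'ba', 'bb', 'bc', 'ca', 'cb', 'cc']

# Hypothetical caller sketch (not from the original source): the engine would
# typically call scanModeHandler() once and hand each payload to the workers:
#
#     payloadlist = scanModeHandler()
#     for payload in payloadlist:
#         tasks.put(conf.url.rstrip('/') + '/' + payload.lstrip('/'))
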