def EngineRegister(args):
    """Select the concurrency engine and worker count from CLI arguments."""
    # Refuse conflicting engine flags outright.
    if args.engine_thread and args.engine_gevent:
        outputscreen.error(
            "Cannot use Multi-Threaded mode and Coroutine mode at the same time"
        )
        outputscreen.error(
            'Use [-eT] to set Multi-Threaded mode or [-eG] to set Coroutine mode'
        )
        sys.exit()
    elif args.engine_thread:
        # Multi-threaded engine: validate the requested thread count.
        conf.engine_mode = "multi_threaded"
        if not 1 <= args.thread_num <= 200:
            warn = '[*] Invalid input in [-t](range: 1 to 200), has changed to default(30)'
            outputscreen.warning(warn)
            conf.thread_num = 30
            return
        conf.thread_num = args.thread_num
    else:
        # Default engine: coroutine (gevent) mode.
        conf.engine_mode = 'coroutine'
def EngineRegister(args):
    """Initialise the worker-thread count from CLI arguments (default 10)."""
    requested = args.thread_num
    # Out-of-range values fall back to the default.
    if not 1 <= requested <= 100:
        warn = '[*] Invalid input in [-t](range: 1 to 100), has changed to default(10)'
        outputscreen.warning(warn)
        conf.thread_num = 10
        return
    conf.thread_num = requested
def _get_option(section, option):
    """Read a single option from the config file.

    Returns the option value as a string, or '' (after printing a warning)
    when the section/option is missing or the file cannot be parsed.
    """
    try:
        cf = ConfigParser()
        cf.read(paths.CONFIG_PATH)
        return cf.get(section=section, option=option)
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; narrow it to Exception.
    except Exception:
        outputscreen.warning(
            'Missing essential options, please check your config-file.')
        return ''
def manual_login(self):
    """Prompt for ZoomEye credentials on stdin and try to obtain a token.

    Exits the program when the credentials are rejected.
    """
    msg = '[*] Please input your ZoomEye Email and Password below.'
    outputscreen.warning(msg)
    self.username = input('[*] ZoomEye Username(Email): ').strip()
    self.password = input('[*] ZoomEye Password: ').strip()
    # NOTE(review): the original source was masked here ("'******'"); this
    # reconstruction assumes a failed token fetch aborts the run, mirroring
    # the error message that survived the masking — confirm against upstream.
    if not self.get_token():
        msg = '[-] Error ZoomEye username or password.'
        outputscreen.error(msg)
        sys.exit()
def login(self):
    """Load the Shodan API key from the config file, else prompt for it."""
    outputscreen.success(
        '[+] Trying to login with credentials in config file: %s.' % paths.CONFIG_PATH)
    self.api_key = ConfigFileParser().shodan_apikey()
    if self.api_key:
        return
    # Config lookup failed; fall back to interactive input.
    outputscreen.warning('[*] Automatic authorization failed.')
    outputscreen.warning(
        '[*] Please input your Shodan API Key (https://account.shodan.io/).')
    self.api_key = input('[*] API KEY > ').strip()
def EngineRegister(args):
    """Load the concurrency engine module (coroutine mode only)."""
    conf.engine_mode = 'coroutine'
    # Validate the requested thread count; fall back to the default of 30.
    requested = args.thread_num
    if 1 <= requested <= 200:
        conf.thread_num = requested
    else:
        outputscreen.warning(
            '[*] Invalid input in [-t](range: 1 to 200), has changed to default(30)')
        conf.thread_num = 30
def auto_login(self):
    """Try ZoomEye credentials from the config file; fall back to manual input."""
    outputscreen.success(
        '[+] Trying to login with credentials in config file: %s.' % paths.CONFIG_PATH)
    try:
        self.username = ConfigFileParser().ZoomEyeEmail()
        self.password = ConfigFileParser().ZoomEyePassword()
    except:
        # Best effort: a missing/unreadable config just means we prompt below.
        pass
    # Both credentials present and the token fetch succeeded -> done.
    if self.username and self.password and self.get_token():
        return
    outputscreen.warning('[*] Automatic authorization failed.')
    self.manual_login()
def TargetRegister(args):
    """Load the target module: fill conf.target from -i or -iF input."""
    msg = '[*] Initialize targets...'
    outputscreen.warning(msg)
    # Initialise the target queue.
    conf.target = queue.Queue()
    # Targets supplied directly on the command line.
    if args.target_input:
        # Try to parse the target expression (URL, CIDR or IP range).
        try:
            lists = parseTarget(args.target_input)
        except:
            helpmsg = "Invalid input in [-i], Example: -i [http://]target.com or 192.168.1.1[/24] or 192.168.1.1-192.168.1.100"
            outputscreen.error(helpmsg)
            sys.exit()
        # Ask for confirmation when the expansion is very large.
        if (len(lists)) > 100000:
            warnmsg = "[*] Loading %d targets, Maybe it's too much, continue? [y/N]" % (
                len(lists))
            outputscreen.warning(warnmsg)
            flag = input()
            if flag in ('Y', 'y', 'yes', 'YES', 'Yes'):
                pass
            else:
                msg = '[-] User quit!'
                outputscreen.warning(msg)
                sys.exit()
        msg = '[+] Load targets from: %s' % args.target_input
        outputscreen.success(msg)
        # save to conf
        for target in lists:
            conf.target.put(target)
        conf.target_nums = conf.target.qsize()
    # Targets read from a file, one expression per line.
    elif args.target_file:
        if not os.path.isfile(args.target_file):
            msg = '[-] TargetFile not found: %s' % args.target_file
            outputscreen.error(msg)
            sys.exit()
        msg = '[+] Load targets from: %s' % args.target_file
        outputscreen.success(msg)
        with open(args.target_file, 'r', encoding='utf-8') as f:
            targets = f.readlines()
            for target in targets:
                target = target.strip('\n')
                parsed_target = parseTarget(target)
                for i in parsed_target:
                    conf.target.put(i)
        conf.target_nums = conf.target.qsize()
    # Verify that at least one target was loaded.
    if conf.target.qsize() == 0:
        errormsg = msg = '[!] No targets found.Please load targets with [-i|-iF]'
        outputscreen.error(errormsg)
        sys.exit()
def handle_google(query, limit, offset=0):
    """Query Google Custom Search and feed result links into conf.target."""
    key = ConfigFileParser().google_developer_key()
    engine = ConfigFileParser().google_engine()
    if not (key and engine):
        outputscreen.error(
            "[-] Please config your 'developer_key' and 'search_enging' at saucerfram.conf")
        sys.exit()
    try:
        service = build("customsearch", "v1",
                        http=_initHttpClient(), developerKey=key)
        info = service.cse().list(q=query, cx=engine).execute()
        total = info.get('searchInformation', {}).get('totalResults')
        outputscreen.success('[+] Max query results: %s' % str(total))
        links = set()
        limit += offset
        # Page through results ten at a time (the API page size).
        for page in range(int(offset / 10), int((limit + 10 - 1) / 10)):
            result = service.cse().list(q=query, cx=engine, num=10,
                                        start=page * 10 + 1).execute()
            if 'items' in result:
                for item in result.get('items'):
                    links.add(item.get('link'))
        for link in links:
            conf.target.put(link)
    except SocketError:
        outputscreen.error(
            '[-] Unable to connect Google, maybe agent/proxy error.')
        sys.exit()
    except ServerHttpDenied as e:
        outputscreen.warning(
            '[-] It seems like Google-Server denied this request.')
        outputscreen.error(e)
        sys.exit()
def handle_fofa(query, limit, offset=0):
    """Query the FoFa search API and feed result hosts into conf.target.

    Credentials come from the config file, falling back to interactive input.
    NOTE(review): FoFa appears to cap results at 100 per the warning below;
    `limit`/`offset` are accepted for interface parity but not sent — confirm.
    """
    try:
        msg = '[+] Trying to login with credentials in config file: %s.' % paths.CONFIG_PATH
        outputscreen.success(msg)
        email = ConfigFileParser().fofa_email()
        key = ConfigFileParser().fofa_key()
        if check(email, key):
            pass
        else:
            raise Exception  # fall through to the interactive login below
    except Exception:
        msg = '[*] Automatic authorization failed.'
        outputscreen.warning(msg)
        msg = '[*] Please input your FoFa Email and API Key below.'
        outputscreen.warning(msg)
        email = input("[*] Fofa Email: ").strip()
        key = input('[*] Fofa API Key: ').strip()
        if not check(email, key):
            msg = '[-] Fofa API authorization failed, Please re-run it and enter a valid key.'
            outputscreen.error(msg)
            sys.exit()
    # BUG FIX: b64encode requires bytes and returns bytes; encode the query
    # and decode the result so it can be interpolated into the URL.
    query = base64.b64encode(query.encode()).decode()
    request = "https://fofa.so/api/v1/search/all?email={0}&key={1}&qbase64={2}".format(
        email, key, query)
    try:
        response = requests.get(request)
        # BUG FIX: requests.Response has no readlines(); parse the JSON body.
        resp = json.loads(response.text)
        if resp["error"] is None:
            for item in resp.get('results'):
                # BUG FIX: was `cong.target.append(item[0])` — `cong` is a
                # NameError, and conf.target is a Queue (put, not append).
                conf.target.put(item[0])
            if resp.get('size') >= 100:
                outputscreen.warning(
                    "{0} items found! just 100 returned....".format(
                        resp.get('size')))
    except Exception as e:
        outputscreen.error(e)
        sys.exit()
def TargetRegister(args):
    """Build conf.target from one of: single target (-iS), file (-iF),
    IP range (-iR), network (-iN), or a search-engine API dork
    (-aZ/-aS/-aG/-aF). Exits when no targets can be loaded.
    """
    msg = '[*] Initialize targets...'
    outputscreen.warning(msg)
    # init target queue
    conf.target = queue.Queue()
    # single target to queue
    if args.target_single:
        msg = '[+] Load target : %s' % args.target_single
        outputscreen.success(msg)
        conf.target.put(args.target_single)
    # file target to queue
    elif args.target_file:
        if not os.path.isfile(args.target_file):
            msg = '[-] TargetFile not found: %s' % args.target_file
            outputscreen.error(msg)
            sys.exit()
        msg = '[+] Load targets from : %s' % args.target_file
        outputscreen.success(msg)
        with open(args.target_file, 'r', encoding='utf8') as f:
            targets = f.readlines()
            for target in targets:
                conf.target.put(target.strip('\n'))
    # range of ip target to queue, e.g. 192.168.1.1-192.168.1.100
    elif args.target_range:
        try:
            lists = gen_ip(args.target_range)
            # Ask for confirmation when the expansion is very large.
            if (len(lists)) > 100000:
                warnmsg = "[*] Loading %d targets, Maybe it's too much, continue? [y/N]" % (
                    len(lists))
                outputscreen.warning(warnmsg)
                flag = input()
                if flag in ('Y', 'y', 'yes', 'YES', 'Yes'):
                    pass
                else:
                    msg = '[-] User quit!'
                    outputscreen.warning(msg)
                    sys.exit()
            msg = '[+] Load targets from : %s' % args.target_range
            outputscreen.success(msg)
            # save to conf
            for target in lists:
                conf.target.put(target)
        except:
            helpmsg = "Invalid input in [-iR], Example: -iR 192.168.1.1-192.168.1.100"
            outputscreen.error(helpmsg)
            sys.exit()
    # ip/mask e.g. 192.168.1.2/24
    elif args.target_network:
        try:
            # Normalise to the network address: 192.168.1.2 --> 192.168.1.0
            ip_format = args.target_network.split('/')
            ip_str = IP(ip_format[0]).strBin()
            ip_str = ip_str[0:int(ip_format[1])] + '0' * (32 - int(ip_format[1]))
            ip = "%s.%s.%s.%s" % (
                str(int(ip_str[0:8], 2)), str(int(ip_str[8:16], 2)),
                str(int(ip_str[16:24], 2)), str(int(ip_str[24:32], 2)))
            ip_range = IP('%s/%s' % (ip, ip_format[1]))
            msg = '[+] Load targets from : %s' % args.target_network
            outputscreen.success(msg)
            for i in ip_range:
                conf.target.put(i)
        except:
            msg = "[-] Invalid input in [-iN], Example: -iN 192.168.1.0/24"
            outputscreen.error(msg)
            sys.exit()
    else:
        # set search limit of api
        if args.api_limit <= 0:
            errormsg = 'Invalid input in [-limit] (can not be negative number)'
            outputscreen.error(errormsg)
            sys.exit()
        elif args.api_limit > 100000:
            # BUG FIX: was `len(lists)` — `lists` is never defined on this
            # path (NameError); warn with the requested limit instead.
            warnmsg = "Loading %d targets, Maybe it's too much, continue? [y/N]" % (
                args.api_limit)
            outputscreen.warning(warnmsg)
            flag = input()
            if flag in ('Y', 'y', 'yes', 'YES', 'Yes'):
                pass
            else:
                msg = 'User quit!'
                outputscreen.warning(msg)
                sys.exit()
        conf.limit = args.api_limit
        # set search offset of api
        conf.offset = args.api_offset
        if args.zoomeye_dork:
            # verify search_type for zoomeye
            if args.search_type not in ['web', 'host']:
                msg = '[-] Invalid value in [--search-type], show usage with [-h]'
                outputscreen.error(msg)
                sys.exit()
            conf.search_type = args.search_type
            handle_zoomeye(query=args.zoomeye_dork, limit=conf.limit,
                           type=conf.search_type, offset=conf.offset)
        elif args.fofa_dork:
            handle_fofa(query=args.fofa_dork, limit=conf.limit,
                        offset=conf.offset)
        elif args.shodan_dork:
            handle_shodan(query=args.shodan_dork, limit=conf.limit,
                          offset=conf.offset)
        elif args.google_dork:
            conf.google_proxy = args.google_proxy
            handle_google(query=args.google_dork, limit=conf.limit,
                          offset=conf.offset)
    # verify targets number
    if conf.target.qsize() == 0:
        errormsg = msg = 'No targets found\nPlease load targets with [-iU|-iF|-iR|-iN] or use API with [-aZ|-aS|-aG|-aF]'
        outputscreen.error(errormsg)
        sys.exit()
def bruter(url):
    '''
    @description: scan-plugin entry point for a single target
    @param {url: the target}
    @return:
    '''
    # Parse the target url.
    conf.parsed_url = urllib.parse.urlparse(url)
    # Prepend a scheme when none was given.
    if conf.parsed_url.scheme != 'http' and conf.parsed_url.scheme != 'https':
        url = 'http://' + url
        conf.parsed_url = urllib.parse.urlparse(url)
    # Global target url for the crawl/fuzz modules. XXX: must be set before
    # the trailing '/' is appended, otherwise fuzz mode would build urls like
    # https://target.com/phpinfo.{dir}/
    conf.url = url
    # Ensure the url ends with '/'.
    if not url.endswith('/'):
        url = url + '/'
    # Print the current target (padded to overwrite the progress line).
    msg = '[+] Current target: {}'.format(url)
    outputscreen.success('\r' + msg + ' ' * (th.console_width - len(msg) + 1))
    # Auto 404 detection: fetch the 404-page fingerprint up front.
    if conf.auto_check_404_page:
        outputscreen.warning("[*] Launching auto check 404")
        # Autodiscriminator (probably deprecated by future diagnostic subsystem)
        i = Inspector(url)
        (result, notfound_type) = i.check_this()
        if notfound_type == Inspector.TEST404_MD5 or notfound_type == Inspector.TEST404_OK:
            conf.autodiscriminator_md5.add(result)
    # Load the payloads for the configured scan mode(s).
    payloads.all_payloads = scanModeHandler()
    # FIXME: file extension is applied by concatenation in one pass over the
    # payload list.
    try:
        if conf.file_extension:
            outputscreen.warning('[+] Use file extentsion: {}'.format(
                conf.file_extension))
            for i in range(len(payloads.all_payloads)):
                payloads.all_payloads[i] += conf.file_extension
    except:
        outputscreen.error('[+] plz check extension!')
        sys.exit()
    # Debug mode: print all payloads and exit.
    if conf.debug:
        outputscreen.blue('[+] all payloads:{}'.format(payloads.all_payloads))
        sys.exit()
    # Enqueue payloads onto the task queue.
    for payload in payloads.all_payloads:
        # FIXME: url_payload construction differs in fuzz mode (payload
        # replaces the whole path instead of being appended).
        if conf.fuzz_mode:
            url_payload = conf.parsed_url.scheme + '://' + conf.parsed_url.netloc + payload
        else:
            url_payload = url + payload
        #print(url_payload)
        # Enqueue the payload for processing.
        tasks.all_task.put(url_payload)
    # Progress-bar length: skipped in recursive/crawl mode (the task count is
    # not known up front, so only elapsed time is shown).
    if not conf.recursive_scan:
        # NOTE: total = len(payloads) * number of targets, fixes issue#2
        tasks.task_length = len(payloads.all_payloads) * conf.target_nums
        bar.log.start(tasks.task_length)
    # FIXME: the tasks cannot all be drained at once; run conf.request_limit
    # greenlets per batch (this also works around the hub.LoopExit bug).
    while not tasks.all_task.empty():
        all_task = [gevent.spawn(boss) for i in range(conf.request_limit)]
        gevent.joinall(all_task)
def scanModeHandler():
    '''
    @description: scan-mode handler; loads and returns the payloads
    @param {type}
    @return: list of payloads (fuzz mode returns its dict directly)
    '''
    if conf.recursive_scan:
        msg = '[*] Use recursive scan: Yes'
        outputscreen.warning('\r' + msg + ' ' * (th.console_width - len(msg) + 1))
    else:
        msg = '[*] Use recursive scan: No'
        outputscreen.warning('\r' + msg + ' ' * (th.console_width - len(msg) + 1))
    payloadlists = []
    # Fuzz mode is exclusive: it cannot be combined with the other modes.
    if conf.fuzz_mode:
        outputscreen.warning('[*] Use fuzz mode')
        if conf.fuzz_mode == 1:
            return generateSingleFuzzDict(conf.fuzz_mode_load_single_dict)
        if conf.fuzz_mode == 2:
            return generateMultFuzzDict(conf.fuzz_mode_load_mult_dict)
    # The remaining modes may be combined with each other.
    else:
        if conf.dict_mode:
            outputscreen.warning('[*] Use dict mode')
            if conf.dict_mode == 1:
                payloadlists.extend(
                    loadSingleDict(conf.dict_mode_load_single_dict))
            elif conf.dict_mode == 2:
                payloadlists.extend(loadMultDict(
                    conf.dict_mode_load_mult_dict))
            else:
                outputscreen.error("[-] You must select a dict")
                sys.exit()
        if conf.blast_mode:
            outputscreen.warning('[*] Use blast mode')
            outputscreen.warning('[*] Use char set: {}'.format(
                conf.blast_mode_custom_charset))
            outputscreen.warning('[*] Use paylaod min length: {}'.format(
                conf.blast_mode_min))
            outputscreen.warning('[*] Use paylaod max length: {}'.format(
                conf.blast_mode_max))
            payloadlists.extend(generateBlastDict())
        # TODO: crawl urls recursively
        if conf.crawl_mode:
            outputscreen.warning('[*] Use crawl mode')
            # Custom headers in "header=value,header=value" form.
            headers = {}
            if conf.request_headers:
                try:
                    for header in conf.request_headers.split(','):
                        k, v = header.split('=')
                        headers[k] = v
                except Exception as e:
                    outputscreen.error(
                        "[x] Check personalized headers format: header=value,header=value.\n[x] error:{}"
                        .format(e))
                    sys.exit()
            # Custom user-agent.
            if conf.request_header_ua:
                headers['User-Agent'] = conf.request_header_ua
            # Custom cookie.
            if conf.request_header_cookie:
                headers['Cookie'] = conf.request_header_cookie
            try:
                response = requests.get(conf.url,
                                        headers=headers,
                                        timeout=conf.request_timeout,
                                        verify=False,
                                        allow_redirects=conf.redirection_302,
                                        proxies=conf.proxy_server)
            except requests.exceptions.ConnectionError as e:
                outputscreen.error(
                    "[x] Crawler network connection error!plz check whether the target is accessible"
                )
                sys.exit()
            # Collect urls from the page body.
            if response.status_code in conf.response_status_code:
                try:
                    contentDecode = response.content.decode('utf-8')
                except UnicodeDecodeError:
                    try:
                        contentDecode = response.content.decode('gbk')
                    except:
                        outputscreen.error(
                            "[x] Unrecognized page coding errors")
                        # BUG FIX: without exiting here, contentDecode stays
                        # unbound and etree.HTML() below raises NameError.
                        sys.exit()
                html = etree.HTML(contentDecode)
                # Parse the html with the user-configurable xpath expression.
                urls = html.xpath(conf.crawl_mode_parse_html)
                for url in urls:
                    # Drop urls that are too similar to ones already kept.
                    if urlSimilarCheck(url):
                        # Keep the url when 1. it is same-domain or 2. netloc
                        # is empty (a relative url, i.e. same domain).
                        if (urllib.parse.urlparse(url).netloc ==
                                urllib.parse.urlparse(conf.url).netloc
                            ) or urllib.parse.urlparse(url).netloc == '':
                            payloads.crawl_mode_dynamic_fuzz_temp_dict.add(url)
                payloads.crawl_mode_dynamic_fuzz_temp_dict = payloads.crawl_mode_dynamic_fuzz_temp_dict - {
                    '#', ''
                }
                if conf.crawl_mode_dynamic_fuzz:
                    # Load dynamic fuzz suffixes. TODO: split dynamic dict
                    # generation into its own module.
                    loadSuffix(
                        os.path.join(paths.DATA_PATH,
                                     conf.crawl_mode_dynamic_fuzz_suffix))
                    # Build the new dynamic crawl dictionary.
                    for i in payloads.crawl_mode_dynamic_fuzz_temp_dict:
                        payloads.crawl_mode_dynamic_fuzz_dict.extend(
                            generateCrawlDict(i))
                    for i in payloads.crawl_mode_dynamic_fuzz_temp_dict:
                        payloads.crawl_mode_dynamic_fuzz_dict.append(
                            urllib.parse.urlparse(i).path)
                    payloadlists.extend(set(payloads.crawl_mode_dynamic_fuzz_dict))
                else:
                    for i in payloads.crawl_mode_dynamic_fuzz_temp_dict:
                        payloads.crawl_mode_dynamic_fuzz_dict.append(
                            urllib.parse.urlparse(i).path)
                    payloadlists.extend(set(payloads.crawl_mode_dynamic_fuzz_dict))
        if payloadlists:
            return payloadlists
        else:
            outputscreen.error(
                "[-] You have to select at least one mode , plz check mode config")
            sys.exit()
"""
Copyright (c) saucerman (https://saucer-man.com)
See the file 'LICENSE' for copying permission
"""
import sys
import json
import base64

from lib.utils.config import ConfigFileParser
from lib.core.common import outputscreen
from lib.core.data import paths, conf

try:
    import requests
except:
    outputscreen.error("[-] Can't import requests")
    outputscreen.warning("[*] Try pip install requests")
    sys.exit()


# verify email and key
def check(email, key):
    """Return True when the FoFa email/key pair is accepted by the API.

    Returns False on any network error; returns None (falsy) when either
    credential is missing or the API answers with a non-200 status.
    """
    if email and key:
        auth_url = "https://fofa.so/api/v1/info/my?email={0}&key={1}".format(
            email, key)
        try:
            response = requests.get(auth_url)
            # BUG FIX: requests.Response exposes `status_code`, not `code`;
            # the original raised AttributeError, which the except below
            # silently turned into False for every valid credential pair.
            if response.status_code == 200:
                return True
        except Exception as e:
            # logger.error(e)
            return False
def TargetRegister(args):
    """Load the target module: fill conf.target from -iS/-iF/-iR/-iN input."""
    msg = '[*] Initialize targets...'
    outputscreen.warning(msg)
    # Initialise the target queue.
    conf.target = queue.Queue()
    # Single target.
    if args.target_single:
        msg = '[+] Load target: %s' % args.target_single
        outputscreen.success(msg)
        conf.target.put(args.target_single)
    # Targets read from a file, one per line.
    elif args.target_file:
        if not os.path.isfile(args.target_file):
            msg = '[-] TargetFile not found: %s' % args.target_file
            outputscreen.error(msg)
            sys.exit()
        msg = '[+] Load targets from: %s' % args.target_file
        outputscreen.success(msg)
        with open(args.target_file, 'r', encoding='utf8') as f:
            targets = f.readlines()
            for target in targets:
                conf.target.put(target.strip('\n'))
    # IP-range targets, e.g. 192.168.1.1-192.168.1.100
    elif args.target_range:
        try:
            lists = genIP(args.target_range)
            # Ask for confirmation when the expansion is very large.
            if (len(lists)) > 100000:
                warnmsg = "[*] Loading %d targets, Maybe it's too much, continue? [y/N]" % (
                    len(lists))
                outputscreen.warning(warnmsg)
                flag = input()
                if flag in ('Y', 'y', 'yes', 'YES', 'Yes'):
                    pass
                else:
                    msg = '[-] User quit!'
                    outputscreen.warning(msg)
                    sys.exit()
            msg = '[+] Load targets from: %s' % args.target_range
            outputscreen.success(msg)
            # save to conf
            for target in lists:
                conf.target.put(target)
        except:
            helpmsg = "Invalid input in [-iR], Example: -iR 192.168.1.1-192.168.1.100"
            outputscreen.error(helpmsg)
            sys.exit()
    # ip/mask e.g. 192.168.1.2/24
    elif args.target_network:
        try:
            # Normalise to the network address: 192.168.1.2 --> 192.168.1.0
            ip_format = args.target_network.split('/')
            ip_str = IP(ip_format[0]).strBin()
            ip_str = ip_str[0:int(ip_format[1])] + '0' * (32 - int(ip_format[1]))
            ip = "%s.%s.%s.%s" % (
                str(int(ip_str[0:8], 2)), str(int(ip_str[8:16], 2)),
                str(int(ip_str[16:24], 2)), str(int(ip_str[24:32], 2)))
            ip_range = IP('%s/%s' % (ip, ip_format[1]))
            msg = '[+] Load targets from: %s' % args.target_network
            outputscreen.success(msg)
            for i in ip_range:
                conf.target.put(i)
        except:
            msg = "[-] Invalid input in [-iN], Example: -iN 192.168.1.0/24"
            outputscreen.error(msg)
            sys.exit()
    # Verify that at least one target was loaded.
    if conf.target.qsize() == 0:
        errormsg = msg = 'No targets found\nPlease load targets with [-iU|-iF|-iR|-iN] or use API with [-aZ|-aS|-aG|-aF]'
        outputscreen.error(errormsg)
        sys.exit()
def ScanModeHandler():
    '''
    @description: pick the scan mode and load the payloads; only one mode can
                  be loaded at a time. TODO: allow running several modes at once.
    @param {type}
    @return: the payload list for the selected mode
    '''
    if conf.recursive_scan:
        outputscreen.warning('[*] Use recursive scan: Yes')
    else:
        outputscreen.warning('[*] Use recursive scan: No')
    if conf.dict_mode:
        outputscreen.warning('[*] Use dict mode')
        if conf.dict_mode == 1:
            return loadSingleDict(conf.dict_mode_load_single_dict)
        elif conf.dict_mode == 2:
            return loadMultDict(conf.dict_mode_load_mult_dict)
        else:
            outputscreen.error("[-] You must select a dict")
            sys.exit()
    elif conf.blast_mode:
        outputscreen.warning('[*] Use blast mode')
        outputscreen.warning('[*] Use char set: {}'.format(
            conf.blast_mode_custom_charset))
        outputscreen.warning('[*] Use paylaod min length: {}'.format(
            conf.blast_mode_min))
        outputscreen.warning('[*] Use paylaod max length: {}'.format(
            conf.blast_mode_max))
        return generateBlastDict()
    # TODO: crawl urls recursively
    elif conf.crawl_mode:
        outputscreen.warning('[*] Use crawl mode')
        headers = {}
        headers[
            "User-Agent"] = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"
        response = requests.get(conf.url, headers=headers, timeout=5)
        if response.status_code in conf.response_status_code:
            html = etree.HTML(response.text)
            # Parse the html with the user-configurable xpath expression.
            urls = html.xpath(conf.crawl_mode_parse_html)
            for url in urls:
                # Drop urls that are too similar to ones already kept.
                if urlSimilarCheck(url):
                    # Keep the url when 1. it is same-domain or 2. netloc is
                    # empty (a relative url, i.e. same domain).
                    if (urllib.parse.urlparse(url).netloc ==
                            urllib.parse.urlparse(conf.url).netloc
                        ) or urllib.parse.urlparse(url).netloc == '':
                        payloads.crawl_mode_dynamic_fuzz_temp_dict.add(url)
            payloads.crawl_mode_dynamic_fuzz_temp_dict = payloads.crawl_mode_dynamic_fuzz_temp_dict - {
                '#', ''
            }
            # Load suffixes. TODO: split dynamic dict generation into its own
            # module; this path could live in a standalone config file.
            loadSuffix(os.path.join(paths.DATA_PATH, 'crawl_mode_suffix.txt'))
            # Generate the new crawl urls.
            for i in payloads.crawl_mode_dynamic_fuzz_temp_dict:
                payloads.crawl_mode_dynamic_fuzz_dict.extend(generateCrawlDict(i))
            return payloads.crawl_mode_dynamic_fuzz_dict
    elif conf.fuzz_mode:
        outputscreen.warning('[*] Use fuzz mode')
        if conf.fuzz_mode == 1:
            return generateSingleFuzzDict(conf.fuzz_mode_load_single_dict)
        if conf.fuzz_mode == 2:
            return generateMultFuzzDict(conf.fuzz_mode_load_mult_dict)
    else:
        outputscreen.error("[-] You must select a scan mode")
        sys.exit()
See the file 'LICENSE' for copying permission """ import sys from lib.core.common import outputscreen from lib.core.enums import PROXY_TYPE from lib.utils.config import ConfigFileParser from lib.core.data import conf from httplib2 import Http, ProxyInfo from socket import error as SocketError try: from googleapiclient.discovery import build from googleapiclient.errors import HttpError as ServerHttpDenied except: outputscreen.error("[-] Can't import googleapiclient") outputscreen.warning("[*] Try pip install google-api-python-client") sys.exit() def _initHttpClient(): if conf.google_proxy: proxy_str = conf.google_proxy elif ConfigFileParser().google_proxy(): proxy_str = ConfigFileParser().google_proxy() else: proxy_str = None if not proxy_str: return Http() msg = 'Proxy: %s' % proxy_str
# -*- coding: utf-8 -*- """ Copyright (c) saucerman (https://xiaogeng.top) See the file 'LICENSE' for copying permission """ import sys from lib.core.data import paths, conf from lib.core.common import outputscreen from lib.utils.config import ConfigFileParser try: import shodan from shodan.exception import APIError except: outputscreen.error("[-] Can't import shodan") outputscreen.warning("[*] Try pip install shodan") sys.exit() class ShodanBase: def __init__(self, query, limit, offset): self.query = query self.limit = limit self.offset = offset self.api_key = None def login(self): msg = '[+] Trying to login with credentials in config file: %s.' % paths.CONFIG_PATH outputscreen.success(msg) self.api_key = ConfigFileParser().shodan_apikey()
def bruter(url): ''' @description: 扫描插件入口函数 @param {url:目标} @return: ''' #全局url,给crawl、fuzz模块使用。FIXME conf.url = url #url初始化 conf.parsed_url = urllib.parse.urlparse(url) #填补协议 if conf.parsed_url.scheme != 'http' and conf.parsed_url.scheme != 'https': url = 'http://' + url conf.parsed_url = urllib.parse.urlparse(url) #填补url后的/ if not url.endswith('/'): url = url + '/' #加载payloads #添加payloads是否加载成功判断 payloads.all_payloads = ScanModeHandler() if payloads.all_payloads == None: outputscreen.error('[x] load payloads error!') if conf.dict_mode: outputscreen.error('[x] plz check dict mode config!') if conf.blast_mode: outputscreen.error('[x] plz check blast mode config!') if conf.crawl_mode: outputscreen.error('[x] plz check crawl mode config!') if conf.fuzz_mode: outputscreen.error('[x] plz check fuzz mode config!') sys.exit() #FIXME:设置后缀名。当前以拼接方式实现,遍历一遍payload。 try: if conf.file_extension: outputscreen.warning('[+] Use file extentsion: {}'.format( conf.file_extension)) for i in range(len(payloads.all_payloads)): payloads.all_payloads[i] += conf.file_extension except: outputscreen.error('[+] plz check extension!') #debug模式,打印所有payload,并退出 if conf.debug: outputscreen.blue('[+] all payloads:{}'.format(payloads.all_payloads)) sys.exit() #payload入队task队列 for payload in payloads.all_payloads: #FIXME:添加fuzz模式时,引入的url_payload构造判断 if conf.fuzz_mode: url_payload = conf.parsed_url.scheme + '://' + conf.parsed_url.netloc + payload else: url_payload = url + payload #payload入队,等待处理 tasks.all_task.put(url_payload) #设置进度条长度,若是递归模式,则不设置任务队列长度,即无法显示进度,仅显示耗时 if not conf.recursive_scan: tasks.task_length = tasks.all_task.qsize() bar.log.start(tasks.task_length) #FIXME:循环任务数不能一次性取完所有的task,暂时采用每次执行30个任务。这样写还能解决hub.LoopExit的bug while not tasks.all_task.empty(): all_task = [gevent.spawn(boss) for i in range(conf.request_limit)] gevent.joinall(all_task)
def bruter(url):
    '''
    @description: scan-plugin entry point for a single target
    @param {url: the target}
    @return:
    '''
    # Global target url for the crawl/fuzz modules. FIXME
    conf.url = url
    # Parse the url.
    conf.parsed_url = urllib.parse.urlparse(url)
    # Prepend a scheme when none was given.
    if conf.parsed_url.scheme != 'http' and conf.parsed_url.scheme != 'https':
        url = 'http://' + url
        conf.parsed_url = urllib.parse.urlparse(url)
    # Ensure the url ends with '/'.
    if not url.endswith('/'):
        url = url + '/'
    # Auto 404 detection: fetch the 404-page fingerprint up front.
    if conf.auto_check_404_page:
        outputscreen.warning("[*] Launching auto check 404")
        # Autodiscriminator (probably deprecated by future diagnostic subsystem)
        i = Inspector(url)
        (result, notfound_type) = i.check_this()
        if notfound_type == Inspector.TEST404_URL:
            # Site answers 404s with a redirect; remember the location.
            conf.autodiscriminator_location = result
            outputscreen.success("[+] 404 ---> 302 ----> {}".format(
                conf.autodiscriminator_location))
        elif notfound_type == Inspector.TEST404_MD5:
            # Site answers 404s with a fixed page; remember its md5.
            conf.autodiscriminator_md5 = result
            outputscreen.success("[+] 404 ---> PAGE_MD5 ----> {}".format(
                conf.autodiscriminator_md5))
    # Load the payloads and verify they loaded successfully.
    payloads.all_payloads = scanModeHandler()
    if payloads.all_payloads == None:
        outputscreen.error('[x] load payloads error!')
        if conf.dict_mode:
            outputscreen.error('[x] plz check dict mode config!')
        if conf.blast_mode:
            outputscreen.error('[x] plz check blast mode config!')
        if conf.crawl_mode:
            outputscreen.error('[x] plz check crawl mode config!')
        if conf.fuzz_mode:
            outputscreen.error('[x] plz check fuzz mode config!')
        sys.exit()
    # FIXME: file extension is applied by concatenation in one pass over the
    # payload list.
    try:
        if conf.file_extension:
            outputscreen.warning('[+] Use file extentsion: {}'.format(
                conf.file_extension))
            for i in range(len(payloads.all_payloads)):
                payloads.all_payloads[i] += conf.file_extension
    except:
        outputscreen.error('[+] plz check extension!')
    # Debug mode: print all payloads and exit.
    if conf.debug:
        outputscreen.blue('[+] all payloads:{}'.format(payloads.all_payloads))
        sys.exit()
    # Enqueue payloads onto the task queue.
    for payload in payloads.all_payloads:
        # FIXME: url_payload construction differs in fuzz mode (payload
        # replaces the whole path instead of being appended).
        if conf.fuzz_mode:
            url_payload = conf.parsed_url.scheme + '://' + conf.parsed_url.netloc + payload
        else:
            url_payload = url + payload
        # Enqueue the payload for processing.
        tasks.all_task.put(url_payload)
    # Progress-bar length: skipped in recursive mode (task count unknown up
    # front, so only elapsed time is shown).
    if not conf.recursive_scan:
        tasks.task_length = tasks.all_task.qsize()
        bar.log.start(tasks.task_length)
    # FIXME: the tasks cannot all be drained at once; run conf.request_limit
    # greenlets per batch (this also works around the hub.LoopExit bug).
    while not tasks.all_task.empty():
        all_task = [gevent.spawn(boss) for i in range(conf.request_limit)]
        gevent.joinall(all_task)