def doPassiveScan(self, baseRequestResponse):
    if not self.cbPassiveChecks.isSelected():
        return None

    scanIssues = list()
    analyzedResponse = self.helpers.analyzeResponse(
        baseRequestResponse.getResponse())

    # Only fingerprint responses that Burp infers to be HTML;
    # everything else is skipped
    if analyzedResponse.getInferredMimeType() != 'HTML':
        return None

    req = baseRequestResponse.getRequest().tostring()
    resp = baseRequestResponse.getResponse().tostring()
    exchange = {'request': req, 'response': resp}

    wt = webtech.WebTech()
    wt_report = wt.start_from_exchange(exchange)
    if wt_report['tech'] == [] and wt_report['headers'] == []:
        # nothing detected
        return None

    scanIssues.append(
        WebTechScanIssue(baseRequestResponse, wt_report, self.helpers,
                         self.callbacks))
    return scanIssues
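For context, a minimal sketch of how a check like this is typically wired into a Burp (Jython) extension. The interface and callback names below are the standard Burp extender API; the cbPassiveChecks checkbox and WebTechScanIssue class are assumed to be defined elsewhere in the extension:

from burp import IBurpExtender, IScannerCheck

class BurpExtender(IBurpExtender, IScannerCheck):
    def registerExtenderCallbacks(self, callbacks):
        self.callbacks = callbacks
        self.helpers = callbacks.getHelpers()
        callbacks.setExtensionName("WebTech")
        # register this object so Burp invokes doPassiveScan() on each response
        callbacks.registerScannerCheck(self)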
import sys
import webtech
from os import system


def techno(url):
    print(
        "[+] Detecting CMS with Identified Technologies and Custom Headers from target url\n"
    )
    print("[+] Target: " + url)
    obj = webtech.WebTech()
    results = obj.start_from_url(url, timeout=1)
    system('tput setaf 9')  # switch terminal foreground color before printing
    sys.stdout.write(results)
import sys
import webtech

# bcolors: project-local ANSI color constants defined elsewhere


def techno(url):
    print(
        bcolors.green +
        "[+] Detecting CMS with Identified Technologies and Custom Headers from target url\n"
    )
    print("[+] Target: ", bcolors.red, url, bcolors.lightcyan)
    obj = webtech.WebTech()
    results = obj.start_from_url(url, timeout=1)
    sys.stdout.write(results)
import webtech


def getDataFromWebTech(url):
    wt = webtech.WebTech(options={'json': True, 'random-user-agent': True})
    try:
        return wt.start_from_url(url)
    except webtech.utils.ConnectionException:
        return "Connection Error"
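A hypothetical call site for getDataFromWebTech, using the report keys seen in the other snippets (each entry in report['tech'] carries name and version fields):

report = getDataFromWebTech("https://example.com")
if report != "Connection Error":
    for tech in report['tech']:
        print("{} {}".format(tech['name'], tech['version']))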
import webtech
from prettytable import PrettyTable


def web(self):
    try:
        x = PrettyTable()
        x.field_names = ["Name", "Version", "Module"]
        wt = webtech.WebTech(options={'json': True, 'random-user-agent': True})
        report = wt.start_from_url(self.url)
        for tech in report['tech']:
            x.add_row([tech['name'], tech['version'], "WebTech"])
        for header in report['headers']:
            x.add_row([header['name'], header['value'], "WebTech"])
        print(x)
    except webtech.utils.ConnectionException:
        print("Connection error")
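For context, a minimal sketch of the host class this method appears to belong to, assuming (hypothetically) that it only needs a self.url attribute; web() from the snippet above would sit inside the class body:

class Scanner:
    """Hypothetical host class for the web() method above."""

    def __init__(self, url):
        self.url = url  # target consumed by web()

    # def web(self): ...  (method from the snippet above)

scanner = Scanner("https://example.com")
# scanner.web()  # would print the PrettyTable of detected technologies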
import webtech
from colorama import Fore, Style


def webtech_result(domain):
    wt = webtech.WebTech(options={'json': True})
    report = wt.start_from_url(str(domain))

    print("***** Technologies *****")
    for tech in report['tech']:
        print(Fore.GREEN + str(tech['name']) + ":" + str(tech['version']))
    print(Style.RESET_ALL)
    print("******************************************************************")

    print("***** Headers *****")
    for header in report['headers']:
        print(Fore.BLUE + str(header['name']) + ":" + str(header['value']))
    print(Style.RESET_ALL)
import webtech


def get_tech_info(url):
    obj = webtech.WebTech(options={"json": True})
    data = obj.start_from_url(url)
    tech_data = data["tech"]
    header_data = data["headers"]

    print("\n[+] Web-Technologies : ")
    for datas in tech_data:
        print("\t", "Name:", datas["name"], " \tVersion :", datas["version"])

    print("\n[+] Header Info : ")
    for datas in header_data:
        print("\t", datas["name"], datas["value"])
    print("\n")
import webtech

# colors: project-local ANSI color constants defined elsewhere


def get_tech_info(url):
    obj = webtech.WebTech(options={"json": True})
    data = obj.start_from_url(url)
    tech_data = data["tech"]
    header_data = data["headers"]

    print(colors.red + "[+] Technologies : ")
    print(colors.green + "\n[+] Web-Technologies : ")
    for datas in tech_data:
        print("\t" + colors.yellow + "Name: " + datas["name"] + " \tVersion :",
              datas["version"])

    print(colors.green + "\n[+] Header Info : ")
    for datas in header_data:
        print("\t" + colors.yellow + datas["name"] + " " + datas["value"])

    print(colors.blue +
          "\n\n----------------------------------------------------------------\n\n")
#!/usr/bin/env python3
import webtech

# make sure to have the latest db version
webtech.database.update_database(force=True)

# you can use options, same as from the command line
wt = webtech.WebTech(options={'json': True})

# scan a single website
report = wt.start_from_url('https://google.com', timeout=1)
print(report)

# scan multiple websites from a list
for site in ['https://shielder.it', 'http://connectionerror']:
    try:
        report = wt.start_from_url(site)
        techs = report['tech']
        print("Site: {}".format(site))
        for tech in techs:
            print(" - {}".format(tech))
    except webtech.utils.ConnectionException:
        print("Site unavailable: {}".format(site))

print("Done")
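The same WebTech object can also analyze an already-captured request/response pair via start_from_exchange, as used in the Burp snippet earlier. A minimal sketch, assuming the exchange is available as plain strings; the sample request and response below are illustrative only:

import webtech

wt = webtech.WebTech(options={'json': True})

# illustrative raw HTTP exchange; in practice these come from a proxy or capture
raw_request = "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"
raw_response = "HTTP/1.1 200 OK\r\nServer: nginx\r\n\r\n<html></html>"

report = wt.start_from_exchange({'request': raw_request,
                                 'response': raw_response})
print(report['tech'], report['headers'])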
import sys
import webtech
from os import system


def techno(url):
    obj = webtech.WebTech()
    results = obj.start_from_url(url, timeout=1)
    system('tput setaf 9')  # switch terminal foreground color before printing
    sys.stdout.write(results)
import json
import os
import re
import sys
import time
import urllib.request
from collections import deque
from time import gmtime, strftime
from urllib.error import URLError
from urllib.parse import urlsplit
from urllib.request import urlopen

import cfscrape
import nmap
import requests
import webtech
from bs4 import BeautifulSoup
from requests import get

# banner() and menu() are defined elsewhere in the script


def fun():
    choice = "1"
    banner()
    # loop until the exit option (15) is chosen
    while choice != "15":
        menu()
        choice = input("\033[1;34m[+]\033[1;m \033[1;91mEnter your choice:\033[1;m ")
        if choice == "3":
            try:
                target = input("\033[1;91m[+] Enter Domain or IP Address: \033[1;m").lower()
                os.system("reset")
                print("\033[34m[~] Searching for Whois Lookup: \033[0m" + target)
                time.sleep(1.5)
                command = "whois " + target
                proces = os.popen(command)
                results = str(proces.read())
                print(results + command)
            except Exception:
                pass
        elif choice == "2":
            try:
                target = input("\033[1;91m[+] Enter Domain or IP Address: \033[1;m").lower()
                os.system("reset")
                print("\033[34m[~] Searching for DNS Lookup: \033[0m" + target)
                time.sleep(1.5)
                command = "dig " + target + " +trace ANY"
                proces = os.popen(command)
                results = str(proces.read())
                print(results + command)
            except Exception:
                pass
        elif choice == "1":
            try:
                os.system("reset")
                os.system("gnome-terminal -e 'bash -c \"sudo etherape; exec bash\"'")
            except Exception:
                pass
        elif choice == "4":
            try:
                target = input("\033[1;91m[+] Enter Domain or IP Address: \033[1;m").lower()
                os.system("reset")
                print("\033[34m[~] Scanning Nmap Port Scan: \033[0m" + target)
                print("This will take a moment... Get some coffee 😃 )\n")
                time.sleep(1.5)
                scanner = nmap.PortScanner()
                command = "nmap -Pn " + target
                process = os.popen(command)
                results = str(process.read())
                logPath = "logs/nmap-" + strftime("%Y-%m-%d_%H:%M:%S", gmtime())
                print(results + command + logPath)
                print("\033[34mNmap Version: \033[0m", scanner.nmap_version())
            except KeyboardInterrupt:
                print("\n")
                print("[-] User Interruption Detected..!")
                time.sleep(1)
        elif choice == "5":
            try:
                target = input("\033[1;91m[+] Enter Domain or IP Address: \033[1;m").lower()
                os.system("reset")
                print("\033[34m[~] Scanning HTTP Header Grabber: \033[0m\n" + target)
                time.sleep(1.5)
                command = "http -v " + target
                proces = os.popen(command)
                results = str(proces.read())
                print(results + command)
            except Exception:
                pass
        elif choice == "6":
            target = input("\033[1;91m[+] Enter the Domain to test: \033[1;m").lower()
            os.system("reset")
            if not (target.startswith("http://") or target.startswith("https://")):
                target = "http://" + target
            print("\033[1;34m[~] Testing Clickjacking Test: \033[1;m" + target)
            time.sleep(2)
            try:
                resp = requests.get(target)
                headers = resp.headers
                print("\nHeaders set are: \n")
                for item, xfr in headers.items():
                    print("\033[1;34m" + item + ": " + xfr + "\033[1;m")
                if "X-Frame-Options" in headers.keys():
                    print("\n[+] \033[1;34mClickjacking header is present\033[1;m")
                    print("[+] \033[1;34mYou can't clickjack this site!\033[1;m\n")
                else:
                    print("\n[*] \033[1;34mX-Frame-Options header is missing!\033[1;m")
                    print("[!] \033[1;34mClickjacking is possible, this site is vulnerable to Clickjacking\033[1;m\n")
            except Exception as ex:
                print("\033[1;34mException caught: " + str(ex))
        elif choice == "7":
            try:
                target = input("\033[1;91m[+] Enter Domain: \033[1;m").lower()
                os.system("reset")
                print("\033[34m[~] Scanning Robots.txt Scanner: \033[0m\n" + target)
                time.sleep(1.5)
                if not (target.startswith("http://") or target.startswith("https://")):
                    target = "http://" + target
                robot = target + "/robots.txt"
                try:
                    bots = urlopen(robot).read().decode("utf-8")
                    print("\033[34m" + bots + "\033[1;m")
                except URLError:
                    print("\033[1;31m[-] Can't access {page}!\033[1;m".format(page=robot))
            except Exception as ex:
                print("\033[1;34mException caught: " + str(ex))
        elif choice == "8":
            target = input("\033[1;91m[+] Enter Domain: \033[1;m").lower()
            if not (target.startswith("http://") or target.startswith("https://")):
                target = "http://" + target
            os.system("reset")
            print("[+] Cloudflare cookie scraper")
            time.sleep(1.5)
            sess = cfscrape.create_scraper()
            try:
                print("[+] Target: " + target)
                request = "GET / HTTP/1.1\r\n"
                cookie_value, user_agent = cfscrape.get_cookie_string(target)
                request += "Cookie: %s\r\nUser-Agent: %s\r\n" % (cookie_value, user_agent)
                data = sess.get(target)
                out = BeautifulSoup(data.content, 'html.parser')
                print("[+] Print Cookie\n")
                print(request)
                os.system('tput setaf 10')
                print("\n[+] Scraper")
                print(out)
            except ValueError:
                print('[X] Unable to find Cloudflare cookies. This website does not have Cloudflare IUAM enabled.')
        elif choice == "9":
            try:
                target = input("\033[1;91m[+] Enter Domain: \033[1;m").lower()
                os.system("reset")
                print("\033[34m[~] Scanning Link Grabber: \033[0m\n" + target)
                time.sleep(2)
                if not (target.startswith("http://") or target.startswith("https://")):
                    target = "http://" + target
                deq = deque([target])
                pro = set()
                try:
                    while len(deq):
                        url = deq.popleft()
                        pro.add(url)
                        parts = urlsplit(url)
                        base = "{0.scheme}://{0.netloc}".format(parts)
                        print("[+] Crawling URL " + "\033[34m" + url + "\033[0m")
                        try:
                            response = requests.get(url)
                        except (requests.exceptions.MissingSchema,
                                requests.exceptions.ConnectionError):
                            continue
                        soup = BeautifulSoup(response.text, "lxml")
                        for anchor in soup.find_all("a"):
                            link = anchor.attrs["href"] if "href" in anchor.attrs else ''
                            if link.startswith("/"):
                                link = base + link
                            if link not in deq and link not in pro:
                                deq.append(link)
                except KeyboardInterrupt:
                    print("\n")
                    print("[-] User Interruption Detected..!")
                    time.sleep(1)
                    print("\n \t\033[34m[!] I like to See Ya, Hacking Anywhere ..!\033[0m\n")
            except Exception:
                pass
        elif choice == "10":
            try:
                target = input("\033[1;91m[+] Enter Domain or IP Address: \033[1;m").lower()
                url = "http://ip-api.com/json/"
                response = urllib.request.urlopen(url + target)
                data = response.read()
                jso = json.loads(data)
                os.system("reset")
                print("\033[34m[~] Searching IP Location Finder: \033[0m" + target)
                time.sleep(1.5)
                print("\n [+] \033[34mUrl: " + target + "\033[0m")
                print(" [+] \033[34mIP: " + jso["query"] + "\033[0m")
                print(" [+] \033[34mStatus: " + jso["status"] + "\033[0m")
                print(" [+] \033[34mRegion: " + jso["regionName"] + "\033[0m")
                print(" [+] \033[34mCountry: " + jso["country"] + "\033[0m")
                print(" [+] \033[34mCity: " + jso["city"] + "\033[0m")
                print(" [+] \033[34mISP: " + jso["isp"] + "\033[0m")
                print(" [+] \033[34mLat & Lon: " + str(jso['lat']) + " & " + str(jso['lon']) + "\033[0m")
                print(" [+] \033[34mZipcode: " + jso["zip"] + "\033[0m")
                print(" [+] \033[34mTimeZone: " + jso["timezone"] + "\033[0m")
                print(" [+] \033[34mAS: " + jso["as"] + "\033[0m\n")
            except URLError:
                print("\033[1;31m[-] Please provide a valid IP address!\033[1;m")
        elif choice == "11":
            try:
                target = input("\033[1;91m[+] Enter Domain: \033[1;m").lower()
                if not (target.startswith("http://") or target.startswith("https://")):
                    target = "https://" + target
                os.system("reset")
                print("\033[34m[~] Detecting CMS with Identified Technologies and Custom Headers from target url: \033[0m")
                time.sleep(5)
                obj = webtech.WebTech()
                results = obj.start_from_url(target, timeout=1)
                sys.stdout.write(results)
            except Exception:
                pass
        elif choice == "12":
            try:
                target = input("\033[1;91m[+] Enter Domain or IP Address: \033[1;m").lower()
                os.system("reset")
                print("\033[34m[~] Searching for Traceroute \033[0m" + target)
                print(">> This will take a moment... Get some coffee << )\n")
                time.sleep(5)
                command = "mtr -4 -rwc 1 " + target
                proces = os.popen(command)
                results = str(proces.read())
                print("\033[1;34m" + results + command + "\033[1;m")
            except KeyError:
                pass
        elif choice == "13":
            target = input("\033[1;91m[+] Enter Domain: \033[1;m").lower()
            os.system("reset")
            print("\033[34m[~] Start crawler... \033[0m")
            time.sleep(5)
            print("[+] Target: " + target)
            if not (target.startswith("http://") or target.startswith("https://")):
                target = "http://" + target
            try:
                content = get(target).text
                regex_t = re.compile(r"<title>(.*?)</title>")
                tit = re.findall(regex_t, content)
                regex_l = re.compile(r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+")
                link = re.findall(regex_l, content)
                robots = get(target + "/robots.txt").text
                print("[+] Title: " + ''.join(tit) + "\n")
                print("[+] Extracted links: \n" + '\n'.join(link) + "\n")
                print("[+] Robots.txt: \n" + robots)
            except KeyError:
                pass
        elif choice == "14":
            target = input("\033[1;91m[+] Enter Domain: \033[0m")
            os.system("reset")
            print("\033[34m[~] Scanning Certificate Transparency log monitor: \033[0m\n" + target)
            time.sleep(1.5)
            print("[+] Target: " + target)
            try:
                headers = {
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
                }
                # query the certspotter API directly and print the raw response;
                # the jq/sed/grep pipeline belongs in a shell one-liner, not a URL
                results = requests.get(
                    'https://api.certspotter.com/v1/issuances?domain=' + target +
                    '&expand=dns_names&expand=issuer&expand=cert',
                    headers=headers)
                results = results.text.split('\n')
                print(*results, sep="\n")
            except KeyError:
                pass
        elif choice == "15":
            time.sleep(1)
            print("\n\t\033[34mBlue Eye\033[0m DONE... Exiting... \033[34mLike to See Ya Hacking Anywhere ..!\033[0m\n")
            sys.exit()
        else:
            os.system("reset")
            print("\033[1;31m[-] Invalid option..! \033[1;m")
#!/usr/bin/env python3
import webtech

# you can use options, same as from the command line
wt = webtech.WebTech(args=None)

# scan a single website
report = wt.start_from_url('https://google.com', timeout=1)
print(report)

# scan multiple websites from a list
for site in ['https://example.com', 'http://connectionerror']:
    try:
        report = wt.start_from_url(site, timeout=10)
        print("Site: {}".format(site))
        print(report)
    except webtech.utils.ConnectionException:
        print("Site unavailable: {}".format(site))

print("Done")