def wappalyzer(self, target, verbose=False):
    """Scan every verified subdomain with Wappalyzer and store its tech stack.

    Iterates ``self.verified_domains``, fetches each one over HTTP, analyzes
    the page with Wappalyzer, and pushes the detected technology stack into
    the ``tech_stack`` MongoDB collection.

    Args:
        target: Parent domain; stored alongside each subdomain record.
        verbose: When True, print each URL and its detected stack.

    Returns:
        None.
    """
    print("\n" + self.Y + "[i] Verified and Analyzed Subdomains: \n")
    wappalyzer = Wappalyzer.latest()
    # Tech stack db which contains the tech stack of all the sub domains
    collection = self.dbname['tech_stack']
    collection.create_index('domain', unique=True)
    # BUG FIX: the original read self.dbname.collection.count(), which counts
    # a collection literally named "collection"; count tech_stack instead.
    count = collection.count()
    for url in self.verified_domains:
        try:
            webpage = WebPage.new_from_url('http://' + url, verify=False)
            tech_stack = wappalyzer.analyze(webpage)
            if tech_stack and verbose:
                print(self.G + "[i] URL: " + url)
                print(self.B + "[i] Wappalyzer: " + str(list(tech_stack)) + "\n")
            # Push the above data to DB
            data = {"id": count + 1, "domain": url, "time": datetime.now()}
            data["parent"] = target
            data['tech_stack'] = list(tech_stack)
            dataid = collection.insert(data)
            count += 1
        except Exception as e:
            # Best-effort: one unreachable subdomain must not abort the scan.
            continue
    return
def test_new_from_url():
    """WebPage.new_from_url should expose the fetched body via .html."""
    body = 'snerble'
    HTTPretty.register_uri(HTTPretty.GET, 'http://example.com/', body=body)
    page = WebPage.new_from_url('http://example.com/')
    assert page.html == body
def test_new_from_url(self):
    """WebPage.new_from_url should expose the fetched body via .html."""
    HTTPretty.register_uri(HTTPretty.GET, 'http://example.com/', body='snerble')
    webpage = WebPage.new_from_url('http://example.com/')
    # FIX: assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(webpage.html, 'snerble')
def test_new_from_url(self):
    """WebPage.new_from_url should expose the fetched body via .html."""
    HTTPretty.register_uri(HTTPretty.GET, 'http://example.com/', body='snerble')
    webpage = WebPage.new_from_url('http://example.com/')
    # FIX: assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(webpage.html, 'snerble')
def run_wappalyze(self, domain):
    """Analyze *domain* with the module-level analyzer and log each finding."""
    page = WebPage.new_from_url(domain)
    findings = analyzer.analyze(page)
    if not findings:
        log.console_log("Result Not Found")
        return
    for finding in findings:
        log.console_log(finding)
def extract_used_techs(url):
    """Return technologies (with versions and categories) detected on *url*.

    TLS verification is disabled so self-signed hosts can still be analyzed.
    """
    # FIX: removed a block of commented-out dead code (a manual urlopen-based
    # WebPage construction superseded by new_from_url).
    webpage = WebPage.new_from_url(url, verify=False)
    wappalyzer = Wappalyzer.latest()
    return wappalyzer.analyze_with_versions_and_categories(webpage)
def analyze(uplist):
    """Run Wappalyzer against every host in *uplist* over HTTPS.

    Args:
        uplist: Iterable of bare hostnames (no scheme).

    Returns:
        List of per-host dicts mapping technology -> versions/categories.
    """
    # BUG FIX: the original iterated a global `goodlist`, silently ignoring
    # the `uplist` parameter.
    wappalyzer = Wappalyzer.latest()  # hoisted: loop-invariant
    results = []
    for host in uplist:
        webpage = WebPage.new_from_url('https://' + host)
        results.append(wappalyzer.analyze_with_versions_and_categories(webpage))
    return results
def run(self):
    """Parse the -t target argument and print the site's technology stack.

    FIX: converted Python 2 print statements to print() calls and replaced
    the `== False` comparison with `not`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', action="store",
                        dest="target",
                        help="for example: ./wp.py -t site.com")
    args = parser.parse_args()
    self.site = args.target
    if self.site is None:
        sys.exit('Url is empty')
    if self.site:
        print('\n--------------------------------------------')
        print("# Determining target {}".format(self.site))
        print('--------------------------------------------')
        try:
            wappalyzer = Wappalyzer.latest()
            # Default to http:// when no scheme was supplied.
            if not self.site.startswith('http://'):
                self.site = ''.join(('http://', self.site))
            webpage = WebPage.new_from_url(self.site)
            analyze = wappalyzer.analyze(webpage)
            for components in analyze:
                print("> {}".format(components))
            print('\n--------------------------------------------')
        except requests.exceptions.Timeout:
            print("Warning: warning website is unreachable")
            print('--------------------------------------------')
        except requests.exceptions.ConnectionError:
            print('Name or service not known')
            print('--------------------------------------------')
        except KeyboardInterrupt:
            print("Why man ?")
def main():
    """Print every technology Wappalyzer detects on example.com."""
    wap = Wappalyzer.latest()
    try:
        web = WebPage.new_from_url("https://www.example.com")
        tec = wap.analyze(web)
        for t in tec:
            print("Detectada: {}".format(t))
    except Exception:
        # FIX: narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit.
        # NOTE(review): "Error 404" is printed for *any* failure — confirm
        # the message is intended.
        print("Error 404")
def test_pass_request_params():
    """Kwargs such as `timeout` must be forwarded through to requests."""
    timed_out = False
    try:
        WebPage.new_from_url('http://example.com/', timeout=0.00001)
    except requests.exceptions.ConnectTimeout:
        timed_out = True
    except BaseException:  # same scope as the original bare `except:`
        timed_out = False
    assert timed_out
def main():
    """Print every technology detected on the hard-coded target site."""
    wap = Wappalyzer.latest()
    try:
        web = WebPage.new_from_url("https://www.botlabco.ga/")
        tecnologias = wap.analyze(web)
        for t in tecnologias:
            print("Tecnologia detectada: {}".format(t))
    except Exception:
        # FIX: narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit.
        print("Ha ocurrido un error")
def main():
    """Print every technology detected on the hard-coded target site."""
    wap = Wappalyzer.latest()
    try:
        # FIX: removed a commented-out alternate URL (dead code).
        web = WebPage.new_from_url("https://200code.tech")
        tecnologias = wap.analyze(web)
        for t in tecnologias:
            print("Tecnologia detectada: {}".format(t))
    except Exception:
        # FIX: narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit.
        print("Ha ocurrido un error")
def wappalyzeit(domain):
    """Print third-party libraries/technologies detected on *domain*.

    FIX: converted Python 2 print statements to print() calls.
    """
    wappalyzer = Wappalyzer.latest()
    webpage = WebPage.new_from_url(domain)
    detected = wappalyzer.analyze(webpage)
    if detected:
        print("[+] Third party libraries in Use:")
        for s in detected:
            print(s)
    else:
        print("\t\t\t[-] Nothing found. Make sure domain name is passed properly")
def wappalyzeit(domain):
    """Print third-party libraries/technologies detected on *domain*.

    FIX: converted Python 2 print statements to print() calls.
    """
    wappalyzer = Wappalyzer.latest()
    webpage = WebPage.new_from_url(domain)
    detected = wappalyzer.analyze(webpage)
    if detected:
        print("[+] Third party libraries in Use:")
        for s in detected:
            print(s)
    else:
        print("\t\t\t[-] Nothing found. Make sure domain name is passed properly")
def main():
    """Print every technology detected on example.com."""
    wap = Wappalyzer.latest()
    try:
        web = WebPage.new_from_url('https://www.example.com')
        tecnologias = wap.analyze(web)  # set-like collection of tech names
        for t in tecnologias:
            print(f'Tecnología detectada: {t}')
    except Exception:
        # FIX: narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit.
        print('Ha ocurrido un error!')
def main():
    """Print every technology detected on the hard-coded target site."""
    wap = Wappalyzer.latest()
    try:
        web = WebPage.new_from_url(
            "https://curso--python-0-pruebas.000webhostapp.com/")
        tecnologias = wap.analyze(web)
        for t in tecnologias:
            print("Tecnologia detectada: {}".format(t))
    except Exception:
        # FIX: narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit.
        print("Ha ocurrido un error")
def main():
    """Analyze the page given via the CLI parser and print its technologies."""
    if parser.page:
        wap = Wappalyzer.latest()
        try:
            web = WebPage.new_from_url(URL)
            tecnologias = wap.analyze(web)
            for t in tecnologias:
                print('Tecnologia detectada: {}'.format(t))
        except Exception:
            # FIX: narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.
            print('Ha ocurrido un error')
def useWappalyzer(url):
    """Return the technologies detected on *url*.

    NOTE(review): returns a list when something is found, an empty dict when
    nothing is, and None (implicitly, after printing) on error — callers must
    cope with the mixed return types.
    """
    try:
        engine = Wappalyzer.latest()
        page = WebPage.new_from_url(url)
        prints = engine.analyze(page)
        return list(prints) if len(prints) > 0 else {}
    except Exception as e:
        print(e)
def main():
    """Analyze the CLI target and print each detected technology."""
    if parser.target:
        wap = Wappalyzer.latest()
        try:
            web = WebPage.new_from_url(parser.target)
            tecnologias = wap.analyze(web)
            for tecnologia in tecnologias:
                print("Tecnología detectada: {}".format(tecnologia))
        except Exception:
            # FIX: narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.
            print("Ha ocurrido un error")
    else:
        print("Imposible analizar el objetivo")
def wappalyzeit(domain):
    """Print third-party libraries/technologies detected on *domain*.

    FIX: converted Python 2 print statements to print() calls.
    """
    print(colored(style.BOLD + '---> Wapplyzing web page:\n' + style.END, 'blue'))
    time.sleep(0.3)  # small pause so console output stays readable
    wappalyzer = Wappalyzer.latest()
    webpage = WebPage.new_from_url(domain)
    detected = wappalyzer.analyze(webpage)
    if detected:
        print("[+] Third party libraries in Use:")
        for s in detected:
            print(s)
    else:
        print("\t\t\t[-] Nothing found. Make sure domain name is passed properly")
def check(ig, url):
    """Analyze *url* and print its technology stack, honouring the ignore flag."""
    if not url.startswith('http'):
        url = 'http://' + url
    try:
        page = WebPage.new_from_url(url)
        tech = wappalyzer.analyze(page)
        line = yellow + "[+] " + str(url) + end + bold + " | " + green + ", ".join(tech) + end
        print(line)
    except Exception as e:
        # `ig` arrives as the *string* 'True' when errors should be ignored.
        if ig != 'True':
            print(red + "Error: " + end + "[ " + bold + str(url) + end + " ] | " + str(e))
def services(subdomain):
    """Return the detected technology list for each subdomain.

    Args:
        subdomain: Sequence whose first element is an iterable of hostnames
            (celery feeds the subdomain-scan result in this shape).

    Returns:
        List of lists — one technology list (or an error marker) per host.
    """
    found_services = []
    wappalyzer = Wappalyzer.latest()  # FIX: hoisted out of the loop (loop-invariant)
    for subDomain in subdomain[0]:
        try:
            webpage = WebPage.new_from_url('http://' + subDomain)
            found_services.append(list(wappalyzer.analyze(webpage)))
        except Exception:
            # FIX: narrowed from a bare `except:`; keep the best-effort marker.
            error_array = ['No Service Detected - Error']
            found_services.append(error_array)
    return found_services
def wappalyzer_detection():
    """Run Wappalyzer against the form-supplied `target` host and render
    detection.html with the result (or an empty page when no target given)."""
    # pretty print the output (set; need to change to dict)
    target = ""
    if request.form.get('target'):
        target = request.form.get('target')
        req = requests.get('http://' + target)
        if req.status_code == 200:
            wappalyzer = Wappalyzer.latest()
            # NOTE(review): the probe above used http:// but analysis here
            # uses https:// — confirm the scheme mismatch is intentional.
            webpage = WebPage.new_from_url('https://' + target)
            output = wappalyzer.analyze(webpage)
            return render_template('detection.html', target=output)
        else:
            # Fallback path: re-probe over https, then analyze identically.
            req = requests.get('https://' + target)
            wappalyzer = Wappalyzer.latest()
            webpage = WebPage.new_from_url('https://' + target)
            output = wappalyzer.analyze(webpage)
            return render_template('detection.html', target=output)
    else:
        return render_template('detection.html')
def wappalyzeit(domain):
    """Return a list of technologies detected on *domain*, each tab-prefixed.

    Returns an empty list when nothing is detected.
    """
    time.sleep(0.3)  # small pause between scans
    wappalyzer = Wappalyzer.latest()
    webpage = WebPage.new_from_url(domain)
    detected = wappalyzer.analyze(webpage)
    # FIX: the original if/else returned temp_list on both branches —
    # collapsed into a single comprehension (identical result, [] when empty).
    return ["\t%s" % s for s in detected]
def wappalyzeit(domain):
    """Return a list of technologies detected on *domain*, each tab-prefixed.

    Returns an empty list when nothing is detected.
    """
    time.sleep(0.3)  # small pause between scans
    wappalyzer = Wappalyzer.latest()
    webpage = WebPage.new_from_url(domain)
    detected = wappalyzer.analyze(webpage)
    # FIX: the original if/else returned temp_list on both branches —
    # collapsed into a single comprehension (identical result, [] when empty).
    return ["\t%s" % s for s in detected]
def main():
    """Print technologies and categories detected on the hard-coded URL."""
    wap = Wappalyzer.latest()
    try:
        # URL to scan
        web = WebPage.new_from_url("https://prod.senasica.gob.mx/sisia/login")
        tecno = wap.analyze(web)
        Categorias = wap.analyze_with_categories(web)
        for t in tecno:
            print("Tecnologias Detectadas son: {}".format(t))
        for c in Categorias:
            print("Categorias Detectadas: {}".format(c))
    except Exception:
        # FIX: narrowed from a bare `except:`; also fixed the message typo
        # "ocurriod" -> "ocurrido".
        print("Ha ocurrido un error")
def main():
    """Analyze the CLI-supplied address and print detected technologies."""
    wap = Wappalyzer.latest()
    try:
        if parser.address:
            # URL of the site to scan
            web = WebPage.new_from_url(parser.address)
            tecnologias = wap.analyze(web)
            # Print each detected technology on its own line
            for t in tecnologias:
                cprint('Tecnología detectada:', 'yellow', end=' ')
                print(t)
        else:
            cprint('\nNecesito una dirección web', 'red', end='\n\n')
    except Exception:
        # FIX: narrowed from a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit.
        cprint('\nHa ocurrido un error', 'red', end='\n\n')
def Tech_used(host_name):
    """Return a newline-separated "category : technology" report for *host_name*.

    NOTE(review): any failure (not only TLS problems) yields the SSL error
    message below — confirm that wording is intended.
    """
    output = "\n"
    try:
        page = WebPage.new_from_url(f'http://{host_name}')
        analyzer = Wappalyzer.latest()
        details = analyzer.analyze_with_categories(page)
        for tech, meta in details.items():
            for category in meta['categories']:
                output += f"{category} : {tech}\n"
    except Exception:
        output += "Sorry, The website SSL certificate could not be verified!\n"
    return output
# Have to install package python-Wappalyzer
def wappalyzeit(domain):
    """Print and return technologies detected on *domain* (tab-prefixed).

    FIX: converted Python 2 print statements to print() calls; the two
    identical per-branch returns were unified into one at the end.
    """
    temp_list = []
    time.sleep(0.3)  # small pause between scans
    wappalyzer = Wappalyzer.latest()
    webpage = WebPage.new_from_url(domain)
    detected = wappalyzer.analyze(webpage)
    if detected:
        print("[+] Third party libraries in Use:")
        for s in detected:
            temp_list.append("\t%s" % s)
            print("\t%s" % s)
    else:
        print("\t\t\t[-] Nothing found. Make sure domain name is passed properly")
    return temp_list
def wappalyzeit(domain):
    """Print and return technologies detected on *domain* (tab-prefixed).

    FIX: converted Python 2 print statements to print() calls; the two
    identical per-branch returns were unified into one at the end.
    """
    temp_list = []
    time.sleep(0.3)  # small pause between scans
    wappalyzer = Wappalyzer.latest()
    webpage = WebPage.new_from_url(domain)
    detected = wappalyzer.analyze(webpage)
    if detected:
        print("[+] Third party libraries in Use:")
        for s in detected:
            temp_list.append("\t%s" % s)
            print("\t%s" % s)
    else:
        print("\t\t\t[-] Nothing found. Make sure domain name is passed properly")
    return temp_list
def detect(domain, ig, out):
    """Analyze *domain*, print its stack, and optionally append it to *out*.

    Args:
        domain: Target URL/host to analyze.
        ig: String flag; 'True' means suppress error output.
        out: Output file path, or the string 'None' for no file output.
    """
    try:
        webpage = WebPage.new_from_url(domain)
        services = wappalyzer.analyze(webpage)
        print("[+] " + str(domain) + " | " + green + bold + " - ".join(services) + end)
        if out != 'None':
            with open(out, 'a') as f:
                f.write(domain + " | " + " - ".join(services) + "\n")
            # FIX: dropped the redundant f.close(); the with-block closes f.
    except Exception as e:
        if ig == 'True':
            pass
        else:
            print(red + "Error: " + end + "[ " + bold + str(domain) + end + " ] > " + str(e))
def wappalyzeit(domain, taskId):
    """Analyze http://<domain>, persist the result for *taskId*, return it.

    FIX: converted Python 2 print statements to print() calls; narrowed the
    trailing bare `except:` to Exception.

    Returns:
        List of detected technology names ([] on any failure).
    """
    try:
        wappalyzer = Wappalyzer.latest()
        odomain = "http://%s" % domain
        webpage = WebPage.new_from_url(odomain)
        detected = wappalyzer.analyze(webpage)
        wap = []
        if detected:
            print("[+] Third party libraries in Use:")
            for s in detected:
                wap.append(s)
        else:
            print("\t\t\t[-] Nothing found. Make sure domain name is passed properly")
        save_record(domain, taskId, "WapAlyzer", wap)
        return wap
    except Exception:
        # Best-effort: any failure yields an empty result.
        return []
def wappalyzeit(domain, taskId):
    """Analyze http://<domain>, persist the result for *taskId*, return it.

    FIX: converted Python 2 print statements to print() calls; narrowed the
    trailing bare `except:` to Exception.

    Returns:
        List of detected technology names ([] on any failure).
    """
    try:
        wappalyzer = Wappalyzer.latest()
        odomain = "http://%s" % domain
        webpage = WebPage.new_from_url(odomain)
        detected = wappalyzer.analyze(webpage)
        wap = []
        if detected:
            print("[+] Third party libraries in Use:")
            for s in detected:
                wap.append(s)
        else:
            print("\t\t\t[-] Nothing found. Make sure domain name is passed properly")
        save_record(domain, taskId, "WapAlyzer", wap)
        return wap
    except Exception:
        # Best-effort: any failure yields an empty result.
        return []
def get_alyzer_res(self, level, url: str): """ 这里去获取结果 :param level: :param url: :return: """ # 为url添加头 target = url if not (url.startswith("https://") or url.startswith("http://")): target = "http://" + url # -w是ms单位,即超过那个时间后就不再继续搞了 try: # 只初始化一个wappalyzer wappalyzer = Wappalyzer.latest() webpage = WebPage.new_from_url(target, verify=False) info = wappalyzer.analyze_with_versions_and_categories(webpage) if not isinstance(info, dict) or info.__len__() <= 0: return for k, v in info.items(): name = k versions = v.get("versions", []) categories = v.get("categories", []) if name is None or name == "": continue for i in range(len(categories)): ctname = categories[i] if ctname.lower() == "cms": self._logger.debug("Start CMS ver detection: {}".format(target)) ver = self._recognize_cms_ver(target, name) if ver is not None: version = ver self._logger.debug( "Got cms version: {}:{}".format(name, version) ) com = Component(self.task, level, name) com.category = ctname com.url = target if len(versions) >= i + 1: com.ver = versions[i] yield com except Exception as errs: self._logger.error(f"Wappaylyzer found nothing\nerr:{errs}")
def check(out, ig, url):
    """Analyze *url*, print its stack, and optionally append it to *out*.

    Args:
        out: Output file path, or the string 'None' for no file output.
        ig: String flag; 'True' means suppress error output.
        url: Target; 'http://' is prepended when no scheme is given.
    """
    if not url.startswith('http'):
        url = 'http://' + url
    try:
        webpage = WebPage.new_from_url(url)
        tech = wappalyzer.analyze(webpage)
        print("[+] " + str(url) + " | " + green + bold + " - ".join(tech) + end)
        if out != 'None':
            with open(out, 'a') as f:
                f.write(url + " | " + " - ".join(tech) + "\n")
            # FIX: dropped the redundant f.close(); the with-block closes f.
    except Exception as e:
        if ig == 'True':
            pass
        else:
            print(red + "Error: " + end + "[ " + bold + str(url) + end + " ] > " + str(e))
def ifWordpress(salida, url):
    """Return True when *salida* or a live /wp-admin probe indicates WordPress.

    Args:
        salida: Uppercased, space-split Wappalyzer output tokens.
        url: Site base URL.
    """
    isWordpress = False
    if int(salida.count('WORDPRESS')) != 0:
        isWordpress = True
    try:
        # FIX: os.path.join(url + "/wp-admin") was a no-op single-argument
        # call; plain concatenation yields the identical string.
        if requests.get(url + "/wp-admin", verify=False, timeout=20).status_code == 200:
            isWordpress = True
            wappalyzer = Wappalyzer.latest()
            webpage = WebPage.new_from_url(url)
            salida = str(wappalyzer.analyze(webpage)).upper().split(" ")
            if int(salida.count('WORDPRESS')) != 0 or "U'WORDPRESS'," in salida:
                isWordpress = True
    except Exception as e:
        pass  # best-effort probe; failures keep the verdict computed so far
        # print(Fore.RED + "ERROR")
    return isWordpress
import sys

from Wappalyzer import Wappalyzer, WebPage

# CLI usage: python script.py <url> — print the technologies detected there.
url = sys.argv[1]
w = Wappalyzer.latest()
webpage = WebPage.new_from_url(url)
text = w.analyze(webpage)
# FIX: Python 2 print statement -> print() function.
print(text)
def getCms(url):
    """Return the technologies detected on *url*, joined with '&'."""
    page = WebPage.new_from_url(url)
    detected = wappalyzer.analyze(page)
    return "&".join(detected)
apps = TextField() # JSON str class Meta: database = db # This model uses the "people.db" database. counter = 0 total = Domain.select().count() for domain in Domain.select().iterator(): counter += 1 if counter % 100 == 0: print counter, '/', total if len(json.loads(domain.apps)) > 0: continue try: webpage = WebPage.new_from_url('http://'+domain.name) except Exception, e: continue domain.apps = json.dumps(list(wappalyzer.analyze(webpage))) domain.save() ######################## create db # db.create_tables([Domain]) # org_data = json.loads(open('../alexa.json').read()) # cats_data = {} # for org_datum in org_data: # domain = org_datum[0] # cat = org_datum[1] # if not cat in cats_data: