def poc(target):
    """Fingerprint web technologies on *target* with Wappalyzer.

    Reads the cached response body and headers for the target, runs the
    Wappalyzer signature engine over them, and records any detected
    applications under the "app" key via the collector.

    :param target: domain whose cached info is analyzed
    :return: None (results are written via collector.add_domain_info)
    """
    body = collector.get_domain_info(target, "body")
    resp_headers = collector.get_domain_info(target, "headers")
    engine = Wappalyzer.latest()
    detected = engine.analyze(body, resp_headers)
    if detected:
        collector.add_domain_info(target, {"app": list(detected)})
def poc(target):
    """Fingerprint web technologies with Wappalyzer, skipping huge bodies.

    Same analysis as the plain Wappalyzer plugin, but response bodies
    longer than one million characters are discarded before analysis to
    keep runtime bounded.

    :param target: domain whose cached info is analyzed
    :return: None (results are written via collector.add_domain_info)
    """
    html = collector.get_domain_info(target, "body")
    headers = collector.get_domain_info(target, "headers")
    # For speed, give up on bodies longer than one million characters.
    # Bug fix: get_domain_info may return None for a missing body (sibling
    # plugins check `if html:` first) — calling len(None) raised TypeError.
    if not html or len(html) > 1000000:
        html = ''
    wappalyzer = Wappalyzer.latest()
    b = wappalyzer.analyze(html, headers)
    if b:
        collector.add_domain_info(target, {"app": list(b)})
def poc(arg):
    """Flag *arg* as a likely login page if its cached body mentions 'password'.

    :param arg: domain whose cached body is inspected
    :return: None (a finding is recorded via collector.add_domain_bug)
    """
    page = collector.get_domain_info(arg, "body")
    if not page:
        return
    # Case-insensitive search anywhere in the body, including across lines.
    if re.search('password', page, re.I | re.M | re.S):
        collector.add_domain_bug(arg, {"登录平台发现": arg})
def poc(target):
    """Extract fingerprint data from the cached HTML body and headers.

    Each rule from read_config() is a (name, discern_type, key, regex)
    tuple matched against either the response headers or the body. Hits,
    plus the raw Server / X-Powered-By header values, are collected and
    stored as a {key: value} dict on the target.

    :param target: domain whose cached info is inspected
    :return: None (results are written via collector.add_domain_info)
    """
    def discern_from_header(name, key, reg):
        # One rule: record `name` if header `key` matches `reg`.
        if key in headers and re.search(reg, headers[key], re.I):
            result.add(name)

    def discern_from_index(name, reg):
        # One rule: record `name` if the body matches `reg`.
        if re.search(reg, html, re.I):
            result.add(name)

    html = collector.get_domain_info(target, "body")
    headers = collector.get_domain_info(target, "headers")
    result = set()
    result_dict = {}
    if html and headers:
        # Bug fix: record the raw server-identification headers once, up
        # front. Previously this ran inside the per-rule header helper, so
        # it repeated for every rule and was silently skipped when the
        # config contained no 'headers'-type rules at all.
        if "Server" in headers:
            result.add("Server:" + headers["Server"])
        if "X-Powered-By" in headers:
            result.add("X-Powered-By:" + headers["X-Powered-By"])
        for name, discern_type, key, reg in read_config():
            if discern_type == 'headers':
                discern_from_header(name, key, reg)
            elif discern_type == 'index':
                discern_from_index(name, reg)
    for i in result:
        try:
            # Header values may themselves contain ':' (e.g.
            # 'X-Powered-By:Servlet 2.4; JBoss-4.0.3SP1 (...)/Tomcat-5.5');
            # extra segments are re-joined with spaces.
            k, *v = i.split(":")
            result_dict[k] = ' '.join(v)
        except Exception:
            # Narrowed from a bare except: only log unexpected failures.
            logger.error("webeye error split:" + repr(i))
    collector.add_domain_info(target, result_dict)
def poc(target):
    """Extract the page <title> from the cached HTML body and store it.

    :param target: domain whose cached body is inspected
    :return: None (the stripped title is written via collector.add_domain_info)
    """
    html = collector.get_domain_info(target, "body")
    if html:
        # Bug fix: the original pattern used '\/' inside a normal string
        # literal — an invalid escape sequence (DeprecationWarning since
        # Python 3.6, SyntaxWarning in 3.12). A raw string matches the
        # exact same text, since '\/' in a regex is just a literal '/'.
        m = re.search(r'<title>(.*?)</title>', html, re.I | re.M | re.S)
        if m:
            collector.add_domain_info(target, {"title": m.group(1).strip()})
def poc(domain):
    """Identify the CMS running on *domain* by probing fingerprint paths.

    Each config entry maps a CMS name to a list of probes of the form
    {"path": ..., "option": "md5"|"keyword", "content": expected}. The
    probe path is appended to *domain* and fetched; the response is
    compared either by MD5 digest of the raw bytes or by keyword
    containment in the decoded text. Bodies are cached per path so probes
    shared between CMS entries are only downloaded once.

    :param domain: base URL (scheme + host) the probe paths are appended to
    :return: False if a CMS is already recorded, True on a successful
             match, None when nothing matched
    """
    cms = collector.get_domain_info(domain, "CMS")
    if cms:
        return False
    data = read_config()
    cache = {}  # path -> raw response body, fetched at most once
    header = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"}
    for k, v in data.items():
        for item in v:
            path = item["path"]
            _url = domain + path
            if path not in cache:
                try:
                    # Cheap HEAD first so missing paths are skipped without
                    # downloading their bodies.
                    r = requests.head(_url, timeout=10, headers=header)
                    if r.status_code != 200:
                        continue
                    # Bug fix: the GET previously had no timeout and could
                    # hang the whole plugin on a stalled connection.
                    hh = requests.get(_url, timeout=10, headers=header)
                    if hh.status_code != 200:
                        continue
                    content = hh.content
                    cache[path] = content
                except requests.RequestException:
                    # Narrowed from a bare except: network errors are
                    # expected while scanning; move on to the next probe.
                    continue
            else:
                content = cache[path]
            try:
                html = content.decode('utf-8', 'ignore')
            except Exception:
                html = str(content)
            option = item["option"]
            vaild = item["content"]
            if option == "md5":
                # MD5 here is a content fingerprint, not a security hash.
                m = hashlib.md5()
                m.update(content)
                if m.hexdigest() == vaild:
                    collector.add_domain_info(domain, {"CMS": k})
                    return True
            elif option == "keyword":
                if vaild in html:
                    collector.add_domain_info(domain, {"CMS": k})
                    return True