def poc(target): html = collector.get_domain_info(target, "body") headers = collector.get_domain_info(target, "headers") wappalyzer = Wappalyzer.latest() b = wappalyzer.analyze(html, headers) if b: collector.add_domain_info(target, {"app": list(b)})
def poc(target): html = collector.get_domain_info(target, "body") headers = collector.get_domain_info(target, "headers") # 为了速度,长度超过一百万的就放弃了~ if len(html) > 1000000: html = '' wappalyzer = Wappalyzer.latest() b = wappalyzer.analyze(html, headers) if b: collector.add_domain_info(target, {"app": list(b)})
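# These poc plugins all read from and write to a shared `collector` object whose
# implementation is not shown in this listing. The following is only a minimal
# in-memory sketch of the interface they rely on; the class layout and attribute
# names are assumptions, not the project's actual code.
class Collector:
    def __init__(self):
        self.collect_domains = {}

    def add_domain(self, domain):
        self.collect_domains.setdefault(domain, {"infos": {}, "bugs": {}})

    def del_domain(self, domain):
        self.collect_domains.pop(domain, None)

    def add_domain_info(self, domain, info):
        self.add_domain(domain)
        self.collect_domains[domain]["infos"].update(info)

    def get_domain_info(self, domain, key):
        return self.collect_domains.get(domain, {}).get("infos", {}).get(key)

    def get_domain(self, domain):
        return self.collect_domains.get(domain, {}).get("infos", {})

    def add_domain_bug(self, domain, bug):
        self.add_domain(domain)
        self.collect_domains[domain]["bugs"].update(bug)


collector = Collector()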
def poc(target):
    '''
    This plugin extracts the site title from the HTML.
    :param target:
    :return:
    '''
    html = collector.get_domain_info(target, "body")
    if html:
        m = re.search(r'<title>(.*?)</title>', html, re.I | re.M | re.S)
        if m:
            collector.add_domain_info(target, {"title": m.group(1).strip()})
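# Outside the plugin framework the title extraction is just this regex; a quick
# standalone check with a hypothetical HTML snippet:
import re

sample_html = "<html><head>\n<TITLE>  Example Domain  </TITLE>\n</head><body></body></html>"
m = re.search(r'<title>(.*?)</title>', sample_html, re.I | re.M | re.S)
if m:
    print(m.group(1).strip())  # -> Example Domain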
def poc(domain):
    # Identify the CMS by requesting known fingerprint paths and matching either
    # the md5 of the raw response or a keyword in the decoded body.
    cms = collector.get_domain_info(domain, "CMS")
    if cms:
        return False
    data = read_config()
    cache = {}
    header = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"}
    for k, v in data.items():
        for item in v:
            path = item["path"]
            _url = domain + path
            if path not in cache:
                try:
                    r = requests.head(_url, timeout=10, headers=header)
                    if r.status_code != 200:
                        continue
                    hh = requests.get(_url, headers=header)
                    if hh.status_code != 200:
                        continue
                    content = hh.content
                    cache[path] = content
                except Exception:
                    continue
            else:
                content = cache[path]
            try:
                html = content.decode('utf-8', 'ignore')
            except Exception:
                html = str(content)
            option = item["option"]
            valid = item["content"]
            if option == "md5":
                m = hashlib.md5()
                m.update(content)
                if m.hexdigest() == valid:
                    collector.add_domain_info(domain, {"CMS": k})
                    return True
            elif option == "keyword":
                if valid in html:
                    collector.add_domain_info(domain, {"CMS": k})
                    return True
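# The rules returned by read_config() map a CMS name to a list of fingerprints;
# judging from the keys the loop reads ("path", "option", "content"), an entry
# presumably looks like the illustrative (not real) data below:
example_rules = {
    "wordpress": [
        # "keyword" rules match a substring of the decoded response body
        {"path": "/wp-login.php", "option": "keyword", "content": "wp-submit"},
        # "md5" rules match the md5 hex digest of the raw response content
        {"path": "/wp-includes/js/wp-embed.min.js", "option": "md5",
         "content": "0123456789abcdef0123456789abcdef"},  # placeholder digest
    ],
}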
def poc(target):
    '''
    This plugin extracts useful data from the HTML body or the response headers.
    :param target:
    :return:
    '''

    def discern_from_header(name, discern_type, key, reg):
        if "Server" in headers:
            result.add("Server:" + headers["Server"])
        if "X-Powered-By" in headers:
            result.add("X-Powered-By:" + headers["X-Powered-By"])
        if key in headers and re.search(reg, headers[key], re.I):
            result.add(name)

    def discern_from_index(name, discern_type, key, reg):
        if re.search(reg, html, re.I):
            result.add(name)

    html = collector.get_domain_info(target, "body")
    headers = collector.get_domain_info(target, "headers")
    result = set()
    result_dict = {}
    if html and headers:
        mark_list = read_config()
        for mark_info in mark_list:
            name, discern_type, key, reg = mark_info
            if discern_type == 'headers':
                discern_from_header(name, discern_type, key, reg)
            elif discern_type == 'index':
                discern_from_index(name, discern_type, key, reg)
    for i in result:
        try:
            k, *v = i.split(":")
            v = ' '.join(v)
            # e.g. 'X-Powered-By:Servlet 2.4; JBoss-4.0.3SP1 (build: CVSTag=JBoss_4_0_3_SP1 date=200510231054)/Tomcat-5.5'
            result_dict[k] = v
        except Exception:
            logger.error("webeye error split:" + repr(i))
    collector.add_domain_info(target, result_dict)
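# Here read_config() returns four-field marks unpacked as
# (name, discern_type, key, reg); illustrative (not real) entries:
example_marks = [
    ("Nginx", "headers", "Server", r"nginx"),       # test a response header
    ("PHP", "headers", "X-Powered-By", r"php"),
    ("WordPress", "index", "", r"wp-content"),      # test the HTML body
]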
def hand_domain(self, serviceType): target = serviceType["target"] logger.info(target) # 添加这条记录 collector.add_domain(target) # 发起请求 try: r = requests.get(target, timeout=30, verify=False, allow_redirects=False) collector.add_domain_info(target, { "headers": r.headers, "body": r.text, "status_code": r.status_code }) except Exception as e: logger.error("request url error:" + str(e)) collector.del_domain(target) return logger.debug("target:{} over,start to scan".format(target)) # Get hostname hostname = urlparse(target).netloc.split(":")[0] if not is_ip_address_format(hostname): try: _ip = socket.gethostbyname(hostname) collector.add_domain_info(target, {"ip": _ip}) except: pass else: collector.add_domain_info(target, {"ip": hostname}) work_list = [ webeye.poc, webtitle.poc, wappalyzer.poc, password_found.poc ] if IS_START_PLUGINS: work_list.append(crossdomain.poc) work_list.append(directory_browse.poc) work_list.append(gitleak.poc) work_list.append(iis_parse.poc) work_list.append(phpinfo.poc) work_list.append(svnleak.poc) work_list.append(tomcat_leak.poc) work_list.append(whatcms.poc) # WorkList.append(bakfile.poc) # 去除备份文件扫描模块,原因:太费时 # th = [] # try: # for func in work_list: # i = threading.Thread(target=func, args=(target,)) # i.start() # th.append(i) # for thi in th: # thi.join() # except Exception as e: # logger.error("domain plugin threading error {}:{}".format(repr(Exception), str(e))) for func in work_list: try: func(target) except Exception as e: logger.error("domain plugin threading error {}:{}".format( repr(Exception), str(e))) logger.debug("target:{} End of scan".format(target)) infos = collector.get_domain(target) _pocs = [] temp = {} if IS_START_PLUGINS and "CMS" in infos: if infos.get("app"): temp["app"] = [] temp["app"].append(infos["CMS"]) else: temp["app"] = [infos["CMS"]] # update domain app collector.add_domain_info(target, temp) if temp.get("app"): keywords = temp["app"] # 远程读取插件 pocs = load_remote_poc() for poc in pocs: for keyword in keywords: if poc["name"] == keyword: webfile = poc["webfile"] logger.debug("load {0} poc:{1} poc_time:{2}".format( poc["type"], webfile, poc["time"])) # 加载插件 code = requests.get(webfile).text obj = load_string_to_module(code, webfile) _pocs.append(obj) # 并发执行插件 if _pocs: executor = futures.ThreadPoolExecutor(len(_pocs)) fs = [] for f in _pocs: taks = executor.submit(f.poc, target) fs.append(taks) for f in futures.as_completed(fs): try: res = f.result() except Exception as e: res = None logger.error("load poc error:{} error:{}".format( target, str(e))) if res: name = res.get("name") or "scan_" + str(time.time()) collector.add_domain_bug(target, {name: res}) collector.send_ok(target)
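# load_string_to_module() is not shown in this listing; a common way to implement
# it (a sketch under that assumption, not necessarily the project's code) is to
# exec the downloaded source into a fresh module object:
import types


def load_string_to_module(code, name="remote_poc"):
    # Build an empty module and execute the fetched source inside it, so the
    # returned object exposes whatever the remote file defines (e.g. poc()).
    module = types.ModuleType(name)
    exec(compile(code, name, "exec"), module.__dict__)
    return module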
def hand_domain(self, serviceType): target = serviceType["target"] logger.info(target) # 添加这条记录 collector.add_domain(target) # 发起请求 try: r = requests.get(target, timeout=30, verify=False, allow_redirects=False) collector.add_domain_info(target, { "headers": r.headers, "body": r.text, "status_code": r.status_code }) except Exception as e: logger.error("request url error:" + str(e)) collector.del_domain(target) return logger.debug("target:{} over,start to scan".format(target)) # Get hostname # ???????????WDNMD hostname = urlparse(target).netloc.split(":")[0] if not is_ip_address_format(hostname): try: # return the host from socket _ip = socket.gethostbyname(hostname) collector.add_domain_info(target, {"ip": _ip}) except: pass else: collector.add_domain_info(target, {"ip": hostname}) # 需要启动那些poc进行目标信息扫描 work_list = [webeye.poc, webtitle.poc, wappalyzer.poc] # password_found.poc if IS_START_PLUGINS: pass work_list.append(crossdomain.poc) # work_list.append(directory_browse.poc) work_list.append(gitleak.poc) work_list.append(iis_parse.poc) work_list.append(phpinfo.poc) work_list.append(svnleak.poc) work_list.append(tomcat_leak.poc) # work_list.append(whatcms.poc) # 信息直接从函数的内部利用collector进行存储 for func in work_list: try: func(target) except Exception as e: logger.error("domain plugin threading error {}:{}".format( repr(Exception), str(e))) pass logger.debug("target:{} End of scan".format(target)) collector.print_domains() infos = collector.get_domain(target) _pocs = [] temp = {} if IS_START_PLUGINS and "CMS" in infos: if infos.get("app"): temp["app"] = [] temp["app"].append(infos["CMS"]) else: temp["app"] = [infos["CMS"]] # update domain app collector.add_domain_info(target, temp) if temp.get("app"): keywords = temp["app"] # 远程读取插件 pocs = load_remote_poc() for poc in pocs: for keyword in keywords: webfile = poc["webfile"] logger.debug("load {0} poc:{1} poc_time:{2}".format( poc["type"], webfile, poc["time"])) # 加载插件 加载远程文件目录 将其转换成实体 code = requests.get(webfile).text obj = load_string_to_moudle(code, webfile) # 在模块对象列表中加入远程模块 _pocs.append(obj) # 并发执行插件 if _pocs: executor = futures.ThreadPoolExecutor(len(_pocs)) fs = [] for f in _pocs: taks = executor.submit(f.poc, target) # 这儿返回的是啥子鸡巴啊 每个线程的控制类? fs.append(taks) for f in futures.as_completed(fs): try: res = f.result() except Exception as e: res = None logger.error("load poc error:{} error:{}".format( target, str(e))) if res: name = res.get("name") or "scan_" + str(time.time()) collector.add_domain_bug(target, {name: res}) # 通过异步调用插件得到返回结果,并且通过collector返送结果 collector.send_ok(target) print("print collector") print(collector.collect_domains)
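# Judging from the result handling above (res.get("name"), add_domain_bug), a
# remote poc file is expected to define poc(target) returning a result dict on a
# hit and something falsy otherwise. A hypothetical minimal remote plugin
# (the path and field names here are illustrative, not from the real poc set):
import requests


def poc(target):
    try:
        r = requests.get(target + "/phpinfo.php", timeout=10, verify=False)
        if r.status_code == 200 and "PHP Version" in r.text:
            return {"name": "phpinfo_found", "url": target + "/phpinfo.php"}
    except requests.RequestException:
        pass
    return None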