class WordPress(LogicalHttp):
    vuln = 'WordpressFileMangerToolOk'

    def __init__(self):
        LogicalHttp.__init__(self, WordPress.vuln)
        self.ca = WebAlyzer(self._name)

    def run_logic_grabber(self, host: str, portinfo: PortInfo, **kwargs):
        try:
            outlog = kwargs.get('outlog')
            log = f"Start scanning vulnerability: {WordPress.vuln}"
            self._logger.debug(log)
            outlog(log)
            urls = [
                f"http://{host}/wordpress/wp-content/plugins/wp-file-manager/lib/php/connector.minimal.php",
                f"http://{host}/wp-content/plugins/wp-file-manager/lib/php/connector.minimal.php"
            ]
            if portinfo.ssl_flag:
                urls = [
                    f"https://{host}/wordpress/wp-content/plugins/wp-file-manager/lib/php/connector.minimal.php",
                    f"https://{host}/wp-content/plugins/wp-file-manager/lib/php/connector.minimal.php"
                ]
            for url in urls:
                self._logger.debug(f"Start WordpressFileMangerToolOk url:{url}")
                try:
                    p: ProxyItem = ProxyMngr.get_one_crosswall()
                    proxydict = None
                    if isinstance(p, ProxyItem):
                        proxydict = p.proxy_dict
                        self._logger.debug(f"proxy ip: {p._ip}, port: {p._port}")
                    headers = f"""
                    Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
                    Accept-Encoding: gzip, deflate
                    Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
                    Cache-Control: no-cache
                    Host: {host}
                    Pragma: no-cache
                    Proxy-Connection: keep-alive
                    Upgrade-Insecure-Requests: 1
                    User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36
                    """
                    resp: Response = self._ha.get_response(
                        url, headers=headers, verify=False, timeout=10)
                    if resp is None or resp.status_code != 200:
                        self._logger.debug(
                            f"Cannot connect {url}, resp:{resp.status_code if resp is not None else None}")
                        continue
                    self._logger.debug(
                        f"Succeed get WordpressFileMangerToolOk, url:{url}")
                    siteinfo: SiteInfo = SiteInfo(url)
                    respheader = ""
                    for k, v in resp.headers.items():
                        respheader += f"{k}:{v}\n"
                    siteinfo.set_httpdata(None, None, respheader, resp.text)
                    wapres = 0
                    try:
                        self._logger.info(
                            f"Start WordpressFileMangerToolOk wappalyzer: {url}")
                        for com in self.ca.get_alyzer_res(level=1, url=url):
                            wapres += 1
                            siteinfo.set_components(com)
                    except Exception:
                        self._logger.error(
                            "Get WordpressFileMangerToolOk components error")
                    portinfo.set_siteinfo(siteinfo)
                    self._logger.info(
                        f"Stop WordpressFileMangerToolOk wappalyzer: {url} rescount:{wapres}")
                    log = f"{WordPress.vuln} vulnerability scan finished"
                    outlog(log)
                except Exception:
                    self._logger.debug(f"Request {url} error")
                    continue
        except Exception:
            self._logger.error("Wordpress error")
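# A minimal driving sketch for the grabber above, assuming this module's
# existing imports. The caller is expected to already hold a PortInfo for the
# target (its construction is not shown in this file, so it is taken as a
# parameter); `outlog` may be any callable accepting a str, e.g. `print`.
# This helper is illustrative only and is not part of the scanner pipeline.
def _demo_wordpress_grab(host: str, portinfo: PortInfo) -> None:
    grabber = WordPress()
    # run_logic_grabber reads the `outlog` kwarg and reports progress through it
    grabber.run_logic_grabber(host, portinfo, outlog=print)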
class JoomlaCareers(LogicalHttp):
    """Grabber that looks for a special URL on a website: /host/careers."""
    vuln = 'joomla_3_4_6'

    def __init__(self) -> None:
        LogicalHttp.__init__(self, JoomlaCareers.vuln)
        self.ca = WebAlyzer(self._name)

    def run_logic_grabber(self, host: str, portinfo: PortInfo, **kwargs):
        """
        2020/09/24 wsy: the directory for this vulnerability should be
        /index.php/component/users/; for now that specified directory is
        what gets collected.
        """
        try:
            outlog = kwargs.get('outlog')
            log = f"Start scanning vulnerability: {JoomlaCareers.vuln}"
            outlog(log)
            req_headers = """
            accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
            accept-encoding: gzip, deflate
            accept-language: en-US,en;q=0.9
            cache-control: no-cache
            pragma: no-cache
            sec-fetch-dest: document
            sec-fetch-mode: navigate
            sec-fetch-site: none
            sec-fetch-user: ?1
            upgrade-insecure-requests: 1
            user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"""
            failnum = 0
            url = None
            resp: Response = None
            got = False
            iserr = False
            while True:
                try:
                    p: ProxyItem = ProxyMngr.get_one_crosswall()
                    proxydict = None
                    if isinstance(p, ProxyItem):
                        proxydict = p.proxy_dict
                        self._logger.debug(f"proxy ip: {p._ip}, port: {p._port}")
                    url = f"http://{host}/index.php/component/users"
                    if portinfo.ssl_flag:
                        url = f"https://{host}/index.php/component/users"
                    self._logger.debug(f"Start joomlacareers url:{url}")
                    try:
                        resp = self._ha.get_response(
                            url, timeout=10, headers=req_headers, verify=False)
                    except Exception:
                        iserr = True
                    if iserr or resp is None or resp.status_code != 200:
                        # retry once with the opposite scheme
                        url = f"http://{host}/index.php/component/users"
                        if not portinfo.ssl_flag:
                            url = f"https://{host}/index.php/component/users"
                    else:
                        got = True
                        break
                    p = ProxyMngr.get_one_crosswall()
                    proxydict = None
                    if isinstance(p, ProxyItem):
                        proxydict = p.proxy_dict
                        self._logger.debug(f"proxy ip: {p._ip}, port: {p._port}")
                    iserr = False
                    try:
                        resp = self._ha.get_response(
                            url, timeout=10, headers=req_headers, verify=False)
                    except Exception:
                        iserr = True
                    if iserr or resp is None or resp.status_code != 200:
                        failnum += 1
                        continue
                    got = True
                    break
                except Exception:
                    self._logger.trace(f"Get {url} error")
                    failnum += 1
                finally:
                    if not got or failnum >= 1:
                        break
            if iserr or resp is None or resp.status_code != 200:
                return
            self._logger.debug(f"Succeed get {JoomlaCareers.vuln}, url:{url}")
            siteinfo: SiteInfo = SiteInfo(url)
            respheader = ""
            for k, v in resp.headers.items():
                respheader += f"{k}:{v}\n"
            siteinfo.set_httpdata(None, None, respheader, resp.text)
            # add component info to the site
            wapres = 0
            try:
                self._logger.info(f"Start joomlacareers wappalyzer: {url}")
                for com in self.ca.get_alyzer_res(level=1, url=url):
                    wapres += 1
                    siteinfo.set_components(com)
            except Exception:
                self._logger.error("Get joomlacareers components error")
            portinfo.set_siteinfo(siteinfo)
            self._logger.info(
                f"Stop joomlacareers wappalyzer: {url} rescount:{wapres}")
            log = f"{JoomlaCareers.vuln} vulnerability scan finished"
            outlog(log)
        except Exception:
            self._logger.error("Joomla careers error")
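# The retry loop above swaps URL schemes on failure: if the port was flagged
# ssl the retry goes over plain http, and vice versa. A small pure helper
# sketching that candidate ordering (hypothetical, not used by the class):
def _candidate_scheme_urls(host: str, ssl_flag: bool,
                           path: str = "/index.php/component/users") -> list:
    first = "https" if ssl_flag else "http"
    second = "http" if ssl_flag else "https"
    return [f"{first}://{host}{path}", f"{second}://{host}{path}"]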
class Zgrab2ParserHttp(Zgrab2ParserBase):
    """zgrab2 parser"""

    # _logger: MsLogger = MsLogManager.get_logger("Zgrab2ParserHttp")
    _re_title = re.compile(r"<title>(.*?)</title>", re.S | re.M)
    # <meta content="17173,17173.com,17173游戏网,网络游戏" name="Keywords" />
    _re_meta = re.compile(r'<meta[^>]+?name="(keywords|description)"[^>]+?/>',
                          re.S | re.M | re.IGNORECASE)

    def __init__(self):
        Zgrab2ParserBase.__init__(self)
        self.ca = WebAlyzer('isouttask')
        # self._name = type(self).__name__

    def _parse_http(self, sj, portinfo: PortInfo) -> SiteInfo:
        """Parse one json block and build a SiteInfo; return None on failure."""
        res: SiteInfo = None
        try:
            if "data" not in sj or "http" not in sj["data"]:
                return
            sjhttp = sj["data"]["http"]
            succ = sjhttp["status"]
            if succ != "success":
                return
            # ??? what ?
            protocol = sjhttp["protocol"]
            if protocol != "http":
                return
            if portinfo.service != protocol:
                portinfo.service = protocol
            if portinfo.ssl_flag:
                portinfo.service = 'https'
            host: str = None
            if "ip" in sj:
                host = sj["ip"]
            elif "domain" in sj:
                host = sj["domain"]
            else:
                return
            self._get_port_timestamp(sjhttp, portinfo)
            res = SiteInfo(host)
            # add component info to the site
            wapres = 0
            try:
                self._logger.info(f"Start scout zgrab2http wappalyzer: {host}")
                for com in self.ca.get_alyzer_res(level=1, url=host):
                    wapres += 1
                    res.set_components(com)
            except Exception:
                self._logger.error(
                    f"Get scout zgrab2http components error, err:{traceback.format_exc()}")
            self._logger.info(
                f"Stop scout zgrab2http wappalyzer: {host} rescount:{wapres}")
            # append ips from portinfo to current siteinfo
            # should here use dnspy?
            res.set_ips(*[ip for ip in portinfo.ips])
            sjresult = sjhttp["result"]
            # location
            sjresp = sjresult["response"]
            if ("request" in sjresp and "url" in sjresp["request"]
                    and "path" in sjresp["request"]["url"]):
                res.location = sjresp["request"]["url"]["path"]
            # redirects
            if "redirect_response_chain" in sjhttp:
                for sjredir in sjhttp["redirect_response_chain"]:
                    scheme = sjredir["request"]["url"]["scheme"]
                    host = sjredir["request"]["url"]["host"]
                    path = sjredir["request"]["url"]["path"]
                    res.set_redirect("{}://{}{}".format(scheme, host, path))
            # httpdata request headers / joint
            reqheaders: str = ""
            protocolline = sjresp["protocol"]["name"]
            sjreq = sjresp["request"]
            method = sjreq["method"]
            reqheaders += "{} {}\n".format(method, protocolline)
            if "host" in sjreq:
                reqheaders += "host: {}\n".format(sjreq["host"])
            for k, values in sjreq["headers"].items():
                if k != "unknown":
                    for v in values:
                        reqheaders += "{}: {}\n".format(k, v)
                else:
                    # "unknown" is a list of {"key":..., "value":[...]} entries;
                    # iterate the entry's value list, not the dict itself
                    for val in values:
                        k = val["key"]
                        for v in val["value"]:
                            reqheaders += "{}: {}\n".format(k, v)
            res.set_httpdata(reqheader=reqheaders)
            # httpdata request body / joint
            # the http request body is None because "Method" is "GET"
            # httpdata response headers / joint
            respheaders: str = ""
            statusline = sjresp["status_line"]
            respheaders += "{} {}\n".format(protocolline, statusline)
            for k, values in sjresp["headers"].items():
                if k != "unknown":
                    for v in values:
                        respheaders += "{}: {}\n".format(k, v)
                else:
                    for val in values:
                        k = val["key"]
                        for v in val["value"]:
                            respheaders += "{}: {}\n".format(k, v)
            if "transfer_encoding" in sjresp:
                for v in sjresp["transfer_encoding"]:
                    respheaders += "transfer-encoding: {}\n".format(v)
            if "content_length" in sjresp and sjresp["content_length"] != -1:
                respheaders += "content-length: {}\n".format(
                    sjresp["content_length"])
            res.set_httpdata(respheader=respheaders)
            # httpdata response body
            respbody: str = None
            if "body" in sjresp:
                respbody = sjresp["body"]
                res.set_httpdata(respbody=respbody)
            # portinfo.banner / reqheader+reqbody+respheader+respbody
            # (the reqbody slot stays empty; guard respbody, which may be None)
            portinfo.banner += "{}\n\n{}\n\n{}\n\n{}".format(
                reqheaders.rstrip(), "", respheaders.rstrip(),
                respbody.rstrip() if respbody else "")
            # title
            title: str = None
            if respbody:
                m = Zgrab2ParserHttp._re_title.search(respbody)
                if m is not None:
                    title = m.group(1)
            res.title = title
            # meta / this should be joint to a json str
            meta: dict = {}
            if respbody:
                # <meta content="17173,17173.com,17173游戏网,网络游戏" name="Keywords" />
                for m in Zgrab2ParserHttp._re_meta.finditer(respbody):
                    if m is not None:
                        k = m.group(1).lower()
                        succ, v = helper_str.substringif(
                            respbody[m.start():m.end()], 'content="', '"')
                        if succ:
                            meta[k] = v
            if len(meta) > 0:
                meta = json.dumps(meta)
                res.meta = meta
            # favicon / this requires an extra http request:
            # find the url for favicon.ico, e.g.
            # <link type="image/x-icon" rel="icon" href="//ue.17173cdn.com/images/lib/v1/favicon-hd.ico" />
            # <link type="image/x-icon" rel="shortcut icon" href="//ue.17173cdn.com/images/lib/v1/favicon.ico" />
            # web technologies recognize
            if res is not None:
                portinfo.set_siteinfo(res)
        except Exception:
            self._logger.error("Parse http json line error:{}".format(
                traceback.format_exc()))
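# zgrab2 groups non-canonical header names under an "unknown" key whose value
# is a list of {"key": ..., "value": [...]} entries; the header loops above
# flatten both shapes into "name: value" lines. A self-contained sketch of
# that flattening over hand-made sample data (illustrative, not captured
# zgrab2 output):
def _flatten_zgrab2_headers(headers: dict) -> str:
    lines = ""
    for k, values in headers.items():
        if k != "unknown":
            for v in values:
                lines += "{}: {}\n".format(k, v)
        else:
            for val in values:
                for v in val["value"]:
                    lines += "{}: {}\n".format(val["key"], v)
    return lines

# _flatten_zgrab2_headers({"server": ["nginx"],
#                          "unknown": [{"key": "x-powered-by",
#                                       "value": ["PHP/7.4.10"]}]})
# -> "server: nginx\nx-powered-by: PHP/7.4.10\n"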
class CVE20209054(LogicalHttp):
    vuln = 'zyxel_command_injection'

    def __init__(self):
        LogicalHttp.__init__(self, CVE20209054.vuln)
        self.ca = WebAlyzer(self._name)
        self.products = [
            'NAS326', 'NAS520', 'NAS540', 'NAS542', 'ATP100', 'ATP200',
            'ATP500', 'ATP800', 'ZyWALL110', 'ZyWALL310', 'ZyWALL1100',
            'NSA210', 'NSA-220', 'NSA221', 'NSA310', 'NSA310S', 'NSA320',
            'NSA320S', 'NSA325', 'NSA325v2', 'USG20-VPN', 'USG20W-VPN',
            'VPN50', 'VPN100', 'VPN300', 'VPN1000', 'USG40', 'USG40W',
            'USG60', 'USG60W', 'USG110', 'USG210', 'USG310', 'USG1100',
            'USG1900', 'USG2200'
        ]

    def make_siteinfo(self, url, response, portinfo):
        """
        Multiple sites may be produced, so this step is factored into
        its own method.
        """
        self._logger.debug(f"Succeed get {self.vuln}, url:{url}")
        siteinfo: SiteInfo = SiteInfo(url)
        respheader = ""
        for k, v in response.headers.items():
            respheader += f"{k}:{v}\n"
        siteinfo.set_httpdata(None, None, respheader, response.text)
        wapres = 0
        try:
            self._logger.info(f"Start {self.vuln} wappalyzer: {url}")
            for com in self.ca.get_alyzer_res(level=1, url=url):
                wapres += 1
                siteinfo.set_components(com)
        except Exception:
            self._logger.error(f"Get {self.vuln} components error")
        portinfo.set_siteinfo(siteinfo)
        self._logger.info(f"Stop {self.vuln} wappalyzer: {url} rescount:{wapres}")

    def run_logic_grabber(self, host: str, portinfo: PortInfo, **kwargs):
        outlog = kwargs.get('outlog')
        log = f"Start scanning vulnerability: {CVE20209054.vuln}"
        self._logger.debug(log)
        outlog(log)
        sa = requests.Session()
        headers = {
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Cache-Control': 'no-cache',
            'Host': host,
            'Pragma': 'no-cache',
            'Proxy-Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
        }
        sa.headers.update(headers)
        http_str = 'http'
        if portinfo.ssl_flag:
            http_str = 'https'
        # the initial url
        url = f"{http_str}://{host}"
        self._logger.debug(f"Start {self.vuln} url:{url}")
        try:
            # this page sometimes redirects automatically,
            # so redirects must stay enabled
            response = sa.get(url, verify=False, timeout=10)
            if response is None or response.status_code != 200:
                self._logger.debug(
                    f"Cannot connect {url}, resp:{response.status_code if response is not None else None}")
                return
            self._logger.debug(f"Succeed get {self.vuln}, url:{url}")
            if response.text.find('/zyxel/loginwrap.html') == -1:
                self._logger.debug("Not found js page: /zyxel/loginwrap.html")
                return
            url = url + "/zyxel/loginwrap.html"
            self._logger.debug(f"Start {self.vuln} url:{url}")
            response = sa.get(url, verify=False, timeout=10)
            if response is None or response.status_code != 200 or response.text is None:
                self._logger.debug(
                    f"Cannot connect {url}, resp:{response.status_code if response is not None else None}")
                return
            find = False
            match = re.search('<title>(.*)</title>', response.text, re.I | re.M)
            if match:
                title = match.group(1).strip()
                for product in self.products:
                    if title.lower().endswith(product.lower()):
                        find = True
                        break
            # if nothing was found, this step still needs to run
            if find:
                self.make_siteinfo(url, response, portinfo)
            # getting this far is enough, but keep going to pick up the version number
            if response.text.find('utility/flag.js') != -1:
                try:
                    resurl = response.url[:response.url.rfind('/')]
                    url = resurl + '/utility/flag.js'
                    self._logger.debug(f"Start {self.vuln} url:{url}")
                    response = sa.get(url, verify=False, timeout=10)
                    if response is None or response.status_code != 200:
                        self._logger.debug(
                            f"Cannot connect {url}, resp:{response.status_code if response is not None else None}")
                        return
                    self.make_siteinfo(url, response, portinfo)
                except Exception:
                    self._logger.error(f"Cannot connect {url}")
            log = f"{CVE20209054.vuln} vulnerability scan finished"
            outlog(log)
        except Exception:
            self._logger.error(
                f"{self.vuln} scan error\nerr:{traceback.format_exc()}")
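# The product check above treats a page <title> that ends with any known
# Zyxel model name (compared case-insensitively) as a device hit. A
# standalone sketch of that rule; `products` stands in for self.products:
def _title_matches_product(title: str, products: list) -> bool:
    t = title.strip().lower()
    return any(t.endswith(p.lower()) for p in products)

# _title_matches_product("NAS - ZyXEL NAS542", ['NAS542', 'USG40'])  # -> True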
class LotusSmtp(LogicalHttp):
    vuln = 'ibm_lotus_domino_password_hash_extraction'

    def __init__(self):
        LogicalHttp.__init__(self, LotusSmtp.vuln)
        self.ca = WebAlyzer(self._name)

    def run_logic_grabber(self, host: str, portinfo: PortInfo, **kwargs):
        try:
            outlog = kwargs.get('outlog')
            log = f"Start scanning vulnerability: {LotusSmtp.vuln}"
            self._logger.debug(log)
            outlog(log)
            failnum = 0
            url = None
            resp: Response = None
            got = False
            while True:
                url = f"http://{host}/names.nsf"
                if portinfo.ssl_flag:
                    url = f"https://{host}/names.nsf"
                self._logger.debug(f"Start lotussmtp url:{url}")
                try:
                    p: ProxyItem = ProxyMngr.get_one_crosswall()
                    proxydict = None
                    if isinstance(p, ProxyItem):
                        proxydict = p.proxy_dict
                        self._logger.debug(f"proxy ip: {p._ip}, port: {p._port}")
                    headers = f"""
                    Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
                    Accept-Encoding: gzip, deflate
                    Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
                    Cache-Control: no-cache
                    Host: {host}
                    Pragma: no-cache
                    Proxy-Connection: keep-alive
                    Upgrade-Insecure-Requests: 1
                    User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36
                    """
                    resp = self._ha.get_response(
                        url,
                        headers=headers,
                        verify=False,
                        timeout=10,
                        allow_redirects=False)
                    if resp is None or resp.status_code != 200:
                        self._logger.debug(
                            f"Cannot connect {url}, resp:{resp.status_code if resp is not None else None}")
                        failnum += 1
                        continue
                    got = True
                    break
                except Exception:
                    self._logger.debug(f"Request {url} error")
                    failnum += 1
                finally:
                    if not got and failnum >= 1:
                        break
            if resp is None or resp.status_code != 200:
                self._logger.debug(
                    f"Connect {url} failed, got nothing, resp:{resp.status_code if resp is not None else None}")
                return
            self._logger.debug(f"Succeed get lotussmtp, url:{url}")
            siteinfo: SiteInfo = SiteInfo(url)
            respheader = ""
            for k, v in resp.headers.items():
                respheader += f"{k}:{v}\n"
            siteinfo.set_httpdata(None, None, respheader, resp.text)
            wapres = 0
            try:
                self._logger.info(f"Start lotussmtp wappalyzer: {url}")
                for com in self.ca.get_alyzer_res(level=1, url=url):
                    wapres += 1
                    siteinfo.set_components(com)
            except Exception:
                self._logger.error("Get lotussmtp components error")
            portinfo.set_siteinfo(siteinfo)
            self._logger.info(
                f"Stop lotussmtp wappalyzer: {url} rescount:{wapres}")
            log = f"{LotusSmtp.vuln} vulnerability scan finished"
            outlog(log)
        except Exception:
            self._logger.error("Lotus smtp error")
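# LotusSmtp treats a direct 200 on /names.nsf (without following redirects)
# as exposure of the Domino directory. A minimal reproduction of just that
# probe with plain `requests` (already imported by this module); note this is
# a hedged sketch: the production path goes through self._ha plus a crosswall
# proxy instead of a bare requests call.
def _probe_names_nsf(host: str, use_ssl: bool = False, timeout: int = 10) -> bool:
    scheme = "https" if use_ssl else "http"
    url = f"{scheme}://{host}/names.nsf"
    try:
        resp = requests.get(url, verify=False, timeout=timeout,
                            allow_redirects=False)
        return resp.status_code == 200
    except requests.RequestException:
        return False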