def subnetScan(host, hostOnly=False, configFile=None):
    '''Scan the class-C subnet (C-segment) of *host* for HTTP ports.

    @params:
        host: target URL/host; must pass URL.check()
        hostOnly: if True scan only the host itself, otherwise host/24
        configFile: optional port-mapping YAML; defaults to
            <ptdpath>/port_mapping.yaml
    @returns:
        nmapScan() result, or None when the host is not a valid URL.
    '''
    if not URL.check(host):
        return None
    host = URL.getHost(host)

    if configFile:
        confFile = configFile
    else:
        confFile = os.path.join(conf['ptdpath'], "port_mapping.yaml")
    portsConf = YamlConf(confFile)

    # Collect every port whose mapping declares the http protocol.
    portList = []
    for entry in portsConf:
        if portsConf[entry]['protocol'] == "http":
            portList.append(str(entry))
    httpPorts = ",".join(portList)

    # -PS probes and -p target the same port list; output as XML on stdout.
    target = host if hostOnly else host + "/24"
    nmapCmd = "nmap -n -PS{ports} -p{ports} {host} -oX -".format(
        ports=httpPorts, host=target)
    return nmapScan(nmapCmd)
def __init__(self, url, notFoundPattern=None, cmsEnhance=False):
    '''Service/application fingerprint identifier.

    @params:
        url: target URL; must pass URL.check()
        notFoundPattern: optional pattern marking a fake-200 "not found" page
        cmsEnhance: enable enhanced CMS identification
    @raises:
        PenError: when the URL is malformed.
    '''
    self._url = url.strip()
    self._notFoundPattern = notFoundPattern
    self._cmsEnhance = cmsEnhance
    if not URL.check(self._url):
        raise PenError("Service Identify, URL format error")
    self._target = URL.format(self._url)
    self._fp = YamlConf(
        os.path.join(sys.path[0], "script", "data", "app_fingerprint.yaml"))
    # FIX: removed leftover debug scaffolding that hard-coded the 'Drupal'
    # fingerprint (name/ddddd temp variables) — it served no runtime purpose.
    # metaInfo: page meta information
    # keys: url, statusCode, headers, html, title, robots
    self._metaInfo = {}
    self._initMetaInfo()
    # Accumulated match results keyed by the fingerprint database.
    self._result = MatchsInfo(self._fp)
    self._matchFuncs = {}
    self._initHandleFuncs()
    self._log = Log("service_identify")
def __init__(self, url="", cookies=None, headers=None, args=None, proxy=None):
    '''
    @params:
        url: target URL
        cookies: custom cookies, dict
        headers: http headers, dict
        args: extra arguments, dict
        proxy: proxies mapping passed to requests
    '''
    # FIX: mutable default arguments ({}) were shared across all instances;
    # since the default dict was also stored on self.args, mutating it on one
    # instance would leak into every other. None-defaults are backward
    # compatible for all callers.
    self.url = url.strip()
    if self.url:
        formated = URL.format(self.url)
        self.protocol = formated.protocol
        self.uri = formated.uri
        self.host = formated.host
        self.path = formated.path
        self.baseURL = formated.baseURL
        self.params = formated.params
    else:
        self.uri, self.host, self.path, self.baseURL, self.params = "", "", "", "", ""
    self.args = args if args is not None else {}
    self.http = requests.Session()
    if cookies:
        for key, value in cookies.iteritems():
            self.http.cookies.set(key, value)
    if headers:
        self.http.headers.update(headers)
    if proxy:
        self.http.proxies = proxy
    self.register()
def __init__(self, url="", cookie="", headers=None, args=None):
    '''
    @params:
        url: target URL
        cookie: custom cookie, string
        headers: http headers, dict
        args: extra arguments, dict
    '''
    # FIX: mutable default arguments ({}) were shared across all instances;
    # the default dict was stored on self.args, so a mutation on one instance
    # would be visible in every other. None-defaults are backward compatible.
    self.url = url.strip()
    if self.url:
        formated = URL.format(self.url)
        self.protocol = formated.protocol
        self.uri = formated.uri
        self.host = formated.host
        self.path = formated.path
        self.baseURL = formated.baseURL
        self.params = formated.params
    else:
        self.uri, self.host, self.path, self.baseURL, self.params = "", "", "", "", ""
    self.args = args if args is not None else {}
    cookie = cookie.strip()
    self.http = requests.Session()
    if cookie:
        # Cookie string is parsed into name/value pairs by the module helper.
        for item in _strCookie2Dict(cookie):
            self.http.cookies.set(item['name'], item['value'])
    if headers:
        self.http.headers.update(headers)
    self.register()
def __init__(self, url="", cookies=None, headers=None, args=None, proxy=None):
    '''
    @params:
        url: target URL
        cookies: custom cookies, dict
        headers: http headers, dict
        args: extra arguments, dict
        proxy: proxies mapping passed to requests
    '''
    # FIX: mutable default arguments ({}) were shared across all instances;
    # the shared default dict was stored on self.args, so mutating it on one
    # instance contaminated every other. None-defaults are backward compatible.
    self.url = url.strip()
    if self.url:
        formated = URL.format(self.url)
        self.protocol = formated.protocol
        self.uri = formated.uri
        self.host = formated.host
        self.path = formated.path
        self.baseURL = formated.baseURL
        self.params = formated.params
    else:
        self.uri, self.host, self.path, self.baseURL, self.params = "", "", "", "", ""
    self.args = args if args is not None else {}
    self.http = requests.Session()
    if cookies:
        for key, value in cookies.iteritems():
            self.http.cookies.set(key, value)
    if headers:
        self.http.headers.update(headers)
    if proxy:
        self.http.proxies = proxy
    self.register()
def __init__(self, baseURL, notFoundPattern=None):
    '''CMS identifier.

    @params:
        baseURL: URL of the site to identify
        notFoundPattern: optional marker used to recognize fake-200/301
            "not found" responses
    '''
    self.baseURL = URL.getURI(baseURL).rstrip("/")
    self.notFoundPattern = notFoundPattern
    self.fp = YamlConf(self._fingerprintFile)
    self.log = Log("cmsidentify")
def subnetScan(host, hostOnly=False, configFile=None):
    '''Scan the class-C subnet (C-segment) of *host* for HTTP ports.

    @params:
        host: target URL/host; must pass URL.check()
        hostOnly: if True scan only the host itself, otherwise host/24
        configFile: optional port-mapping YAML file path
    @returns:
        nmapScan() result, or None when the host is not a valid URL.
    '''
    if not URL.check(host):
        return None
    host = URL.getHost(host)

    defaultConf = os.path.join(sys.path[0], "script", "data", "port_mapping.yaml")
    portMap = YamlConf(configFile if configFile else defaultConf)

    # Keep only ports whose mapping declares the http protocol.
    httpPorts = ",".join(
        str(port) for port in portMap if portMap[port]['protocol'] == "http")

    scanTarget = "{0}/24".format(host) if not hostOnly else host
    nmapCmd = "nmap -n -PS{ports} -p{ports} {host} -oX -".format(
        ports=httpPorts, host=scanTarget)
    return nmapScan(nmapCmd)
def __init__(self, baseURL, notFoundPattern=None):
    '''
    @params:
        baseURL: URL of the site to identify
        notFoundPattern: marker for a "not found" page; some websites
            answer only 301 or 200, so this field is needed to recognize
            an effective '404'
    '''
    baseURL = URL.getURI(baseURL)
    self.baseURL = baseURL.rstrip("/")
    self.notFoundPattern = notFoundPattern
    # Fingerprint database loaded from the class-level file path.
    self.fp = YamlConf(self._fingerprintFile)
    self.log = Log("cmsidentify")
def __init__(self, url, notFoundPattern=None, cmsEnhance=False):
    '''Service/application fingerprint identifier.

    @params:
        url: target URL; must pass URL.check()
        notFoundPattern: optional pattern marking a fake-200 "not found" page
        cmsEnhance: enable enhanced CMS identification
    @raises:
        PenError: when the URL is malformed.
    '''
    self._url = url.strip()
    self._notFoundPattern = notFoundPattern
    self._cmsEnhance = cmsEnhance
    if not URL.check(self._url):
        raise PenError("Service Identify, URL format error")
    self._target = URL.format(self._url)

    fingerprintFile = os.path.join(conf['ptdpath'], "app_fingerprint.yaml")
    self._fp = YamlConf(fingerprintFile)

    # Page meta information: url, statusCode, headers, html, title, robots.
    self._metaInfo = {}
    self._initMetaInfo()

    # Match results accumulated against the fingerprint database.
    self._result = MatchsInfo(self._fp)
    self._matchFuncs = {}
    self._initHandleFuncs()
    self._log = Log("service_identify")
def bruteforce(self, baseURL, notFoundPattern=None, safeURL=None, timeout=10, delay=0):
    '''Brute-force web paths under *baseURL* using the built-in dictionaries.

    @params:
        baseURL: base URL to scan
        notFoundPattern: marker identifying a fake-200 "not found" page
        safeURL: URL requested between probes to avoid WAF lockout
        timeout: per-request timeout in seconds
        delay: sleep between probes in seconds
    @returns:
        list of URLs that answered 200 (and do not look like "not found").
    '''
    baseURL = URL.getURI(baseURL)
    keyword = self._getKeywordFromURL(baseURL)
    if keyword:
        self.keywords.append(keyword)

    matchs = []
    baseURL = baseURL.rstrip("/")
    for line in self._dictIter():
        time.sleep(delay)
        self._safeRequest(safeURL)

        url = baseURL + line
        try:
            self.log.debug(u"request url '{0}'".format(url))
            # Redirects are not followed so a 301/302 is not mistaken for a hit.
            response = http.get(url, timeout=timeout, allow_redirects=False)
        except http.ConnectionError:
            continue

        if response.status_code != 200:
            continue
        # FIX: previously matchs.append() lived in the 'else' of the
        # notFoundPattern check, so when a pattern WAS supplied no URL could
        # ever be recorded. Now a 200 response is a hit unless its body
        # contains the "not found" marker.
        if notFoundPattern and notFoundPattern in response.content:
            continue
        self.log.debug(u"find available url '{0}'".format(url))
        matchs.append(url)

    return matchs
def _getKeywordFromURL(self, url):
    '''Extract a keyword from *url* (e.g. 'xxx' from xxx.com).

    The keyword is later used to generate web-backup-file dictionary
    entries. Returns None for empty hosts and bare IP addresses.
    '''
    netloc = urlparse.urlparse(url)[1]
    if not netloc:
        return None
    if URL.isIP(url):
        return None
    labels = netloc.split(".")
    # Skip a leading 'www' label; otherwise take the first label.
    index = 1 if netloc.startswith("www.") else 0
    try:
        return labels[index]
    except IndexError:
        return None
def __init__(self, url="", cookie="", headers=None, elseArgs=None):
    '''
    @params:
        url: target URL
        cookie: custom cookie, string
        headers: http headers, dict
        elseArgs: extra arguments, dict
    '''
    # FIX: mutable default arguments ({}) were shared across all instances;
    # the default dict was stored on self.elseArgs, so mutating it on one
    # instance would leak into every other. None-defaults are backward
    # compatible for all callers.
    self.url = url.strip()
    if self.url:
        formated = URL.format(self.url)
        self.uri = formated.uri
        self.host = formated.host
        self.path = formated.path
        self.baseURL = formated.baseURL
        self.params = formated.params
    else:
        self.uri, self.host, self.path, self.baseURL, self.params = "", "", "", "", ""
    self.elseArgs = elseArgs if elseArgs is not None else {}
    cookie = cookie.strip()
    self.http = requests.Session()
    if cookie:
        # Cookie string is parsed into name/value pairs by the module helper.
        for item in _strCookie2Dict(cookie):
            self.http.cookies.set(item['name'], item['value'])
    self.http.headers.update(headers if headers is not None else {})
    self.register()
def doGoogleHacking(args, out):
    '''Google Hacking feature: search keywords via baidu/bing/google.

    @params:
        args: parsed CLI arguments (keywords, engine, size, unique, output)
        out: output helper used for printing and file writing
    @returns:
        False on unsupported engine, otherwise None.
    '''
    out.init(u"Google Hacking功能", args.output)

    # FIX: sys.stdin.encoding is None when stdin is piped/redirected, and
    # str.decode(None) raises TypeError; fall back to utf-8 in that case.
    keywords = args.keywords.decode(sys.stdin.encoding or "utf-8")
    engineName = args.engine.lower().strip() if args.engine else "baidu"
    size = args.size if args.size else 20

    if engineName == "baidu":
        engine = Baidu()
    elif engineName == "bing":
        engine = Bing()
    elif engineName == "google":
        engine = Google()
    else:
        out.error(u"不支持 '{0}' 搜索引擎,必须为 baidu/bing/google 之一".format(engineName))
        return False

    hostSet = set()
    out.warnning(u"'{0}' 在 '{1}' 中的搜索结果如下:\n".format(keywords, engineName))
    for item in engine.search(keywords, size):
        if not args.unique:
            out.info(out.Y("{0:>6} : ".format("title")) + item.title)
            out.info(out.Y("{0:>6} : ".format("url")) + item.url + "\n")
            out.writeLine(item.url)
        else:
            # Unique mode: report each host at most once.
            host = URL.getHost(item.url)
            if host:
                if host not in hostSet:
                    hostSet.add(host)
                    out.info(out.Y("{0:>6} : ".format("title")) + item.title)
                    out.info(out.Y("{0:>6} : ".format("url")) + item.url + "\n")
                    out.writeLine(item.url)
                else:
                    continue