Exemple #1
0
def SenFileScan(domain, url):
    """
    Sensitive file / directory scan.

    Builds candidate URLs from the "SenScan" wordlist stored in redis
    (dictionary file: dict\\SEN_scan.txt), probes them concurrently, then
    filters out hits that merely mirror the site's custom 404 page before
    recording them as BugList rows.

    :param domain: original target identifier, stored as BugList.oldurl
    :param url: base URL ("scheme://host") the wordlist entries are appended to
    :return: newline-joined string of confirmed sensitive URLs ("" if none)
    """
    pools = 20  # thread-pool size for the concurrent probes
    urlList = []
    for i in range(0, redispool.llen("SenScan")):
        suffix = redispool.lindex("SenScan", i)
        senurl = "{}/{}".format(url, suffix)
        urlList.append(senurl)
    pool = ThreadPool(pools)
    # UrlRequest returns the URL for 200/403 responses, None otherwise.
    SenFileMessage = pool.map(UrlRequest, urlList)
    SenFileMessage2 = ""
    pool.close()
    pool.join()
    # Fetch a page that certainly does not exist so the site's "soft 404"
    # template can be recognised among the positive probe results.
    url404 = "{}/springbird404page".format(url)
    try:
        rep404 = requests.get(url404,
                              headers=core.GetHeaders(),
                              timeout=3,
                              verify=False).text
    except Exception as e:
        print("超时")
        rep404 = str(e)
        pass
    if len(SenFileMessage) != 0:
        with app.app_context():
            print("Sen file and dir : \n")
            # NOTE: the loop variable shadows the `url` parameter; the
            # parameter is not used again after this point.
            for url in SenFileMessage:
                try:
                    if url is None:
                        continue
                    rep = requests.get(url,
                                       headers=core.GetHeaders(),
                                       timeout=1,
                                       verify=False)
                    # Compare against the 404 template to suppress noise
                    # from soft-404 responses.
                    if not core.is_similar_page(rep404, rep.text, radio=0.85):
                        print(url)
                        bug = BugList(oldurl=domain,
                                      bugurl=url,
                                      bugname="SenDir",
                                      buggrade=redispool.hget(
                                          'bugtype', "SenDir"),
                                      payload=url,
                                      bugdetail=rep.text)
                        SenFileMessage2 += url + "\n"
                        redispool.pfadd(redispool.hget('bugtype', "SenDir"),
                                        url)
                        redispool.pfadd("SenDir", url)
                        db.session.add(bug)
                except Exception as e:
                    # print(e)
                    pass
            db.session.commit()
    return SenFileMessage2
Exemple #2
0
 def __init__(self, url, redispool):
     """
     Probe the target once and cache the first successful HTTP response.

     Tries the URL as given (defaulting to http:// when no scheme is
     present); if that request fails, retries once over https://.

     :param url: target host or full URL
     :param redispool: shared redis connection used by the scan methods
     """
     print("hi!")
     self.domain = url
     self.redispool = redispool
     try:
         if not (url.startswith("http://") or url.startswith("https://")):
             self.url = "http://" + url
         else:
             self.url = url
         self.rep = requests.get(self.url,
                                 headers=core.GetHeaders(),
                                 timeout=5,
                                 verify=False)
     except Exception:
         # Best effort: a failed probe is recorded as None, not raised.
         self.rep = None
     if self.rep is None:  # was `== None`; identity test is correct for None
         try:
             self.url = "https://" + url
             self.rep = requests.get(self.url,
                                     headers=core.GetHeaders(),
                                     timeout=5,
                                     verify=False)
         except Exception:
             pass
Exemple #3
0
def inputfilter(url):
    '''
    Entry-point filter / normalisation for scan targets.

    Accepted input formats include:
        127.0.0.1
        http://127.0.0.1
        www.baidu.com
        https://www.baidu.com
    Return format: (bare host, reachable URL, requests response), e.g.
    ("www.baidu.com", "https://www.baidu.com", rep).  When the target is
    unreachable, (None, None, None) is returned and the redis scan-queue
    counters are rolled back.

    :param url: raw user-supplied target
    :return: (domain, attack URL, response) or (None, None, None)
    '''
    rep, rep1, rep2 = None, None, None
    if url.endswith("/"):
        url = url[:-1]
    if not url.startswith("http://") and not url.startswith("https://"):
        # No scheme supplied: try plain http first, then https.
        attackurl1 = "http://" + url
        attackurl2 = "https://" + url
        try:
            rep1 = requests.get(attackurl1, headers=core.GetHeaders(), timeout=4, verify=False)
        except Exception:
            pass
        try:
            rep2 = requests.get(attackurl2, headers=core.GetHeaders(), timeout=4, verify=False)
        except Exception:
            pass
        # A Response is truthy only for non-error status codes.
        if rep1:
            return url, attackurl1, rep1
        elif rep2:
            return url, attackurl2, rep2
        else:
            print("None data")
            try:
                # Target unreachable: decrement the pending-scan counter and
                # clear the "now scanning" marker so the queue stays consistent.
                count = redispool.hget('targetscan', 'waitcount')
                if isinstance(count, str):  # was `'str' in str(type(count))`
                    waitcount = int(count) - 1
                    redispool.hset("targetscan", "waitcount", str(waitcount))
                else:
                    redispool.hset("targetscan", "waitcount", "0")
                redispool.hdel("targetscan", "nowscan")
            except Exception as e:
                print(e)
            return None, None, None
    else:
        attackurl = url
        try:
            rep = requests.get(attackurl, headers=core.GetHeaders(), timeout=4, verify=False)
        except Exception:
            pass
        if rep:
            # Strip the scheme so the first element is the bare domain.
            if "http://" in url:
                return url.replace("http://", ""), attackurl, rep
            else:
                return url.replace("https://", ""), attackurl, rep
        else:
            print("{}访问超时".format(attackurl))
            return None, None, None
Exemple #4
0
def UrlRequest(url):
    """Probe *url*; return it when the server answers 200 or 403, else None."""
    try:
        response = requests.get(url, headers=core.GetHeaders(), timeout=1.0, verify=False)
    except Exception:
        return None
    if response.status_code in (200, 403):
        return url
Exemple #5
0
def SenFileScan(domain, redispool):
    """
    Sensitive file / directory scan.

    Builds one candidate URL per entry of the redis "SenScan" wordlist
    (dictionary file: dict\\SEN_scan.txt), probes them all through a thread
    pool and records every hit as a BugList row.

    :param domain: bare target host; candidates are http://<domain>/<entry>
    :param redispool: redis connection holding the "SenScan" list
    :return: newline-joined string of the URLs that answered 200/403
    """
    pools = 20  # number of worker threads
    urlList = ["http://{}/{}".format(domain, redispool.lindex("SenScan", i))
               for i in range(redispool.llen("SenScan"))]
    pool = ThreadPool(pools)
    # UrlRequest yields the URL for 200/403 responses and None otherwise.
    SenFileMessage = pool.map(UrlRequest, urlList)
    pool.close()
    pool.join()
    # BUG FIX: the original looped over the raw map() result, so every miss
    # (None entry) triggered requests.get(None) and printed a spurious error.
    hits = list(filter(None, SenFileMessage))
    if hits:
        with app.app_context():
            for url in hits:
                try:
                    rep = requests.get(url, headers=core.GetHeaders(), timeout=3, verify=False)
                    bug = BugList(oldurl=domain, bugurl=url, bugname="SenDir",
                                  buggrade=redispool.hget('bugtype', "SenDir"),
                                  payload=url, bugdetail=rep.text)
                    db.session.add(bug)
                except Exception as e:
                    print(e)
            db.session.commit()
    return "\n".join(hits)
Exemple #6
0
    def getPageLinks(self,url):
        '''
        Collect every link found in the page at *url*.

        Downloads the page, then regex-scans the markup for href/src/action
        attributes on common link-bearing tags.  Returns [] on any fetch error.

        :param url: page to scan
        :return: list of link strings (double-quoted matches may contain
                 duplicates; single-quoted matches are de-duplicated)
        '''
        try:
            headers = core.GetHeaders()
            # NOTE: .encode() turns the text into bytes, so str(content) below
            # actually scans the bytes *repr* (b'...'); the regexes still match
            # inside that representation.
            content = requests.get(url, timeout=5, headers=headers, verify=False).text.encode('utf-8')
            links = []
            tags = ['a', 'A', 'link', 'script', 'area', 'iframe', 'form']  # img
            tos = ['href', 'src', 'action']
            if url[-1:] == '/':
                url = url[:-1]
            try:
                for tag in tags:
                    for to in tos:
                        # Double-quoted and single-quoted attribute values.
                        link1 = re.findall(r'<%s.*?%s="(.*?)"' % (tag, to), str(content))
                        link2 = re.findall(r'<%s.*?%s=\'(.*?)\'' % (tag, to), str(content))
                        for i in link1:
                            links.append(i)

                        for i in link2:
                            if i not in links:
                                links.append(i)

            except Exception as e:
                print(e)
                print('[!] Get link error')
                pass
            return links
        except:
            # Any network failure degrades to "no links found".
            return []
Exemple #7
0
def GetSiteStation(ip):
    """
    Reverse-IP ("neighbouring site") lookup.

    Source 1: https://www.webscan.cc/search/  (single POST)
    Source 2: http://stool.chinaz.com          (paginated GET)
    (A third source, http://www.114best.com/ip/114.aspx, is listed but unused.)

    :param ip: IP address or domain to look up
    :return: newline-joined, de-duplicated list of co-hosted domains
    """
    data = {'domain': ip}
    url_1 = 'https://www.webscan.cc/search/'
    url_2_base = 'http://stool.chinaz.com'
    url_2 = 'http://stool.chinaz.com/same?s=' + ip + '&page=1'
    text2 = []
    try:
        rep1 = requests.post(url_1,
                             data=data,
                             headers=core.GetHeaders(),
                             timeout=2.0)
        rep1 = etree.HTML(rep1.text)
        text1 = rep1.xpath('//a[@class="domain"]/text()')
    except Exception:
        text1 = []
    try:
        # Follow the "next page" link until a page yields no results.
        while 1:
            rep2 = requests.get(url_2, headers=core.GetHeaders(), timeout=2.0)
            rep2 = etree.HTML(rep2.text)
            new_list = rep2.xpath('//div[@class="w30-0 overhid"]/a/text()')
            if len(new_list) == 0:
                break
            text2 += new_list
            next_url = "".join(rep2.xpath('//a[@title="下一页"]/@href'))
            url_2 = url_2_base + next_url
    except Exception:
        text2 = []
    text = list(set(text1).union(set(text2)))
    # BUG FIX: the original removed items from `text` while iterating it,
    # which silently skips the element following every removal.
    text = [t for t in text if "屏蔽的关键字" not in t]
    return "\n".join(text)
Exemple #8
0
 def getPageLinks_bak(self,url):
     '''
     Collect every href link from the page at *url* (regex fallback version).

     :param url: page to scan
     :return: list of href attribute values, [] on any error
     '''
     try:
         headers = core.GetHeaders()
         time.sleep(0.5)  # throttle: be gentle with the target
         # BUG FIX: the original appended .encode('utf-8'), producing bytes;
         # re.findall() with a str pattern then raised TypeError, so the
         # method always returned [].  Scan the decoded text instead.
         pageSource = requests.get(url, timeout=5, headers=headers, verify=False).text
         pageLinks = re.findall(r'(?<=href=\").*?(?=\")|(?<=href=\').*?(?=\')', pageSource)
         # print pageLinks
     except Exception:
         # print ('open url error')
         return []
     return pageLinks
Exemple #9
0
def FindIpAdd(ip):
    """
    Look up the physical location of an IP address via ip.yqie.com.

    :param ip: IP address string
    :return: newline-joined address info, "" when the lookup fails
    """
    result = ""  # renamed from `str`, which shadowed the builtin
    url = "http://ip.yqie.com/ip.aspx?ip=" + ip
    try:
        rep = requests.get(url, headers=core.GetHeaders(), timeout=2)
        rep = etree.HTML(rep.text)
        context = rep.xpath('//input[@id="AddressInfo"]/@value')
        result = "\n".join(context)
    except Exception:
        pass
    return result
Exemple #10
0
def GetSubDomain(domain):
    """
    Enumerate subdomains via tool.chinaz.com, following pagination until a
    page fails to load or has no "next page" link.

    :param domain: root domain to enumerate
    :return: newline-separated subdomains, each prefixed with http://;
             "" when nothing was found
    """
    chinaz_base_url = 'https://tool.chinaz.com/'
    chinaz_url = 'https://tool.chinaz.com/subdomain?domain=' + domain + '&page=1'
    attacklist = []
    while 1:
        try:
            rep = requests.get(chinaz_url, headers=core.GetHeaders(), timeout=2.0)
            rep = etree.HTML(rep.text)
            data = rep.xpath('//div[@class="w23-0"]/a[@href="javascript:"]/text()')
            attacklist.extend(data)
            # xpath()[0] raising IndexError (no next page) ends the loop.
            next_url = rep.xpath('//a[@title="下一页"]/@href')[0]
            chinaz_url = chinaz_base_url + next_url
        except Exception:
            break
    if not attacklist:
        # BUG FIX: the original indexed attacklist[0] unconditionally and
        # raised IndexError when no subdomain was found.
        return ""
    attacklist[0] = "http://" + attacklist[0]
    return "\nhttp://".join(attacklist)
Exemple #11
0
def POCScanConsole(attackurl, url):
    """
    Run every stored POC rule against the target and record matches.

    :param attackurl: original target identifier (stored as BugList.oldurl)
    :param url: base URL the POC rule paths are appended to
    """
    allpoc = POC.query.all()
    with app.app_context():
        for poc in allpoc:
            try:
                rep = requests.get(url + poc.rule, headers=core.GetHeaders(), timeout=2)
                # A POC hits when the page exists and contains its marker text.
                if rep.status_code != 404 and poc.expression in rep.text:
                    # BUG FIX: payload was `url + poc` (str + POC object, a
                    # TypeError); use the rule path that was actually requested.
                    bug = BugList(oldurl=attackurl, bugurl=url, bugname=poc.name,
                                  buggrade=redispool.hget('bugtype', poc.name),
                                  payload=url + poc.rule,
                                  bugdetail=rep.text)
                    redispool.pfadd(redispool.hget('bugtype', poc.name), url)
                    redispool.pfadd(poc.name, url)
                    db.session.add(bug)
            except Exception as e:
                print(e)
        db.session.commit()
Exemple #12
0
def FindDomainAdd(domain):
    """
    Look up the real (physical) address behind a domain via ip.yqie.com.

    :param domain: domain name to look up
    :return: address info with leading whitespace stripped, "" on failure
    """
    result = ""  # renamed from `str`, which shadowed the builtin
    url = "http://ip.yqie.com/ip.aspx?ip=" + domain
    try:
        rep = requests.get(url, headers=core.GetHeaders(), timeout=4)
        rep = etree.HTML(rep.text)
        context = rep.xpath('//div[@style="text-align: center; line-height: 30px;"]/text()')
        result = "\n".join(context)
    except Exception as e:
        print(e)
    return result.lstrip()
Exemple #13
0
def CScan(ip):
    """
    Probe a single host of a C-class sweep.

    Returns "[T]<ip> : <title>\\n" when the host answers 200 with a <title>
    tag, "[H]<ip> : have reason\\n" for a title-less 200, and None otherwise.

    :param ip: bare IP address (no scheme)
    :return: formatted result line or None
    """
    try:
        response = requests.get("http://" + ip, headers=core.GetHeaders(), timeout=1, verify=False)
    except Exception:
        return None
    if response.status_code != 200:
        return None
    matches = re.findall(r'<title>(.*?)</title>', response.text)
    if matches:
        return "[T]" + ip + ' : ' + matches[0] + "\n"
    return "[H]" + ip + " : have reason\n"
Exemple #14
0
def GetWhois(domain):
    """
    Scrape whois data for *domain* from http://whois.bugscaner.com/.

    Uses requests + lxml xpath; only the first 19 fields are kept.

    :param domain: a valid domain name
    :return: newline-joined whois fields, None when the lookup fails
    """
    whois_url = 'http://whois.bugscaner.com/'
    result = None  # renamed from `str`, which shadowed the builtin
    try:
        rep = requests.get(whois_url + domain, headers=core.GetHeaders(), timeout=4.0)
        rep = etree.HTML(rep.text)
        data = rep.xpath('//div[@class="stats_table_91bf7bf"]/b[not(@style)]/text()')[0:19]
        result = "\n".join(data)
    except Exception:
        pass
    return result
Exemple #15
0
def GetBindingIP(domain):
    '''
    Return the historical DNS resolution records of a domain (via site.ip138.com).

    :param domain: domain name or IP address
    :return: newline-joined records; "" when the lookup fails
    '''
    pattern = re.compile(r'^\d+\.\d+\.\d+\.\d+$')  # raw string; "is it an IP?"
    ip138_url = 'https://site.ip138.com/' + domain
    # BUG FIX: the local was only assigned inside the try, so on failure the
    # original `return str` returned the *builtin str type*, not a string.
    result = ""
    try:
        rep = requests.get(ip138_url, headers=core.GetHeaders(), timeout=1.0)
        rep = etree.HTML(rep.text)
        # IPs and domains are rendered in different parts of the page.
        if pattern.findall(domain):
            context = rep.xpath('//ul[@id="list"]/li/a/text()')
        else:
            context = rep.xpath('//div[@id="J_ip_history"]//a/text()')
        result = "\n".join(context)
    except Exception:
        pass
    return result
Exemple #16
0
def GetRecordInfo(domain):
    '''
    Return the ICP record (备案) information of a domain via icp.chinaz.com.

    :param domain: domain to query
    :return: formatted record info, "" when the lookup fails
    '''
    icpurl = 'https://icp.chinaz.com/' + domain
    context = ""
    try:
        rep = requests.get(icpurl, headers=core.GetHeaders(), timeout=4)
        rep = etree.HTML(rep.text)
        companyname = rep.xpath('//ul[@id="first"]/li/p/text()')[0]
        # renamed from `type`, which shadowed the builtin
        sponsortype = rep.xpath('//ul[@id="first"]/li/p/strong/text()')[0]
        icpnum = rep.xpath('//ul[@id="first"]/li/p/font/text()')[0]
        wwwname = rep.xpath('//ul[@id="first"]/li/p/text()')[1]
        wwwurl = rep.xpath('//ul[@id="first"]/li/p/text()')[2]
        icpdate = rep.xpath('//ul[@id="first"]/li/p/text()')[3]
        context = '''主办单位名称:{}\n主办单位性质:{}\n网站备案许可证号:{}\n网站名称:{}\n网站首页地址:{}\n审核时间:{}\n'''.format(companyname, sponsortype, icpnum, wwwname, wwwurl, icpdate)
    except Exception:
        # Page layout changed or record missing: return the empty default.
        pass
    return context
Exemple #17
0
    def AngelSwordMain(self):
        """Run the built-in POC scan (selfpocscan2) against self.url, logging to redis."""
        redispool.append("runlog", "正在使用碎遮内置POC进行{}漏洞检测!\n".format(self.url))
        print("正在使用碎遮内置POC进行漏洞检测!")
        try:
            selfpocscan2.AngelSwordMain(self.url)
        except Exception as exc:
            # Best effort: a failing POC run must not abort the whole scan.
            print(exc)


if __name__ == '__main__':
    # redispool=redis.ConnectionPool(host='127.0.0.1',port=6379, decode_responses=True)
    # redispool = redis.Redis(connection_pool=ImportToRedis.redisPool)
    try:
        # Manual smoke test against a local web server.
        response = requests.get(url="http://127.0.0.1/",
                                headers=core.GetHeaders(),
                                timeout=10)
        test = GetBaseMessage("127.0.0.1", "http://127.0.0.1", response)
        print(test.GetDate())
        # test.AngelSwordMain()
        # print(test.GetStatus())
        # print(test.GetTitle())
        # print(test.GetResponseHeader())
        # print(test.GetFinger())
        # print(test.PortScan())
        # print(test.SenDir())
    except Exception as exc:
        print(exc)
        print(">>>>>>>>>超时", "cyan")
Exemple #18
0
    def analyze(self, webpage):
        """
        Return the set of application names detected on *webpage*,
        together with the applications they imply.
        """
        found = {name for name, app in self.apps.items()
                 if self._has_app(app, webpage)}
        # Merge in transitively implied applications.
        return found | self._get_implied_apps(found)

    def analyze_with_categories(self, webpage):
        """Map each application detected on *webpage* to its category names."""
        return {
            name: {"categories": self.get_categories(name)}
            for name in self.analyze(webpage)
        }


if __name__ == '__main__':
    # Manual fingerprinting smoke test.
    url = 'https://www.cnblogs.com/'
    response = requests.get(url, headers=core.GetHeaders(), timeout=2)
    print(WebPage(url, response).info())
Exemple #19
0
            pass

    def AngelSwordMain(self):
        """Run the built-in POC scan (selfpocscan) against self.url."""
        print("正在使用碎遮内置POC进行漏洞检测!")
        try:
            selfpocscan.AngelSwordMain(self.url)
        except Exception as exc:
            # Best effort: a failing POC run must not abort the whole scan.
            print(exc)



if __name__ == '__main__':
    # redispool=redis.ConnectionPool(host='127.0.0.1',port=6379, decode_responses=True)
    # redispool = redis.Redis(connection_pool=ImportToRedis.redisPool)
    try:
        # End-to-end smoke test against a public site.
        response = requests.get(url="https://www.nowcoder.com", headers=core.GetHeaders(), timeout=10)
        test = GetBaseMessage("www.nowcoder.com", "https://www.nowcoder.com", response)
        # test.AngelSwordMain()
        for result in (test.GetStatus(), test.GetTitle(), test.GetResponseHeader(),
                       test.GetFinger(), test.PortScan(), test.SenDir()):
            print(result)
    except Exception as exc:
        print(exc)