Example #1
    def searchOtherSiteInIp(self, ip):
        try:
            getSiteByIpUrl = 'http://test.com/dns?ip=%s' % (ip)
            res, content = self.http.request(getSiteByIpUrl)

            if content == '':
                return False
            content = json.read(content)
            if isinstance(content, list) == False:
                return False

            for row in content:
                siteObj = {
                    'scheme': 'http',
                    'domain': '',
                    'path': '/',
                    'ip': ip,
                    'title': '',
                    'policy': 1,
                    'include_url': '',
                    'exclude_url': '',
                    'cookie': '',
                    'sub_domain_scan': 0,
                    'ip_domain_scan': 0
                }

                if row.has_key('domain') == False:
                    continue
                siteObj['domain'] = row['domain']

                if row.has_key('scheme'):
                    siteObj['scheme'] = row['scheme']

                if row.has_key('path'):
                    siteObj['path'] = row['path']

                if row.has_key('title'):
                    siteObj['title'] = row['title']

                self.updateTaskSites(siteObj)

            return True
        except Exception, e:
            logger.exception(e)
            return False
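
The helper above expects the lookup service to return a JSON array of site descriptors. A minimal sketch of what the parsed payload presumably looks like (the endpoint at test.com is a placeholder and the exact field set is an assumption inferred from the keys read above); the stdlib json module is used here in place of the legacy json.read:

import json as jsonSys  # the scanner elsewhere imports the stdlib module under this alias

# Hypothetical body returned by http://test.com/dns?ip=1.2.3.4; only 'domain'
# is mandatory -- rows without it are skipped above.
content = '[{"domain": "a.example.com", "scheme": "https", "path": "/app/", "title": "Demo"}]'

rows = jsonSys.loads(content)  # equivalent to the legacy json.read(content)
for row in rows:
    print row['domain'], row.get('scheme', 'http')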
Example #2
    def scanSiteMain(self, siteId):
        try:
            logger.debug("start to scan site, siteId: %s" % (siteId))
            if siteId == None:
                return False

            dao = MysqlDao()
            siteObj = dao.getSiteData(siteId)
            if siteObj == None:
                logger.error("start to get site config exception, siteId: %s" %
                             (siteId))
                return False

            #scheme
            scheme = siteObj['scheme'].encode('utf8')
            #ip address
            ip = siteObj['ip'].encode('utf8')
            #site domain
            domain = siteObj['domain'].encode('utf8')
            #site scan state
            state = siteObj['state']
            #site path
            path = siteObj['path'].encode('utf8')
            #site title
            title = siteObj['title'].encode('utf8')
            #site type
            siteType = siteObj['site_type'].encode('utf8')
            #site cookie
            cookie = siteObj['cookie'].encode('utf8')
            #site include url
            includeUrl = siteObj['include_url'].encode('utf8')
            if includeUrl == '':
                includeUrl = []
            else:
                includeUrl = json.read(includeUrl)
            #site exclude url
            excludeUrl = siteObj['exclude_url'].encode('utf8')
            if excludeUrl == '':
                excludeUrl = []
            else:
                excludeUrl = json.read(excludeUrl)
            #scan progress
            progress = siteObj['progress'].encode('utf8')
            #site scan policy
            policy = siteObj['policy']

            if state == 1:
                self.finishSiteScan(siteId, ip)
                return True

            #Add this domain's DNS entry to the DNS configuration file
            # self.threadLock.acquire()
            # self.updateHosts(ip, domain, self.taskId, siteId, 'add')
            # self.threadLock.release()
            '''
            #  This block is commented out; the site-liveness check is rewritten later (around line 700 of the original file) for robustness, and the result is written into the report  20170804
            flag = res = content = checkOk = None
            target = []
            target.append("%s://%s%s"%(scheme,domain,path))
            # -------------UPDATE BY MCJ once a site is found, scanning can start; no need to re-check site status
            checkOk = True
            # for url in target:
            #     flag, res, content = self.PreSiteScan(url)
            #     if not flag:
            #         continue
            #     else:
            #         if self.checkSiteWorkMode(res, title) == False:
            #             continue
            #         else:
            #             checkOk = 1
            #             break
            # ----------
            if not checkOk:
                self.updateSiteException("网站无法访问", siteId, ip)
                return
            else:
                siteCode = self.getSiteCode(content)
                if title == "" and res and res.has_key('status') and res['status'] == '200':
                    title = self.updateSiteTitle(content, siteId)
                if siteType == "":
                    siteType = self.updateSiteType(res, siteId)
                if siteCode == "":
                    siteCode = self.getSiteCode(content)
            '''
            if self.taskCnf['web_scan_timeout']:
                socket.setdefaulttimeout(self.taskCnf['web_scan_timeout'])

            siteDb = {'state': 0, 'exception': ''}
            if siteObj['start_time'] is None or siteObj[
                    'start_time'] == '0000-00-00 00:00:00':
                siteDb['start_time'] = time.strftime("%Y-%m-%d %X",
                                                     time.localtime())
            if siteObj['progress'] == '':
                siteDb['progress'] = '0'
            self.dao.updateData('sites', siteDb, {'id': siteId})

            ###############################
            #policy:
            #    1: quick scan, only the specified domain
            #    2: full scan, the specified domain plus its subdomains
            #    3: scan the specified directory and its subdirectories
            #    4: scan only the specified URLs (no crawler needed)
            #    5: domains found via reverse-IP lookup
            #    6: authenticated (login) scan
            ###############################
            ## spider disabled by mcj
            # if self.taskCnf['spider_enable'] == 1 and siteObj['spider_state'] == 0:
            #     logger.debug('spider is start')
            #
            #     progress = '0'
            #
            #     self.dao.deleteData('web_result', {'site_id':siteId})
            #     self.dao.deleteData('web_result_data', {'site_id':siteId})
            #     self.dao.deleteData('spider_url', {'site_id':siteId})
            #
            #     #start the crawler; when scanning only specified URLs, no crawler is needed
            #     if siteObj['policy'] != 4:
            #         spiderCnf = {}
            #         spiderCnf['taskId'] = self.taskId
            #         spiderCnf['assetTaskId'] = self.assetTaskId
            #         spiderCnf['siteId'] = siteId
            #         spiderCnf['spiderUrlCount'] = self.taskCnf['spider_url_count']
            #         spiderCnf['webScanTime'] = self.taskCnf['web_scan_timeout']
            #         spiderCnf['policy'] = siteObj['policy']
            #         spiderCnf['scheme'] = siteObj['scheme'].encode('utf8')
            #         spiderCnf['domain'] = domain
            #         spiderCnf['path'] = path
            #         spiderCnf['maxTimeCount'] = 30
            #         spiderCnf['webScanTimeout'] = self.taskCnf['web_scan_timeout']
            #         spiderCnf['endTime'] = time.time() + 1800
            #         spiderCnf['maxnum'] = self.taskCnf['spider_url_count']
            #         spiderCnf['title'] = title
            #         spiderCnf['ip'] = ip
            #         spiderCnf['cookie'] = cookie
            #         spiderCnf['webSearchSiteState'] = self.taskCnf['web_search_site_state']
            #         spiderCnf['webSearchSiteTimeout'] = self.taskCnf['web_search_site_timeout']
            #         spiderCnf['includeUrl'] = includeUrl
            #         spiderCnf['excludeUrl'] = excludeUrl
            #         spiderCnf['downloadDir'] = SCANER_SPIDER_DOWNLOAD_DIR
            #
            #         if self.taskCnf['spider_type'] == 2:
            #             import Spider2 as Spider
            #         else:
            #             import Spider
            #
            #         logger.debug("spiderCnf start")
            #         logger.debug(spiderCnf)
            #         logger.debug("spiderCnf end")
            #         spider = Spider.Spider(spiderCnf)
            #         spider.start()
            #
            #     logger.debug('spider is end')

            self.dao.updateData('sites', {'spider_state': 1}, {'id': siteId})

            siteCnf = dao.getSiteData(siteId)
            domain = siteCnf['domain'].encode('utf8')
            path = siteCnf['path'].encode('utf8')

            #Check the site's status; some sites immediately return 500 or similar when accessed.
            if self.checkSiteWorkMode({}, title) == False:
                self.finishSiteScan(siteId, ip)
                return

            logger.debug('get site scan config')

            scanCnf = {}
            scanCnf['taskId'] = self.taskId
            scanCnf['assetTaskId'] = self.assetTaskId
            scanCnf['siteId'] = siteId
            scanCnf['maxThread'] = 10
            scanCnf['scriptThread'] = 10
            scanCnf['webTimeout'] = self.taskCnf['web_scan_timeout']
            scanCnf['ip'] = ip
            # new parameter for the origin server IP, add by mcj
            target = json.read(str(self.taskCnf['target']))
            source_ip = target[0].get('source_ip')
            if source_ip:
                scanCnf['source_ip'] = source_ip
            scanCnf['scheme'] = scheme
            scanCnf['domain'] = domain
            scanCnf['path'] = path
            scanCnf['errorCount'] = 0
            scanCnf['errorLenDict'] = {}
            scanCnf['maxTimeoutCount'] = 20
            scanCnf['cookie'] = cookie
            scanCnf['len404'] = []
            scanCnf['isForce'] = 0
            scanCnf['excludeUrl'] = excludeUrl
            scanCnf['threadLock'] = threading.Lock()
            scanCnf['isstart'] = '1'

            # ----------- check whether the site is alive; if so, capture the cookie etc. by lichao
            if source_ip:
                test_url = "%s://%s%s" % (scheme, source_ip, path)
            else:
                test_url = "%s://%s%s" % (scheme, domain, path)
            test_header = {'Host': domain}
            checkOk, siteCode = False, None
            for i in range(3):
                try:
                    http = HttpRequest({
                        'domain': domain,
                        'timeout': 15,
                        'follow_redirects': True,
                        'cookie': cookie
                    })
                    res, content = http.request(test_url,
                                                'GET',
                                                headers=test_header)
                    if self.checkSiteWorkMode(res, title):
                        siteCode = self.getSiteCode(content)
                        if not title:
                            title = self.updateSiteTitle(content, siteId)
                        if not siteType:
                            siteType = self.updateSiteType(res, siteId)
                        if not cookie:
                            cookie = res.get('set-cookie')
                        checkOk = True
                        break
                    else:
                        sleep(5)
                except:
                    sleep(5)

            if cookie:
                scanCnf['cookie'] = cookie
            if title:
                scanCnf['title'] = title
            if siteType:
                scanCnf['siteType'] = siteType
            if siteCode:
                scanCnf['siteCode'] = siteCode

            if not checkOk:
                self.updateSiteException("网站无法访问", siteId, ip)
            # --------------------------------------------

            # ------------------- detect the site's web-framework fingerprint by lichao (reserved feature, not used yet)
            # if checkOk:
            #     from engine.engine_utils.check_web_fingerprint import web_frame_fingerprint
            #     scanCnf['web_frame'] = web_frame_fingerprint(ob=scanCnf)
            # -------------------

            # ----------- get sites_dirs by mcj
            site_dirs = get_site_dirs(self.taskId)
            scanCnf['site_dirs'] = site_dirs
            # -----------
            # ---------------- get web fingerprint  by lichao
            if checkOk:
                scanCnf['webServer'] = web_server_fingerprint(
                    scheme, source_ip, domain,
                    path)  # 'apache|nginx|iis|unknown'
                scanCnf['os'] = os_fingerprint(
                    source_ip)  # 'linux|windows|unknown'
            # -----------------

            # ---------------- verify 404 page and waf page by lichao
            scanCnf['404_page'], scanCnf['app_404_page'], scanCnf[
                'waf_page'] = get_invaild_page(scheme, source_ip, domain,
                                               siteType)
            scanCnf['404_page']['similar_rate'] = 0.8
            scanCnf['app_404_page']['similar_rate'] = 0.8
            scanCnf['waf_page']['similar_rate'] = 0.8
            # ---------------------------

            # check this domain's scan progress and load the vulnerability IDs not yet scanned
            logger.debug('load unscanned script start')
            scanVulList = []
            progress = progress.split('|')
            for vulId in self.taskCnf['vulList']:
                if vulId not in progress:
                    scanVulList.append(vulId)

            logger.debug('script scan is start')
            if len(scanVulList) > 0:
                urlList = []
                if policy == 4:
                    for url in includeUrl:
                        if url in excludeUrl:
                            continue
                        t = url.split('?')
                        url = t[0]
                        params = ''
                        if len(t) > 1:
                            params = t[1]
                        urlList.append({
                            'url': url,
                            'params': params,
                            'method': 'get'
                        })
                else:
                    res = self.dao.getUrlList(siteId)
                    for r in res:
                        url = r['url'].encode('utf8')
                        if nonascii(url): url = safeUrlString(url)
                        urlList.append({
                            'url': url,
                            'params': r['params'].encode('utf8'),
                            'method': r['method'].encode('utf8'),
                            'refer': r['refer'].encode('utf8')
                        })

                # ----------- site-liveness check by lichao: fetch the plugin id of check_web_alive
                check_ok_vul_id = ""
                db_session = DBSession()
                try:
                    vul = db_session.query(WebVulList).filter(
                        WebVulList.script == 'check_web_alive').first()
                    check_ok_vul_id = str(vul.id)
                except Exception, e:
                    logger.error(e)
                db_session.close()
                # -----------

                for vulId in scanVulList:
                    from time import time as during_time
                    t1 = during_time()
                    vulId = vulId.replace(" ", "")
                    if vulId == "":
                        continue

                    # ----------- site-liveness check by lichao
                    if not checkOk and len(urlList) <= 1:  # site judged unreachable
                        if vulId != check_ok_vul_id:  # when unreachable, run only the check_web_alive plugin
                            continue
                    else:  # site is reachable
                        if vulId == check_ok_vul_id:  # when reachable, skip the check_web_alive plugin
                            continue
                    # ------------

                    progress.append(vulId)
                    self.dao.updateData('sites',
                                        {'progress': '|'.join(progress)},
                                        {'id': siteId})
                    self.dao.deleteData('web_result', {
                        'vul_id': vulId,
                        'site_id': siteId
                    })

                    scanCnf['vulId'] = vulId
                    scanCnf['vulName'] = self.taskCnf['vulDict'][vulId][
                        'vul_name']
                    scanCnf['level'] = self.taskCnf['vulDict'][vulId][
                        'level'].encode('utf8')
                    scanCnf['scanType'] = self.taskCnf['vulDict'][vulId][
                        'scan_type']
                    scanCnf['script'] = self.taskCnf['vulDict'][vulId][
                        'script']
                    scanCnf['status'] = '0'
                    scanCnf['endTime'] = time.time() + 1800
                    scanCnf['timeoutCount'] = 0

                    #test the URLs found by the crawler
                    if scanCnf['scanType'] == 1:
                        scanCnf['queue'] = Queue()
                        for r in urlList:
                            scanCnf['queue'].put(r)
                        scanUrlScript = ScanScriptForUrl(scanCnf)
                        scanUrlScript.start()

                    #when only specified URLs are tested, the domain and vuln-library tests are skipped
                    if policy != 4:
                        #domain test
                        if scanCnf['scanType'] == 2:
                            scanDomainScript = ScanScriptForDomain(scanCnf)
                            scanDomainScript.start()
                    duration = during_time() - t1
                    # -----------record plugin runtime by mcj
                    try:
                        from common.plugin_speed import PluginSpeed
                        db_session = DBSession()
                        plu_speed = PluginSpeed(self.taskId, vulId, duration)
                        db_session.add(plu_speed)
                        db_session.commit()
                        db_session.close()
                    except Exception, e:
                        logger.info(str(e))
                        db_session.rollback()
                        db_session.close()
                    # -----------record plugin runtime by mcj
                    if not checkOk and len(urlList) <= 1:
                        break
                urlList = []
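
Site progress is persisted in the sites.progress column as a '|'-joined string of finished plugin IDs ('0' marks a fresh start), and on resume only the missing IDs are scheduled, as the loop above shows. A small self-contained illustration of that bookkeeping (the ID values are made up):

vulList = ['101', '102', '103']   # plugin IDs configured for the task
progress = '0|101'                # value read back from sites.progress

done = progress.split('|')
scanVulList = [v for v in vulList if v not in done]
print scanVulList                 # -> ['102', '103']

done.append(scanVulList[0])
print '|'.join(done)              # -> '0|101|102', written back before plugin 102 runs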
Example #3
    def run(self):
        try:
            logger.debug('start to search site')
            # fetch the domains left unfinished by the previous scan
            logger.debug('taskId %s ' % self.taskId)
            siteIds = self.dao.getUnscandSite(self.taskId, self.assetTaskId)
            logger.debug(siteIds)

            for siteId in siteIds:
                logger.debug('siteQueue put %s ' % siteId)
                siteQueue.put(siteId)

            if self.taskCnf['web_search_site_state'] == 0:
                target = self.taskCnf['target'].encode('utf8')
                if target == '':
                    target = []
                else:
                    target = json.read(target)

                ipList = []

                logger.debug('target: ' + jsonSys.dumps(target))
                for row in target:
                    try:
                        siteObj = {
                            'scheme': 'http',
                            'domain': '',
                            'path': '/',
                            'ip': '',
                            'title': '',
                            'policy': '1',
                            'include_url': '',
                            'exclude_url': '',
                            'cookie': '',
                            'sub_domain_scan': 0,
                            'ip_domain_scan': 0
                        }

                        if row.has_key('scheme'):
                            siteObj['scheme'] = row['scheme']

                        if row.has_key('domain') == False:
                            logger.error("can not get domain")
                            continue

                        siteObj['domain'] = row['domain']
                        if row.has_key('path'):
                            siteObj['path'] = row['path']

                        if row.has_key('ip'):
                            # allow a custom IP mapping to be configured
                            ip = row['ip']
                        else:
                            ip = domainToip(siteObj['domain'])

                        if ip == False:
                            logger.error("can not get ip, domain: %s" %
                                         (siteObj['domain']))
                            continue

                        siteObj['ip'] = ip

                        if row.has_key('title'):
                            siteObj['title'] = row['title']

                        if row.has_key('policy'):
                            siteObj['policy'] = row['policy']
                        else:
                            siteObj['policy'] = 1

                        if row.has_key('include_url'):
                            siteObj['include_url'] = json.write(
                                row['include_url'])

                        if row.has_key('exclude_url'):
                            siteObj['exclude_url'] = json.write(
                                row['exclude_url'])

                        if row.has_key('cookie'):
                            siteObj['cookie'] = row['cookie']

                        # whether to scan subdomains
                        if row.has_key('sub_domain_scan'):
                            siteObj['sub_domain_scan'] = row['sub_domain_scan']

                        # scan the other domains on this IP
                        if row.has_key('ip_domain_scan'):
                            siteObj['ip_domain_scan'] = row['ip_domain_scan']

                        if siteObj['ip_domain_scan'] == 1:
                            # look up the other domains hosted on this IP
                            if ip not in ipList:
                                # NOTE: searchOtherSiteInIp (Example #1) returns
                                # True/False rather than a list, so extending
                                # target with its result is a latent bug.
                                otherSites = self.searchOtherSiteInIp(ip)
                                target.extend(otherSites)
                                ipList.append(ip)

                        self.updateTaskSites(siteObj)
                    except Exception, ee:
                        logger.error(ee)

            self.finish()

            logger.debug('end to search site')
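
The task's target column evidently holds a JSON array of site descriptors; the has_key probes above imply a shape like the following sketch (only domain is required, everything else falls back to the defaults in siteObj):

target = [
    {
        'scheme': 'https',            # optional, defaults to 'http'
        'domain': 'www.example.com',  # required; rows without it are skipped
        'path': '/',                  # optional, defaults to '/'
        'ip': '192.0.2.10',           # optional pinned IP; otherwise resolved via domainToip
        'policy': 1,                  # optional scan policy, defaults to 1
        'include_url': [],            # optional; stored back as JSON via json.write
        'exclude_url': [],            # optional
        'cookie': '',                 # optional session cookie
        'sub_domain_scan': 0,         # 1 = also scan subdomains
        'ip_domain_scan': 0           # 1 = also scan other domains on the same IP
    }
]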
Example #4
def run_url(http, ob, item):
    
    result = []
    detail = ""
    domain = ob['domain']

    try:
        isstart = '0'
        tmp_url = urllib.unquote(item['url'])
        parse = urlparse.urlparse(tmp_url)
        path = parse.path
        path = path.lower()
        host_domain = domain
        if (path.find(".css")>=0 or path.find(".doc")>=0 or path.find(".txt")>=0 or path.find(".pdf")>=0 or path.find(".js")>=0) and path.find("jsp")<0:
            return result
        #end if
        url_parse = urlparse.urlparse(tmp_url)
        scheme = url_parse.scheme
        netloc = url_parse.netloc
        path = url_parse.path
        query = url_parse.query
        source_ip = ob.get('source_ip')
        if source_ip:
            netloc = source_ip
        if query:
            tmp_url = "%s://%s%s?%s" % (scheme, netloc, path, query)
        else:
            tmp_url = "%s://%s%s" % (scheme, netloc, path)
        if item['params'] != '':
            url = "%s?%s" % (tmp_url, item['params'])
        else:
            url = tmp_url

        headers={"Host":host_domain,"User-Agent":" Mozilla/5.0 (Windows NT 5.1; rv:14.0)\
                     Gecko/20100101 Firefox/14.0.1","Accept":" text/html,application/xhtml+xml,application/xml;q=0.9,\
                     */*;q=0.8","Accept-Language": "zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3","Accept-Encoding":" gzip, \
                     deflate","Connection": "keep-alive","X-Forwarded-For": "231.23.233.2'"}
        headers1={"Host":host_domain,"User-Agent":" Mozilla/5.0 (Windows NT 5.1; rv:14.0)\
                     Gecko/20100101 Firefox/14.0.1","Accept":" text/html,application/xhtml+xml,application/xml;q=0.9,\
                     */*;q=0.8","Accept-Language": "zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3","Accept-Encoding":" gzip, \
                     deflate","Connection": "keep-alive","X-Forwarded-For": "231.23.233.2"}
        
        if item['method'] == 'get':
            response, content = http.request(url, 'GET', '', headers)
            r, c = http.request(url, 'GET', '', headers1)

            res1, res2, res3 = GetDatabaseError(content)
            res11, res22, res33 = GetDatabaseError(c)
            if res1 and response.has_key('status') and response['status'] == '500' and res11 == False:
                request = getRequest(url, "GET", headers, "", domain=ob['domain'])
                response = getResponse(response)
                output = "Database error message found: %s" % (res3)
                result.append(getRecord(ob, url, ob['level'], detail, request, response, output))
            #end if 
        #end if
              
        if item['method'] == 'post':
            reject_key = ['__viewstate', 'ibtnenter.x', 'ibtnenter.y', 'password']
            ret = []
            par = json.read(item['params'])
            for i in par:
                if i and len(i.keys()) > 0:
                    for k in i.keys():
                        if k == 'name':
                            if i[k].lower() not in reject_key:
                                ret.append(i[k]+"=1111")
                            #end if
                        #end if
                    #end for
                #end if
            #end for
            
            post_data= "&".join(ret)
            r,c = http.request(url,'POST',post_data,headers1)
            response,content = http.request(url,'POST',post_data,headers)
            
            res1, res2, res3 = GetDatabaseError(content)
            res11, res22, res33 = GetDatabaseError(c)
            if res1 and response.has_key('status') and response['status'] == '500' and res11 == False:
                request = postRequest(url, "POST", headers, post_data, domain=ob['domain'])
                response = getResponse(response)
                output = "Database error message found: %s" % (res3)
                result.append(getRecord(ob, url, ob['level'], detail, request, response, output))
                
            #end if 
        #end if
            
    except Exception, e:
        logger.error("File:x-forwarded-for sqlinjectionscript.py, run_url function :" + str(e))
Example #5
def run_url(http, ob, item):
    detail = ""
    ret = []
    result = []
    domain = ob['domain']
    headers = {
        "Host": domain,
        "User-Agent": " Mozilla/5.0 (Windows NT 5.1; rv:14.0)\
                     Gecko/20100101 Firefox/14.0.1",
        "Accept": " text/html,application/xhtml+xml,application/xml;q=0.9,\
                     */*;q=0.8",
        "Accept-Language": "zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3",
        "Accept-Encoding": " gzip, \
                     deflate",
        "Connection": "keep-alive"
    }
    headers1 = {
        "Host": domain,
        "User-Agent": " Mozilla/5.0 (Windows NT 5.1; rv:14.0)\
                 Gecko/20100101 Firefox/14.0.1",
        "Accept": " text/html,application/xhtml+xml,application/xml;q=0.9,\
                 */*;q=0.8",
        "Accept-Language": "zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3",
        "Accept-Encoding": " gzip, \
                 deflate",
        "Connection": "keep-alive"
    }
    try:
        isstart = '0'
        tmp_url = urllib.unquote(item['url'])
        parse = urlparse.urlparse(tmp_url)
        path = parse.path
        path = path.lower()
        url_parse = urlparse.urlparse(tmp_url)
        scheme = url_parse.scheme
        domain = url_parse.netloc
        path = url_parse.path
        query = url_parse.query
        source_ip = ob.get('source_ip')
        if source_ip:
            domain = source_ip
        if query:
            tmp_url = "%s://%s%s?%s" % (scheme, domain, path, query)
        else:
            tmp_url = "%s://%s%s" % (scheme, domain, path)
        if (path.find(".css") >= 0 or path.find(".doc") >= 0
                or path.find(".txt") >= 0 or path.find(".pdf") >= 0
                or path.find(".js") >= 0) and path.find("jsp") < 0:
            return result
        if item['params'] != '':

            url = "%s?%s" % (tmp_url, item['params'])
        else:
            url = tmp_url
        headers['Referer'] = url + "'"
        headers1['Referer'] = url
        # print headers
        if item['method'] == 'get':
            r, c = http.request(url, 'GET', '', headers1)
            response, content = http.request(url, 'GET', '', headers)
            if GetDatabaseError(content)[0] and response[
                    'status'] == '500' and GetDatabaseError(c)[0] == False:
                request = getRequest(url,
                                     "GET",
                                     headers,
                                     "",
                                     domain=ob['domain'])
                response = getResponse(response)
                result.append(
                    getRecord(
                        ob, url, ob['level'], detail + "Verification scan result:\n" +
                        "Database error message found: " + GetDatabaseError(content)[2], request,
                        response))
                # END IF
        # END IF

        if item['method'] == 'post':
            reject_key = [
                '__viewstate', 'ibtnenter.x', 'ibtnenter.y', 'password'
            ]
            par = json.read(item['params'])
            for i in par:

                if i and len(i) > 0:

                    for k in i.keys():

                        if k == 'name':

                            if i[k].lower() not in reject_key:
                                ret.append(i[k] + "=1111")

            post_data = "&".join(ret)
            r, c = http.request(tmp_url, 'POST', post_data, headers1)
            response, content = http.request(tmp_url, 'POST', post_data,
                                             headers)
            if GetDatabaseError(content)[0] and response[
                    'status'] == '500' and GetDatabaseError(c)[0] == False:
                request = postRequest(tmp_url,
                                      "POST",
                                      headers,
                                      post_data,
                                      domain=ob['domain'])
                response = getResponse(response)
                result.append(
                    getRecord(
                        ob, tmp_url, ob['level'], detail + "Verification scan result:\n" +
                        "Database error message found: " + GetDatabaseError(content)[2], request,
                        response))
                # END IF
                # END IF

    except Exception, e:
        logger.error("File:referer sqlinjectionscript.py, run_url function :" +
                     str(e))
Example #6
def run_url(http, ob, item):

    resultlist = []

    try:
        tmp_url = urllib.unquote(item['url'])
        if item['params'] == "":
            return resultlist
        #end if
        if checkUrlType(tmp_url) == False:
            return resultlist
        #end if
        if item['method'] == 'get':

            url_list = []
            params = changeParams(item['params'])
            for row in params:
                url = "%s?%s" % (tmp_url, row)
                res = GetXssCheck(http, ob, url)
                if len(res) > 0:
                    resultlist.extend(res)
                #end if
            #end for
        elif item['method'] == 'post':
            print item['params']
            ret = []
            reject_key = ['__viewstate', 'ibtnenter.x', 'ibtnenter.y']
            par = json.read(item['params'])
            for i in par:

                if i and len(i) > 0:

                    for k in i.keys():

                        if k == 'name':

                            if i[k].lower() not in reject_key:

                                ret.append(i[k] + "=1111")

            post_data = "&".join(ret)
            params = changeParams(post_data)
            response = ""
            request = ""
            for post_Data in params:
                post_T_Data = post_Data + "\'\"%27%2527%22%2522<ScRiPt>alert(133)</ScRiPt>"
                headers = {"Content-Type": "application/x-www-form-urlencoded"}
                res, content = http.request(tmp_url,
                                            'POST',
                                            post_T_Data,
                                            headers=headers)
                if res['status'] == '404' or len(content) <= 0:
                    return []
                flag1, keyword1 = XssGetKeyWord(content,
                                                "<ScRiPt>alert(133)</ScRiPt>")
                if flag1:
                    r, c = http.request(tmp_url,
                                        'POST',
                                        post_Data + keyword1,
                                        headers=headers)
                    if c.find("<script>alert(133)</script>") < 0:
                        keyword1 = "<script>alert(133)</script>"

                    detail = "漏洞参数:" + post_Data + keyword1
                    request = get_post_request(tmp_url, post_Data + keyword1)
                    response = getResponse(res)
                    resultlist.append(
                        getRecord(ob, tmp_url, ob['level'], detail, request,
                                  response))
            #end if

        #end if
    except Exception, e:
        logger.error("File:XssScript.py, run_url function :" + str(e))
        return []
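
The XSS check is reflection-based: a payload ending in <ScRiPt>alert(133)</ScRiPt> is appended to each parameter, and the response body is searched for the marker; if it comes back unescaped, the parameter reflects attacker input. A minimal sketch with XssGetKeyWord reduced to a substring test (the real helper also derives the precise reflected form):

MARKER = "<ScRiPt>alert(133)</ScRiPt>"
PAYLOAD = "'\"%27%2527%22%2522" + MARKER  # quote/encoding noise plus the marker

def xss_get_keyword(body, marker=MARKER):
    # Simplified XssGetKeyWord: returns (reflected?, keyword found).
    return body.find(marker) >= 0, marker

body = "<html>search results for %s</html>" % PAYLOAD  # pretend server echo
flag, keyword = xss_get_keyword(body)
print flag, keyword   # -> True: the marker survived unescaped, likely XSS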
Example #7
    def scanSiteMain(self, siteId):
        try:
            logger.debug("start to scan site, siteId: %s" % (siteId))
            if siteId == None:
                return False

            dao = MysqlDao()
            siteObj = dao.getSiteData(siteId)
            if siteObj == None:
                logger.error("start to get site config exception, siteId: %s" %
                             (siteId))
                return False

            #scheme
            scheme = siteObj['scheme'].encode('utf8')
            #ip address
            ip = siteObj['ip'].encode('utf8')
            #site domain
            domain = siteObj['domain'].encode('utf8')
            #site scan state
            state = siteObj['state']
            #site path
            path = siteObj['path'].encode('utf8')
            #site title
            title = siteObj['title'].encode('utf8')
            #site type
            siteType = siteObj['site_type'].encode('utf8')
            #site cookie
            cookie = siteObj['cookie'].encode('utf8')
            #site include url
            includeUrl = siteObj['include_url'].encode('utf8')
            if includeUrl == '':
                includeUrl = []
            else:
                includeUrl = json.read(includeUrl)
            #site exclude url
            excludeUrl = siteObj['exclude_url'].encode('utf8')
            if excludeUrl == '':
                excludeUrl = []
            else:
                excludeUrl = json.read(excludeUrl)
            #scan progress
            progress = siteObj['progress'].encode('utf8')
            #site scan policy
            policy = siteObj['policy']

            logger.debug("scanSiteMain siteId: %s" % (siteId))
            if state == 1:
                self.finishSiteScan(siteId, ip)
                return True

            #Add this domain's DNS entry to the DNS configuration file
            self.threadLock.acquire()
            self.updateHosts(ip, domain, self.taskId, siteId, 'add')
            self.threadLock.release()

            flag = res = content = checkOk = None
            target = []
            logger.debug("scanSiteMain siteId: %s  preSiteScan before" %
                         (siteId))
            target.append("%s://%s%s" % (scheme, domain, path))
            for url in target:
                flag, res, content = self.PreSiteScan(url)
                if not flag:
                    continue
                else:
                    if self.checkSiteWorkMode(res, title) == False:
                        continue
                    else:
                        checkOk = 1
                        break
            if not checkOk:
                self.updateSiteException("网站无法访问", siteId, ip)
                return
            else:
                siteCode = self.getSiteCode(content)
                if title == "" and res and res.has_key(
                        'status') and res['status'] == '200':
                    title = self.updateSiteTitle(content, siteId)
                if siteType == "":
                    siteType = self.updateSiteType(res, siteId)
                if siteCode == "":
                    siteCode = self.getSiteCode(content)

            if self.taskCnf['web_scan_timeout']:
                socket.setdefaulttimeout(self.taskCnf['web_scan_timeout'])

            siteDb = {'state': 0, 'exception': ''}
            if siteObj['start_time'] is None or siteObj[
                    'start_time'] == '0000-00-00 00:00:00':
                siteDb['start_time'] = time.strftime("%Y-%m-%d %X",
                                                     time.localtime())
            if siteObj['progress'] == '':
                siteDb['progress'] = '0'
            self.dao.updateData('sites', siteDb, {'id': siteId})

            logger.debug("scanSiteMain siteId: %s  policy before" % (siteId))
            ###############################
            #policy:
            #    1: quick scan, only the specified domain
            #    2: full scan, the specified domain plus its subdomains
            #    3: scan the specified directory and its subdirectories
            #    4: scan only the specified URLs (no crawler needed)
            #    5: domains found via reverse-IP lookup
            #    6: authenticated (login) scan
            ###############################
            if self.taskCnf['spider_enable'] == 1 and siteObj[
                    'spider_state'] == 0:
                logger.debug('spider is start')

                progress = '0'

                logger.debug("scanSiteMain siteId: %s  cleandata before" %
                             (siteId))
                self.dao.deleteData('web_result', {'site_id': siteId})
                self.dao.deleteData('web_result_data', {'site_id': siteId})
                self.dao.deleteData('spider_url', {'site_id': siteId})

                #start the crawler; when scanning only specified URLs, no crawler is needed
                if siteObj['policy'] != 4:
                    spiderCnf = {}
                    spiderCnf['taskId'] = self.taskId
                    spiderCnf['assetTaskId'] = self.assetTaskId
                    spiderCnf['siteId'] = siteId
                    spiderCnf['spiderUrlCount'] = self.taskCnf[
                        'spider_url_count']
                    spiderCnf['webScanTime'] = self.taskCnf['web_scan_timeout']
                    spiderCnf['policy'] = siteObj['policy']
                    spiderCnf['scheme'] = siteObj['scheme'].encode('utf8')
                    spiderCnf['domain'] = domain
                    spiderCnf['path'] = path
                    spiderCnf['maxTimeCount'] = 30
                    spiderCnf['webScanTimeout'] = self.taskCnf[
                        'web_scan_timeout']
                    spiderCnf['endTime'] = time.time() + 1800
                    spiderCnf['maxnum'] = self.taskCnf['spider_url_count']
                    spiderCnf['title'] = title
                    spiderCnf['ip'] = ip
                    spiderCnf['cookie'] = cookie
                    spiderCnf['webSearchSiteState'] = self.taskCnf[
                        'web_search_site_state']
                    spiderCnf['webSearchSiteTimeout'] = self.taskCnf[
                        'web_search_site_timeout']
                    spiderCnf['includeUrl'] = includeUrl
                    spiderCnf['excludeUrl'] = excludeUrl
                    spiderCnf['downloadDir'] = './log/'

                    logger.debug("scanSiteMain siteId: %s startSpider before" %
                                 (siteId))
                    if self.taskCnf['spider_type'] == 2:
                        import Spider2 as Spider
                    else:
                        import Spider

                    logger.debug("spiderCnf start")
                    logger.debug(spiderCnf)
                    logger.debug("spiderCnf end")
                    spider = Spider.Spider(spiderCnf)
                    spider.start()

                logger.debug('spider is end')

            self.dao.updateData('sites', {'spider_state': 1}, {'id': siteId})
            ####################################### spidering ends here; the scanning work follows #############################################
            # NOTE: this early return makes all of the scanning code below unreachable
            return True

            siteCnf = dao.getSiteData(siteId)
            domain = siteCnf['domain'].encode('utf8')
            path = siteCnf['path'].encode('utf8')
            title = siteCnf['title']
            if title != '':
                title = title.encode('utf8')

            #Check the site's status; some sites immediately return 500 or similar when accessed.
            if self.checkSiteWorkMode({}, title) == False:
                self.finishSiteScan(siteId, ip)
                return

            logger.debug('get site scan config')

            scanCnf = {}
            scanCnf['taskId'] = self.taskId
            scanCnf['assetTaskId'] = self.assetTaskId
            scanCnf['siteId'] = siteId
            scanCnf['maxThread'] = 10
            scanCnf['scriptThread'] = 10
            scanCnf['webTimeout'] = self.taskCnf['web_scan_timeout']
            scanCnf['ip'] = ip
            scanCnf['scheme'] = scheme
            scanCnf['domain'] = domain
            scanCnf['path'] = path
            scanCnf['cookie'] = cookie
            scanCnf['errorCount'] = 0
            scanCnf['errorLenDict'] = {}
            scanCnf['siteType'] = siteType
            scanCnf['maxTimeoutCount'] = 20
            scanCnf['siteCode'] = siteCode
            scanCnf['cookie'] = cookie
            scanCnf['len404'] = []
            scanCnf['isForce'] = 0
            scanCnf['excludeUrl'] = excludeUrl
            scanCnf['threadLock'] = threading.Lock()
            scanCnf['isstart'] = '1'

            #check this domain's scan progress and load the vulnerability IDs not yet scanned
            logger.debug('load unscanned script start')
            scanVulList = []
            progress = progress.split('|')
            for vulId in self.taskCnf['vulList']:
                if vulId not in progress:
                    scanVulList.append(vulId)

            logger.debug('script scan is start')
            if len(scanVulList) > 0:
                urlList = []
                if policy == 4:
                    for url in includeUrl:
                        if url in excludeUrl:
                            continue
                        t = url.split('?')
                        url = t[0]
                        params = ''
                        if len(t) > 1:
                            params = t[1]
                        urlList.append({
                            'url': url,
                            'params': params,
                            'method': 'get'
                        })
                else:
                    res = self.dao.getUrlList(siteId)
                    for r in res:
                        url = r['url'].encode('utf8')
                        if nonascii(url): url = safeUrlString(url)
                        urlList.append({
                            'url': url,
                            'params': r['params'].encode('utf8'),
                            'method': r['method'].encode('utf8'),
                            'refer': r['refer'].encode('utf8')
                        })

                for vulId in scanVulList:
                    vulId = vulId.replace(" ", "")
                    if vulId == "":
                        continue

                    progress.append(vulId)
                    self.dao.updateData('sites',
                                        {'progress': '|'.join(progress)},
                                        {'id': siteId})
                    self.dao.deleteData('web_result', {
                        'vul_id': vulId,
                        'site_id': siteId
                    })

                    scanCnf['vulId'] = vulId
                    scanCnf['vulName'] = self.taskCnf['vulDict'][vulId][
                        'vul_name']
                    scanCnf['level'] = self.taskCnf['vulDict'][vulId][
                        'level'].encode('utf8')
                    scanCnf['scanType'] = self.taskCnf['vulDict'][vulId][
                        'scan_type']
                    scanCnf['script'] = self.taskCnf['vulDict'][vulId][
                        'script']
                    scanCnf['status'] = '0'
                    scanCnf['endTime'] = time.time() + 1800
                    scanCnf['timeoutCount'] = 0

                    #test the URLs found by the crawler
                    if scanCnf['scanType'] == 1:
                        scanCnf['queue'] = Queue()
                        for r in urlList:
                            scanCnf['queue'].put(r)
                        scanUrlScript = ScanScriptForUrl(scanCnf)
                        scanUrlScript.start()

                    #when only specified URLs are tested, the domain and vuln-library tests are skipped
                    if policy != 4:
                        #domain test
                        if scanCnf['scanType'] == 2:
                            scanDomainScript = ScanScriptForDomain(scanCnf)
                            scanDomainScript.start()
                urlList = []

            #finish the scan
            self.finishSiteScan(siteId, ip)
            self.threadLock.acquire()
            self.updateHosts(ip, domain, self.taskId, siteId, 'remove')
            self.threadLock.release()

            return None
        except Exception, e:
            logger.exception(e)
            return siteId
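
updateHosts is wrapped in threadLock because many site threads add entries to (and later remove them from) one shared hosts file, and each update is a read-modify-write cycle. updateHosts itself is not shown in these examples, so the following is only a sketch of the assumed pattern:

import threading

hosts_lock = threading.Lock()  # plays the role of self.threadLock

def update_hosts(ip, domain, action, path='/tmp/scanner_hosts'):
    # Hypothetical stand-in: add or drop one "ip domain" line atomically.
    with hosts_lock:
        try:
            lines = open(path).read().splitlines()
        except IOError:
            lines = []
        entry = '%s %s' % (ip, domain)
        if action == 'add' and entry not in lines:
            lines.append(entry)
        elif action == 'remove' and entry in lines:
            lines.remove(entry)
        open(path, 'w').write('\n'.join(lines) + '\n')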
Example #8
    def init(self):
        try:
            if self.action == 'reinit':
                logger.debug('start to reinit task')
                reinitDb = {
                    'state': 1,
                    'init_state': 0,
                    'prescan_state': 0,
                    'web_scan_state': 0,
                    'weak_pwd_scan_state': 0,
                    'port_scan_state': 0,
                    'host_scan_state': 0,
                    'start_time': '0000-00-00 00:00:00',
                    'end_time': '0000-00-00 00:00:00'
                }
                self.dao.updateData('task', reinitDb, {'id': self.taskId})
            #end if

            currentTime = time.strftime("%Y-%m-%d %X", time.localtime())

            self.dao.updateData('task', {
                'state': 2,
                'start_time': currentTime,
                'end_time': '0000-00-00 00:00:00'
            }, {'id': self.taskId, 'state': 1})

            self.dao.updateData('task', {'state': 3, 'end_time': currentTime}, {
                'id': self.taskId,
                'init_state': 1,
                'prescan_state': 1,
                'web_scan_state': 1,
                'weak_pwd_scan_state': 1,
                'port_scan_state': 1,
                'host_scan_state': 1
            })

            taskCnf = self.dao.getTaskData(self.taskId)
            self.assetTaskId = taskCnf['asset_task_id']

            if taskCnf['init_state'] == 1:
                return
            #end if
            
            target = taskCnf['target'].encode('utf8')
            if target == '':
                target = []
            else:
                target = json.read(target)  # target is already a utf-8 str; re-encoding it was a bug
            #end if

            #clear host_infos
            self.dao.deleteData('host_infos', {'task_id':self.taskId,'asset_task_id':self.assetTaskId})

            #clear host_ports
            self.dao.deleteData('host_ports', {'task_id':self.taskId,'asset_task_id':self.assetTaskId})

            #clear sites
            self.dao.deleteData('sites', {'task_id':self.taskId,'asset_task_id':self.assetTaskId})

            #clear spider_url
            self.dao.deleteData('spider_url', {'task_id':self.taskId,'asset_task_id':self.assetTaskId})

            #clear web_result
            self.dao.deleteData('web_result', {'task_id':self.taskId,'asset_task_id':self.assetTaskId})

            #clear web_result_data
            self.dao.deleteData('web_result_data', {'task_id':self.taskId,'asset_task_id':self.assetTaskId})

            #clear host_result
            self.dao.deleteData('host_result', {'task_id':self.taskId,'asset_task_id':self.assetTaskId})

            #clear weak_pwd_result
            self.dao.deleteData('weak_pwd_result', {'task_id':self.taskId,'asset_task_id':self.assetTaskId})

            siteList = []
            ipList = []

            for item in target:
                try:
                    domain = ''
                    ip = ''

                    if item.has_key('domain'):
                        domain = item['domain']
                    #end if

                    if item.has_key('ip'):
                        ip = item['ip']
                    #end if

                    if domain == '' and ip == '':
                        continue
                    #end if

                    if ip == '':
                        ip = domainToip(domain)
                        if ip == False:
                            continue
                        #end if
                    #end if

                    ipDb = {'task_id':self.taskId,'asset_task_id':self.assetTaskId,'ip':ip}
                    if ip not in ipList:
                        hostId = self.dao.insertData('host_infos', ipDb)
                        if hostId > 0:
                            ipList.append(ip)
                        #end if
                    #end if

                except Exception, e1:
                    logger.error(e1)
                #end try
            #end for

            self.dao.updateData('task', {'init_state':1}, {'id':self.taskId})
Example #9
def run_url(http, ob, item):
    try:
        result = []

        if item['method'] != 'post':
            return []
        #end if
        header = {"Host": ob['domain']}
        refer = item['refer']
        params = json.read(item['params'])

        flag = False
        for row in params:
            if row['type'] == 'password':
                flag = True
            #end if
            if row['type'] in ['file', 'textarea']:
                return []
            #end if
        #end for

        if flag:
            url_parse = urlparse(refer)
            scheme = url_parse.scheme
            domain = url_parse.netloc
            path = url_parse.path
            query = url_parse.query
            source_ip = ob.get('source_ip')
            if source_ip:
                domain = source_ip
            if query:
                new_url = "%s://%s%s?%s" % (scheme, domain, path, query)
            else:
                new_url = "%s://%s%s" % (scheme, domain, path)

            res, content = http.request(new_url, 'GET', headers=header)
            if res and res.has_key('status') and res['status'] == '200':
                if res.has_key('cache-control') and res['cache-control'].find(
                        'no-cache') >= 0:
                    return []
                #end if
                if res.has_key(
                        'pragma') and res['pragma'].find('no-cache') >= 0:
                    return []
                #end if
                if res.has_key('expires') and res['expires'] == '0':
                    return []
                #end if
                type = value = ''  # defaults so the getResponse call below is safe if no meta tag matches
                match = re.findall(
                    r"<(\s*)meta(\s+)http-equiv(\s*)=(\s*)('|\")(.+?)\5(\s+)content(\s*)=(\s*)('|\")(.+?)\10(\s*)/(\s*)>",
                    content, re.I | re.DOTALL)
                if match and len(match) > 0:
                    for row in match:
                        type = row[5].lower().replace(" ", "")
                        value = row[10].lower().replace(" ", "")
                        if type == 'cache-control' and value.find(
                                'no-cache') >= 0:
                            return []
                        elif type == 'pragma' and value.find('no-cache') >= 0:
                            return []
                        elif type == 'expires' and value == '0':
                            return []
                        #end if
                    #end for
                #end if

                detail = "不建议让 Web 浏览器保存任何登录信息,因为当有漏洞存在时,可能会危及这个信息。"
                request = getRequest(new_url, domain=ob['domain'])
                response = getResponse(res, type + value)
                output = ""
                result.append(
                    getRecord(ob, new_url, ob['level'], detail, request,
                              response, output))
            #end if
        #end if

        return result
    except Exception, e:
        logger.error("File:CheckCacheAdminPageScript.py, run_url function :" +
                     str(e))
        return []
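
The meta-tag fallback hinges on the regex in this plugin: it pairs each http-equiv attribute with its content value and uses the backreferences \5 and \10 to honor whichever quote style the page used. A quick self-contained check of what it captures (tuple indexes 5 and 10 correspond to regex groups 6 and 11, matching the row[5] and row[10] reads above):

import re

META_RE = (r"<(\s*)meta(\s+)http-equiv(\s*)=(\s*)('|\")(.+?)\5"
           r"(\s+)content(\s*)=(\s*)('|\")(.+?)\10(\s*)/(\s*)>")

html = '<meta http-equiv="Cache-Control" content="no-cache" />'
for row in re.findall(META_RE, html, re.I | re.DOTALL):
    print row[5], row[10]   # -> Cache-Control no-cache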