Example #1
    def __init__(self, taskCnf, target):
        threading.Thread.__init__(self)
        self.module = self.__class__.__name__
        self.taskCnf = taskCnf
        self.taskId = taskCnf['id']
        self.assetTaskId = taskCnf['asset_task_id']
        self.vulList = taskCnf['vulList']
        self.target = target
        self.dao = MysqlDao()
Example #2
class HostScanThread(threading.Thread):
    def __init__(self, taskCnf, target):
        threading.Thread.__init__(self)
        self.module = self.__class__.__name__
        self.taskCnf = taskCnf
        self.taskId = taskCnf['id']
        self.assetTaskId = taskCnf['asset_task_id']
        self.vulList = taskCnf['vulList']
        self.target = target
        self.dao = MysqlDao()

    def run(self):
        try:
            while True:
                try:
                    if not self.target.empty():
                        try:
                            ip = self.target.get_nowait()
                        except Exception:
                            continue

                        scriptThreads = []
                        for vulIds in self.vulList:
                            scriptThreads.append(
                                ScriptScanThread(self.taskCnf, ip, vulIds))
                        for t in scriptThreads:
                            t.start()

                        for t in scriptThreads:
                            t.join()

                        self.dao.updateData(
                            'host_infos', {'host_scan_state': 1}, {
                                'task_id': self.taskId,
                                'asset_task_id': self.assetTaskId,
                                'ip': ip
                            })
                    else:
                        break
                except Exception as e1:
                    logger.error(e1)
        except Exception as e:
            logger.error(e)
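
A minimal usage sketch of how this worker is typically driven, assuming a shared Queue of target IPs and the taskCnf shape the constructor reads ('id', 'asset_task_id', 'vulList'); the thread count and all sample values below are illustrative, not from the original project:

# Hedged usage sketch: drive HostScanThread workers from a shared queue.
# The taskCnf values and the 5-thread count are assumptions for illustration.
from Queue import Queue  # Python 2 stdlib ('queue' on Python 3)

taskCnf = {'id': 1, 'asset_task_id': 0, 'vulList': [['101', '102']]}
target = Queue()
for ip in ['10.0.0.1', '10.0.0.2']:
    target.put(ip)

workers = [HostScanThread(taskCnf, target) for _ in range(5)]
for w in workers:
    w.start()
for w in workers:
    w.join()  # each worker drains the queue until target.empty()
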
Example #3
    def __init__(self, scanCnf):
        try:
            self.module = self.__class__.__name__
            self.scanCnf = scanCnf
            self.taskId = scanCnf['taskId']
            self.assetTaskId = scanCnf['assetTaskId']
            self.siteId = scanCnf['siteId']
            self.vulId = scanCnf['vulId']
            self.scriptThread = scanCnf['scriptThread']
            self.ip = scanCnf['ip']
            self.scheme = scanCnf['scheme']
            self.domain = scanCnf['domain']
            self.path = scanCnf['path']
            self.cookie = scanCnf['cookie']
            self.endTime = scanCnf['endTime']
            self.maxTimeoutCount = scanCnf['maxTimeoutCount']
            self.level = scanCnf['level']
            self.script = scanCnf['script']
            self.webTimeout = scanCnf['webTimeout']
            self.dao = MysqlDao()
            self.urlQueue = scanCnf.get('queue')
            self.threadLock = threading.Lock()
            self.timeoutCount = 0

        except Exception as e:
            logger.error(e)
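
For reference, a minimal scanCnf dict carrying every key this constructor reads; all values are illustrative assumptions (at scan time they are filled in from the task configuration, as Example #13 below shows):

# Hedged sketch of the scanCnf shape consumed above; all values are sample data.
import time

scanCnf = {
    'taskId': 1, 'assetTaskId': 0, 'siteId': 7, 'vulId': '101',
    'scriptThread': 10, 'ip': '10.0.0.1',
    'scheme': 'http', 'domain': 'example.com', 'path': '/', 'cookie': '',
    'endTime': time.time() + 1800, 'maxTimeoutCount': 20,
    'level': 'high', 'script': 'check_web_alive', 'webTimeout': 30,
    # 'queue' is optional; the constructor reads it via scanCnf.get('queue')
}
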
Example #4
    def __init__(self, queue, taskId, assetTaskId):
        threading.Thread.__init__(self)
        self.module = self.__class__.__name__
        self.queue = queue
        self.taskId = str(taskId)
        self.assetTaskId = str(assetTaskId)
        self.dao = MysqlDao()
Example #5
    def __init__(self, taskCnf, ip, vulIds):
        threading.Thread.__init__(self)
        self.module = self.__class__.__name__
        self.taskCnf = taskCnf
        self.taskId = taskCnf['id']
        self.assetTaskId = taskCnf['asset_task_id']
        self.vulIds = vulIds
        self.ip = ip
        self.dao = MysqlDao()
Example #6
    def __init__(self, taskId, action):
        self.module = self.__class__.__name__
        self.taskId = taskId
        self.action = action
        self.assetTaskId = 0
        self.queue = Queue()
        self.ipList = []
        self.dao = MysqlDao()

        if not self.checkProgress():
            sys.exit()
Example #7
    def __init__(self, target, taskId, assetTaskId, ports, timeout):
        try:
            threading.Thread.__init__(self)
            self.module = self.__class__.__name__
            self.target = target
            self.taskId = taskId
            self.assetTaskId = assetTaskId
            self.ports = ports
            self.timeout = timeout
            self.dao = MysqlDao()
        except Exception as e:
            logger.error(e)
Example #8
    def __init__(self, taskId, assetTaskId, taskCnf, threadLock):
        try:
            threading.Thread.__init__(self)
            self.module = self.__class__.__name__
            self.taskId = taskId
            self.assetTaskId = assetTaskId
            self.taskCnf = taskCnf
            self.threadLock = threadLock
            self.threadName = threading.currentThread().getName()
            self.dao = MysqlDao()
            self.count = 0

        except Exception as e:
            logger.error(e)
Example #9
    def __init__(self, taskId, taskCnf):
        try:
            threading.Thread.__init__(self)
            self.module = self.__class__.__name__
            self.taskId = taskId
            self.assetTaskId = taskCnf['asset_task_id']
            self.taskCnf = taskCnf
            self.sitePorts = [80, 81, 443, 8080]
            self.http = HttpRequest(
                {'timeout': self.taskCnf['web_search_site_timeout']})
            self.htmlParser = HTMLParser.HTMLParser()
            self.ipList = []
            self.dao = MysqlDao()

        except Exception as e:
            logger.exception(e)
Example #10
    def __init__(self, taskId, action):
        self.taskId = str(taskId)
        self.action = action
        self.module = self.__class__.__name__
        self.assetTaskId = 0
        self.dao = MysqlDao()
        self.target = Queue()
        self.thread = 1
        self.timeout = 30
        self.ports = '80,81,8081,8089,443,22,23,3306,3389'

        #task configuration
        self.taskCnf = {}

        #thread lock
        self.threadLock = threading.Lock()

        if not self.checkProgress():
            sys.exit(3)
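
The comma-separated ports default above is presumably passed through to the port-scan worker threads (see Example #7); parsing it into integers is a one-liner. A minimal sketch using that default value:

# Hedged sketch: parse the default ports string into a list of ints.
ports = '80,81,8081,8089,443,22,23,3306,3389'
portList = [int(p) for p in ports.split(',') if p]
# -> [80, 81, 8081, 8089, 443, 22, 23, 3306, 3389]
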
Example #11
    def __init__(self, taskId, action):
        self.taskId = str(taskId)
        self.action = action
        self.module = self.__class__.__name__
        self.assetTaskId = 0
        self.taskName = ''
        self.policy = 0
        self.timeout = 30
        self.thread = 10
        self.maxScript = 50
        self.vulList = []
        self.dao = MysqlDao()

        #task configuration
        self.taskCnf = {}

        self.target = Queue()
        #thread lock
        self.threadLock = threading.Lock()

        if not self.checkProgress():
            sys.exit(3)
Example #12
    def __init__(self, taskId, action):
        self.taskId = str(taskId)
        self.action = action
        self.module = self.__class__.__name__
        self.assetTaskId = 0
        self.dao = MysqlDao()
        self.thread = 1
        self.timeout = 30
        self.policy = []
        self.portDic = {
            '21': 'ftp',
            '22': 'ssh',
            '3389': 'rdp',
            '23': 'telnet',
            '1433': 'mssql',
            '3306': 'mysql',
            '1521': 'oracle',
            '445': 'smb',
            '139': 'smb',
            '5900': 'vnc'
        }
        #self.compiledRule = re.compile('\x5b[\d]+\x5d\x5b[\w]+\x5d\s+host_scan:.*.login:.*.password:.*.')
        self.compiledRule = re.compile(
            r'\[([0-9]+)\]\[([0-9a-zA-Z]+)\]\s+host_scan:\s+([0-9\.]+)\s+login:\s+(.*)\s+password:\s+(.*)'
        )
        #[3306][mysql] host_scan: 127.0.0.1   login: root   password: 123456

        #task configuration
        self.taskCnf = {}

        self.target = Queue()
        #thread lock
        self.threadLock = threading.Lock()

        if not self.checkProgress():
            sys.exit(3)
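
The compiled rule parses the cracker output format shown in the trailing comment. A quick worked check of its five capture groups against that sample line, using Python's standard re module:

# Worked example: apply the host_scan credential rule to the sample line above.
import re

compiledRule = re.compile(
    r'\[([0-9]+)\]\[([0-9a-zA-Z]+)\]\s+host_scan:\s+([0-9\.]+)\s+login:\s+(.*)\s+password:\s+(.*)'
)
line = '[3306][mysql] host_scan: 127.0.0.1   login: root   password: 123456'
m = compiledRule.search(line)
print(m.groups())  # -> ('3306', 'mysql', '127.0.0.1', 'root', '123456')
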
Example #13
    def scanSiteMain(self, siteId):
        try:
            logger.debug("start to scan site, siteId: %s" % (siteId))
            if siteId is None:
                return False

            dao = MysqlDao()
            siteObj = dao.getSiteData(siteId)
            if siteObj is None:
                logger.error("start to get site config exception, siteId: %s" %
                             (siteId))
                return False

            #scheme
            scheme = siteObj['scheme'].encode('utf8')
            #ip address
            ip = siteObj['ip'].encode('utf8')
            #site domain
            domain = siteObj['domain'].encode('utf8')
            #site scan state
            state = siteObj['state']
            #site path
            path = siteObj['path'].encode('utf8')
            #site title
            title = siteObj['title'].encode('utf8')
            #site type
            siteType = siteObj['site_type'].encode('utf8')
            #site cookie
            cookie = siteObj['cookie'].encode('utf8')
            #site include url
            includeUrl = siteObj['include_url'].encode('utf8')
            if includeUrl == '':
                includeUrl = []
            else:
                includeUrl = json.read(includeUrl)
            #site exclude url
            excludeUrl = siteObj['exclude_url'].encode('utf8')
            if excludeUrl == '':
                excludeUrl = []
            else:
                excludeUrl = json.read(excludeUrl)
            #scan progress
            progress = siteObj['progress'].encode('utf8')
            #site scan policy
            policy = siteObj['policy']

            if state == 1:
                self.finishSiteScan(siteId, ip)
                return True

            #add this domain's DNS info to the DNS config file
            # self.threadLock.acquire()
            # self.updateHosts(ip, domain, self.taskId, siteId, 'add')
            # self.threadLock.release()
            '''
            #  This block is disabled; the site-liveness check is rewritten later (around line 700 of the code) for better robustness, and its result is written into the report  20170804
            flag = res = content = checkOk = None
            target = []
            target.append("%s://%s%s"%(scheme,domain,path))
            # -------------UPDATE BY MCJ: start scanning as soon as a site is found; no need to re-check site status
            checkOk = True
            # for url in target:
            #     flag, res, content = self.PreSiteScan(url)
            #     if not flag:
            #         continue
            #     else:
            #         if self.checkSiteWorkMode(res, title) == False:
            #             continue
            #         else:
            #             checkOk = 1
            #             break
            # ----------
            if not checkOk:
                self.updateSiteException("网站无法访问", siteId, ip)
                return
            else:
                siteCode = self.getSiteCode(content)
                if title == "" and res and res.has_key('status') and res['status'] == '200':
                    title = self.updateSiteTitle(content, siteId)
                if siteType == "":
                    siteType = self.updateSiteType(res, siteId)
                if siteCode == "":
                    siteCode = self.getSiteCode(content)
            '''
            if self.taskCnf['web_scan_timeout']:
                socket.setdefaulttimeout(self.taskCnf['web_scan_timeout'])

            siteDb = {'state': 0, 'exception': ''}
            if siteObj['start_time'] is None or siteObj[
                    'start_time'] == '0000-00-00 00:00:00':
                siteDb['start_time'] = time.strftime("%Y-%m-%d %X",
                                                     time.localtime())
            if siteObj['progress'] == '':
                siteDb['progress'] = '0'
            self.dao.updateData('sites', siteDb, {'id': siteId})

            ###############################
            #policy:
            #    1: fast scan, only the specified domain
            #    2: full scan, the specified domain plus its second-level domains
            #    3: scan the specified directory and its subdirectories
            #    4: scan only the specified URLs; no crawler needed in this case
            #    5: domains obtained via reverse domain lookup
            #    6: login-based scan
            ###############################
            ## spider disabled by mcj
            # if self.taskCnf['spider_enable'] == 1 and siteObj['spider_state'] == 0:
            #     logger.debug('spider is start')
            #
            #     progress = '0'
            #
            #     self.dao.deleteData('web_result', {'site_id':siteId})
            #     self.dao.deleteData('web_result_data', {'site_id':siteId})
            #     self.dao.deleteData('spider_url', {'site_id':siteId})
            #
            #     #start the crawler; it is not needed when scanning specified URLs
            #     if siteObj['policy'] != 4:
            #         spiderCnf = {}
            #         spiderCnf['taskId'] = self.taskId
            #         spiderCnf['assetTaskId'] = self.assetTaskId
            #         spiderCnf['siteId'] = siteId
            #         spiderCnf['spiderUrlCount'] = self.taskCnf['spider_url_count']
            #         spiderCnf['webScanTime'] = self.taskCnf['web_scan_timeout']
            #         spiderCnf['policy'] = siteObj['policy']
            #         spiderCnf['scheme'] = siteObj['scheme'].encode('utf8')
            #         spiderCnf['domain'] = domain
            #         spiderCnf['path'] = path
            #         spiderCnf['maxTimeCount'] = 30
            #         spiderCnf['webScanTimeout'] = self.taskCnf['web_scan_timeout']
            #         spiderCnf['endTime'] = time.time() + 1800
            #         spiderCnf['maxnum'] = self.taskCnf['spider_url_count']
            #         spiderCnf['title'] = title
            #         spiderCnf['ip'] = ip
            #         spiderCnf['cookie'] = cookie
            #         spiderCnf['webSearchSiteState'] = self.taskCnf['web_search_site_state']
            #         spiderCnf['webSearchSiteTimeout'] = self.taskCnf['web_search_site_timeout']
            #         spiderCnf['includeUrl'] = includeUrl
            #         spiderCnf['excludeUrl'] = excludeUrl
            #         spiderCnf['downloadDir'] = SCANER_SPIDER_DOWNLOAD_DIR
            #
            #         if self.taskCnf['spider_type'] == 2:
            #             import Spider2 as Spider
            #         else:
            #             import Spider
            #
            #         logger.debug("spiderCnf start")
            #         logger.debug(spiderCnf)
            #         logger.debug("spiderCnf end")
            #         spider = Spider.Spider(spiderCnf)
            #         spider.start()
            #
            #     logger.debug('spider is end')

            self.dao.updateData('sites', {'spider_state': 1}, {'id': siteId})

            siteCnf = dao.getSiteData(siteId)
            domain = siteCnf['domain'].encode('utf8')
            path = siteCnf['path'].encode('utf8')

            #check the site status; some sites immediately return 500 or other errors when visited.
            if not self.checkSiteWorkMode({}, title):
                self.finishSiteScan(siteId, ip)
                return

            logger.debug('get site scan config')

            scanCnf = {}
            scanCnf['taskId'] = self.taskId
            scanCnf['assetTaskId'] = self.assetTaskId
            scanCnf['siteId'] = siteId
            scanCnf['maxThread'] = 10
            scanCnf['scriptThread'] = 10
            scanCnf['webTimeout'] = self.taskCnf['web_scan_timeout']
            scanCnf['ip'] = ip
            # added origin ip parameter  by mcj
            target = json.read(str(self.taskCnf['target']))
            source_ip = target[0].get('source_ip')
            if source_ip:
                scanCnf['source_ip'] = source_ip
            scanCnf['scheme'] = scheme
            scanCnf['domain'] = domain
            scanCnf['path'] = path
            scanCnf['errorCount'] = 0
            scanCnf['errorLenDict'] = {}
            scanCnf['maxTimeoutCount'] = 20
            scanCnf['cookie'] = cookie
            scanCnf['len404'] = []
            scanCnf['isForce'] = 0
            scanCnf['excludeUrl'] = excludeUrl
            scanCnf['threadLock'] = threading.Lock()
            scanCnf['isstart'] = '1'

            # ----------- check site liveness; if alive, fetch the cookie and related info  by lichao
            if source_ip:
                test_url = "%s://%s%s" % (scheme, source_ip, path)
            else:
                test_url = "%s://%s%s" % (scheme, domain, path)
            test_header = {'Host': domain}
            checkOk, siteCode = False, None
            for i in range(3):
                try:
                    http = HttpRequest({
                        'domain': domain,
                        'timeout': 15,
                        'follow_redirects': True,
                        'cookie': cookie
                    })
                    res, content = http.request(test_url,
                                                'GET',
                                                headers=test_header)
                    if self.checkSiteWorkMode(res, title):
                        siteCode = self.getSiteCode(content)
                        if not title:
                            title = self.updateSiteTitle(content, siteId)
                        if not siteType:
                            siteType = self.updateSiteType(res, siteId)
                        if not cookie:
                            cookie = res.get('set-cookie')
                        checkOk = True
                        break
                    else:
                        sleep(5)
                except Exception:
                    sleep(5)

            if cookie:
                scanCnf['cookie'] = cookie
            if title:
                scanCnf['title'] = title
            if siteType:
                scanCnf['siteType'] = siteType
            if siteCode:
                scanCnf['siteCode'] = siteCode

            if not checkOk:
                self.updateSiteException("网站无法访问", siteId, ip)  # "site is unreachable"
            # --------------------------------------------

            # ------------------- detect the site's web-framework fingerprint  by lichao  (reserved feature, not yet used)
            # if checkOk:
            #     from engine.engine_utils.check_web_fingerprint import web_frame_fingerprint
            #     scanCnf['web_frame'] = web_frame_fingerprint(ob=scanCnf)
            # -------------------

            # ----------- get sites_dirs by mcj
            site_dirs = get_site_dirs(self.taskId)
            scanCnf['site_dirs'] = site_dirs
            # -----------
            # ---------------- get web fingerprint  by lichao
            if checkOk:
                scanCnf['webServer'] = web_server_fingerprint(
                    scheme, source_ip, domain,
                    path)  # 'apache|nginx|iis|unknown'
                scanCnf['os'] = os_fingerprint(
                    source_ip)  # 'linux|windows|unknown'
            # -----------------

            # ---------------- verify 404 page and waf page by lichao
            scanCnf['404_page'], scanCnf['app_404_page'], scanCnf[
                'waf_page'] = get_invaild_page(scheme, source_ip, domain,
                                               siteType)
            scanCnf['404_page']['similar_rate'] = 0.8
            scanCnf['app_404_page']['similar_rate'] = 0.8
            scanCnf['waf_page']['similar_rate'] = 0.8
            # ---------------------------

            # check this domain's scan progress and load the vuln IDs that have not been scanned yet
            logger.debug('load unscaned script start')
            scanVulList = []
            progress = progress.split('|')
            for vulId in self.taskCnf['vulList']:
                if vulId not in progress:
                    scanVulList.append(vulId)

            logger.debug('script scan is start')
            if len(scanVulList) > 0:
                urlList = []
                if policy == 4:
                    for url in includeUrl:
                        if url in excludeUrl:
                            continue
                        t = url.split('?')
                        url = t[0]
                        params = ''
                        if len(t) > 1:
                            params = t[1]
                        urlList.append({
                            'url': url,
                            'params': params,
                            'method': 'get'
                        })
                else:
                    res = self.dao.getUrlList(siteId)
                    for r in res:
                        url = r['url'].encode('utf8')
                        if nonascii(url): url = safeUrlString(url)
                        urlList.append({
                            'url': url,
                            'params': r['params'].encode('utf8'),
                            'method': r['method'].encode('utf8'),
                            'refer': r['refer'].encode('utf8')
                        })

                # ----------- site-liveness check by lichao: fetch the plugin id of the check_web_alive script
                check_ok_vul_id = ""
                db_session = DBSession()
                try:
                    vul = db_session.query(WebVulList).filter(
                        WebVulList.script == 'check_web_alive').first()
                    check_ok_vul_id = str(vul.id)
                except Exception as e:
                    logger.error(e)
                db_session.close()
                # -----------

                for vulId in scanVulList:
                    from time import time as during_time
                    t1 = during_time()
                    vulId = vulId.replace(" ", "")
                    if vulId == "":
                        continue

                    # ----------- site-liveness check by lichao
                    if not checkOk and len(urlList) <= 1:  # site judged unreachable
                        if vulId != check_ok_vul_id:  # when unreachable, run only the check_web_alive plugin
                            continue
                    else:  # site is reachable
                        if vulId == check_ok_vul_id:  # when reachable, skip the check_web_alive plugin
                            continue
                    # ------------

                    progress.append(vulId)
                    self.dao.updateData('sites',
                                        {'progress': '|'.join(progress)},
                                        {'id': siteId})
                    self.dao.deleteData('web_result', {
                        'vul_id': vulId,
                        'site_id': siteId
                    })

                    scanCnf['vulId'] = vulId
                    scanCnf['vulName'] = self.taskCnf['vulDict'][vulId][
                        'vul_name']
                    scanCnf['level'] = self.taskCnf['vulDict'][vulId][
                        'level'].encode('utf8')
                    scanCnf['scanType'] = self.taskCnf['vulDict'][vulId][
                        'scan_type']
                    scanCnf['script'] = self.taskCnf['vulDict'][vulId][
                        'script']
                    scanCnf['status'] = '0'
                    scanCnf['endTime'] = time.time() + 1800
                    scanCnf['timeoutCount'] = 0

                    #test the paths found by the crawler
                    if scanCnf['scanType'] == 1:
                        scanCnf['queue'] = Queue()
                        for r in urlList:
                            scanCnf['queue'].put(r)
                        scanUrlScript = ScanScriptForUrl(scanCnf)
                        scanUrlScript.start()

                    #when testing only the specified URLs, skip the domain and vuln-library tests
                    if policy != 4:
                        #test the domain
                        if scanCnf['scanType'] == 2:
                            scanDomainScript = ScanScriptForDomain(scanCnf)
                            scanDomainScript.start()
                    duration = during_time() - t1
                    # ----------- record plugin running time by mcj
                    try:

                        from common.plugin_speed import PluginSpeed
                        db_session = DBSession()
                        plu_speed = PluginSpeed(self.taskId, vulId, duration)
                        db_session.add(plu_speed)
                        db_session.commit()
                        db_session.close()
                    except Exception as e:
                        logger.info(str(e))
                        db_session.rollback()
                        db_session.close()
                    # ----------- record plugin running time by mcj
                    if not checkOk and len(urlList) <= 1:
                        break
                urlList = []
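
Per-site scan progress is persisted as a '|'-joined string of finished vuln IDs: the method splits it on entry, filters the task's vulList against it, and re-joins it on every update. A minimal sketch of that round trip, with sample IDs:

# Hedged sketch of the progress round trip used above; IDs are sample data.
progress = '0|101|102'.split('|')        # already scanned -> ['0', '101', '102']
vulList = ['101', '102', '103']
scanVulList = [v for v in vulList if v not in progress]  # -> ['103']

progress.append('103')                   # mark the next vuln as started
row = {'progress': '|'.join(progress)}   # '0|101|102|103', written back to `sites`
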
Example #14
    def scanSiteMain(self, siteId):
        try:
            logger.debug("start to scan site, siteId: %s" % (siteId))
            if siteId is None:
                return False

            dao = MysqlDao()
            siteObj = dao.getSiteData(siteId)
            if siteObj is None:
                logger.error("start to get site config exception, siteId: %s" %
                             (siteId))
                return False

            #scheme
            scheme = siteObj['scheme'].encode('utf8')
            #ip address
            ip = siteObj['ip'].encode('utf8')
            #site domain
            domain = siteObj['domain'].encode('utf8')
            #site scan state
            state = siteObj['state']
            #site path
            path = siteObj['path'].encode('utf8')
            #site title
            title = siteObj['title'].encode('utf8')
            #site type
            siteType = siteObj['site_type'].encode('utf8')
            #site cookie
            cookie = siteObj['cookie'].encode('utf8')
            #site include url
            includeUrl = siteObj['include_url'].encode('utf8')
            if includeUrl == '':
                includeUrl = []
            else:
                includeUrl = json.read(includeUrl)
            #site exclude url
            excludeUrl = siteObj['exclude_url'].encode('utf8')
            if excludeUrl == '':
                excludeUrl = []
            else:
                excludeUrl = json.read(excludeUrl)
            #scan progress
            progress = siteObj['progress'].encode('utf8')
            #site scan policy
            policy = siteObj['policy']

            logger.debug("scanSiteMain siteId: %s" % (siteId))
            if state == 1:
                self.finishSiteScan(siteId, ip)
                return True

            #add this domain's DNS info to the DNS config file
            self.threadLock.acquire()
            self.updateHosts(ip, domain, self.taskId, siteId, 'add')
            self.threadLock.release()

            flag = res = content = checkOk = None
            target = []
            logger.debug("scanSiteMain siteId: %s  preSiteScan before" %
                         (siteId))
            target.append("%s://%s%s" % (scheme, domain, path))
            for url in target:
                flag, res, content = self.PreSiteScan(url)
                if not flag:
                    continue
                else:
                    if not self.checkSiteWorkMode(res, title):
                        continue
                    else:
                        checkOk = 1
                        break
            if not checkOk:
                self.updateSiteException("网站无法访问", siteId, ip)  # "site is unreachable"
                return
            else:
                siteCode = self.getSiteCode(content)
                if title == "" and res and 'status' in res and res['status'] == '200':
                    title = self.updateSiteTitle(content, siteId)
                if siteType == "":
                    siteType = self.updateSiteType(res, siteId)
                if siteCode == "":
                    siteCode = self.getSiteCode(content)

            if self.taskCnf['web_scan_timeout']:
                socket.setdefaulttimeout(self.taskCnf['web_scan_timeout'])

            siteDb = {'state': 0, 'exception': ''}
            if siteObj['start_time'] is None or siteObj[
                    'start_time'] == '0000-00-00 00:00:00':
                siteDb['start_time'] = time.strftime("%Y-%m-%d %X",
                                                     time.localtime())
            if siteObj['progress'] == '':
                siteDb['progress'] = '0'
            self.dao.updateData('sites', siteDb, {'id': siteId})

            logger.debug("scanSiteMain siteId: %s  policy before" % (siteId))
            ###############################
            #policy:
            #    1: fast scan, only the specified domain
            #    2: full scan, the specified domain plus its second-level domains
            #    3: scan the specified directory and its subdirectories
            #    4: scan only the specified URLs; no crawler needed in this case
            #    5: domains obtained via reverse domain lookup
            #    6: login-based scan
            ###############################
            if self.taskCnf['spider_enable'] == 1 and siteObj[
                    'spider_state'] == 0:
                logger.debug('spider is start')

                progress = '0'

                logger.debug("scanSiteMain siteId: %s  cleandata before" %
                             (siteId))
                self.dao.deleteData('web_result', {'site_id': siteId})
                self.dao.deleteData('web_result_data', {'site_id': siteId})
                self.dao.deleteData('spider_url', {'site_id': siteId})

                #start the crawler; it is not needed when scanning specified URLs
                if siteObj['policy'] != 4:
                    spiderCnf = {}
                    spiderCnf['taskId'] = self.taskId
                    spiderCnf['assetTaskId'] = self.assetTaskId
                    spiderCnf['siteId'] = siteId
                    spiderCnf['spiderUrlCount'] = self.taskCnf[
                        'spider_url_count']
                    spiderCnf['webScanTime'] = self.taskCnf['web_scan_timeout']
                    spiderCnf['policy'] = siteObj['policy']
                    spiderCnf['scheme'] = siteObj['scheme'].encode('utf8')
                    spiderCnf['domain'] = domain
                    spiderCnf['path'] = path
                    spiderCnf['maxTimeCount'] = 30
                    spiderCnf['webScanTimeout'] = self.taskCnf[
                        'web_scan_timeout']
                    spiderCnf['endTime'] = time.time() + 1800
                    spiderCnf['maxnum'] = self.taskCnf['spider_url_count']
                    spiderCnf['title'] = title
                    spiderCnf['ip'] = ip
                    spiderCnf['cookie'] = cookie
                    spiderCnf['webSearchSiteState'] = self.taskCnf[
                        'web_search_site_state']
                    spiderCnf['webSearchSiteTimeout'] = self.taskCnf[
                        'web_search_site_timeout']
                    spiderCnf['includeUrl'] = includeUrl
                    spiderCnf['excludeUrl'] = excludeUrl
                    spiderCnf['downloadDir'] = './log/'

                    logger.debug("scanSiteMain siteId: %s startSpider before" %
                                 (siteId))
                    if self.taskCnf['spider_type'] == 2:
                        import Spider2 as Spider
                    else:
                        import Spider

                    logger.debug("spiderCnf start")
                    logger.debug(spiderCnf)
                    logger.debug("spiderCnf end")
                    spider = Spider.Spider(spiderCnf)
                    spider.start()

                logger.debug('spider is end')

            self.dao.updateData('sites', {'spider_state': 1}, {'id': siteId})
            ####################################### spider work ends here; the scan phase follows #############################################
            return True  # NOTE: this early return makes the scan logic below unreachable

            siteCnf = dao.getSiteData(siteId)
            domain = siteCnf['domain'].encode('utf8')
            path = siteCnf['path'].encode('utf8')
            title = siteCnf['title']
            if title != '':
                title = title.encode('utf8')

            #check the site status; some sites immediately return 500 or other errors when visited.
            if not self.checkSiteWorkMode({}, title):
                self.finishSiteScan(siteId, ip)
                return

            logger.debug('get site scan config')

            scanCnf = {}
            scanCnf['taskId'] = self.taskId
            scanCnf['assetTaskId'] = self.assetTaskId
            scanCnf['siteId'] = siteId
            scanCnf['maxThread'] = 10
            scanCnf['scriptThread'] = 10
            scanCnf['webTimeout'] = self.taskCnf['web_scan_timeout']
            scanCnf['ip'] = ip
            scanCnf['scheme'] = scheme
            scanCnf['domain'] = domain
            scanCnf['path'] = path
            scanCnf['cookie'] = cookie
            scanCnf['errorCount'] = 0
            scanCnf['errorLenDict'] = {}
            scanCnf['siteType'] = siteType
            scanCnf['maxTimeoutCount'] = 20
            scanCnf['siteCode'] = siteCode
            scanCnf['len404'] = []
            scanCnf['isForce'] = 0
            scanCnf['excludeUrl'] = excludeUrl
            scanCnf['threadLock'] = threading.Lock()
            scanCnf['isstart'] = '1'

            #check this domain's scan progress and load the vuln IDs that have not been scanned yet
            logger.debug('load unscaned script start')
            scanVulList = []
            progress = progress.split('|')
            for vulId in self.taskCnf['vulList']:
                if vulId not in progress:
                    scanVulList.append(vulId)

            logger.debug('script scan is start')
            if len(scanVulList) > 0:
                urlList = []
                if policy == 4:
                    for url in includeUrl:
                        if url in excludeUrl:
                            continue
                        t = url.split('?')
                        url = t[0]
                        params = ''
                        if len(t) > 1:
                            params = t[1]
                        urlList.append({
                            'url': url,
                            'params': params,
                            'method': 'get'
                        })
                else:
                    res = self.dao.getUrlList(siteId)
                    for r in res:
                        url = r['url'].encode('utf8')
                        if nonascii(url): url = safeUrlString(url)
                        urlList.append({
                            'url': url,
                            'params': r['params'].encode('utf8'),
                            'method': r['method'].encode('utf8'),
                            'refer': r['refer'].encode('utf8')
                        })

                for vulId in scanVulList:
                    vulId = vulId.replace(" ", "")
                    if vulId == "":
                        continue

                    progress.append(vulId)
                    self.dao.updateData('sites',
                                        {'progress': '|'.join(progress)},
                                        {'id': siteId})
                    self.dao.deleteData('web_result', {
                        'vul_id': vulId,
                        'site_id': siteId
                    })

                    scanCnf['vulId'] = vulId
                    scanCnf['vulName'] = self.taskCnf['vulDict'][vulId][
                        'vul_name']
                    scanCnf['level'] = self.taskCnf['vulDict'][vulId][
                        'level'].encode('utf8')
                    scanCnf['scanType'] = self.taskCnf['vulDict'][vulId][
                        'scan_type']
                    scanCnf['script'] = self.taskCnf['vulDict'][vulId][
                        'script']
                    scanCnf['status'] = '0'
                    scanCnf['endTime'] = time.time() + 1800
                    scanCnf['timeoutCount'] = 0

                    #test the paths found by the crawler
                    if scanCnf['scanType'] == 1:
                        scanCnf['queue'] = Queue()
                        for r in urlList:
                            scanCnf['queue'].put(r)
                        scanUrlScript = ScanScriptForUrl(scanCnf)
                        scanUrlScript.start()

                    #when testing only the specified URLs, skip the domain and vuln-library tests
                    if policy != 4:
                        #test the domain
                        if scanCnf['scanType'] == 2:
                            scanDomainScript = ScanScriptForDomain(scanCnf)
                            scanDomainScript.start()
                urlList = []

            #finish the scan
            self.finishSiteScan(siteId, ip)
            self.threadLock.acquire()
            self.updateHosts(ip, domain, self.taskId, siteId, 'remove')
            self.threadLock.release()

            return None
        except Exception as e:
            logger.exception(e)
            return siteId
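
The liveness probe in Example #13 (three attempts with five-second pauses) generalizes to a small helper. A sketch under the same assumptions: HttpRequest and the checkSiteWorkMode-style checker are the project's own helpers, and probe_site is a hypothetical name:

# Hedged sketch of the 3-try liveness probe from Example #13.
# HttpRequest and the checker callback are the project's helpers, assumed here.
from time import sleep

def probe_site(url, domain, cookie, checker, title='', tries=3, pause=5):
    for _ in range(tries):
        try:
            http = HttpRequest({'domain': domain, 'timeout': 15,
                                'follow_redirects': True, 'cookie': cookie})
            res, content = http.request(url, 'GET', headers={'Host': domain})
            if checker(res, title):  # e.g. self.checkSiteWorkMode
                return True, res, content
        except Exception:
            pass
        sleep(pause)
    return False, None, None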