Example #1
async def get_result_from_dns(self, subhosts):
    res = set()
    async with aiomultiprocess.Pool(
            processes=processes,
            childconcurrency=childconcurrency) as pool:
        # Raise the process and coroutine counts above to saturate the CPU.
        results = await pool.map(self.Aio_Subdomain, subhosts)
    for result in results:
        subdomain, answers = result
        if answers is not None and subdomain is not None:
            if answers != self.FakeDomain_IP:
                # The answer differs from the wildcard baseline IP, so this
                # is a genuine (non-wildcard) resolution.
                res.add(subdomain)
            else:
                # Wildcard resolution: record the URL in the blacklist table.
                try:
                    close_old_connections()
                    BLACKURL.objects.create(url='http://' + subdomain,
                                            ip=str(answers),
                                            title=RequestsTitle('http://' +
                                                                subdomain),
                                            resons='Wildcard-DNS filter, or the URL is unreachable')
                except Exception:
                    pass
                res.add(self.FakeDomain_IP)
    return list(res)
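
A coroutine like get_result_from_dns has to be driven from an event loop before the aiomultiprocess pool spins up. A minimal sketch of how a caller might do that, assuming an instance (here called scanner) of the class this method belongs to; asyncio.run is the only new ingredient:

import asyncio

def resolve_all(scanner, subhosts):
    # asyncio.run builds the event loop that aiomultiprocess.Pool needs and
    # returns the deduplicated subdomain list produced by the coroutine.
    return asyncio.run(scanner.get_result_from_dns(subhosts))
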
def Baidu(domain):
    Baidu_Res = Cralw_Baidu(domain)
    Bing_Res = Crawl_Bing(domain)
    Sougou_Res = Crawl_Sougou(domain)
    res = []
    return_result = set()
    res.extend(Baidu_Res)
    res.extend(Bing_Res)
    res.extend(Sougou_Res)

    # Error-page markers that disqualify a response.
    error_markers = (b'Service Unavailable',
                     b'The requested URL was not found on',
                     b'The server encountered an internal error or miscon')
    for real in res:
        try:
            UA = random.choice(headerss)
            headers = {'User-Agent': UA, 'Connection': 'close'}
            r = requests.get(url=real,
                             headers=headers,
                             verify=False,
                             timeout=timeout)
            time.sleep(0.5)
            if not any(marker in r.content for marker in error_markers):
                if r.status_code in Alive_Status:
                    # Handle a CAPTCHA here if one is served.
                    real_url = r.url.rstrip('/')
                    bla = check_black(real_url)
                    NowUrl = str(
                        urlparse(real_url).scheme + '://' +
                        urlparse(real_url).netloc)
                    if not bla:
                        return_result.add(NowUrl)
                    else:
                        print(
                            '[+ URL Blacklist] URL hit the blacklist : {}'.format(NowUrl))
                        try:
                            burl = ''
                            for blacurl in black_list:
                                if blacurl in NowUrl:
                                    burl = blacurl
                            close_old_connections()
                            BLACKURL.objects.create(
                                url=NowUrl,
                                title=RequestsTitle(NowUrl),
                                resons='Hit URL blacklist: {}'.format(burl))
                        except Exception:
                            pass
        except Exception:
            pass
    return list(return_result)
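
check_black is defined elsewhere in the project; its call sites (a single URL above, a URL plus an explicit list such as ALL_DOMAINS in later examples) suggest a plain substring test against a rule list. A minimal sketch under that assumption, with black_list as the assumed default rule set:

def check_black(url, rules=None):
    # Return True if any blacklist rule occurs in the URL as a substring.
    rules = black_list if rules is None else rules
    return any(rule in url for rule in rules)
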
def Api(domain):
    result = set()
    mid = set()
    Baidu_res = Baidu_Api(domain)
    SecTra = Sec_Api(domain)
    CertSP = Certspotter_Api(domain)
    XimcX = Ximcx_Api(domain)
    EntrU = Entrus_Api(domain)
    Hackert = Hackert_Api(domain)
    Sitedossier = Sitedossier_Api(domain)
    Threatminer = Threatminer_Api(domain)

    Baidu_res.extend(SecTra)
    Baidu_res.extend(CertSP)
    Baidu_res.extend(XimcX)
    Baidu_res.extend(EntrU)
    Baidu_res.extend(CertSh)
    Baidu_res.extend(Hackert)
    Baidu_res.extend(Sitedossier)
    Baidu_res.extend(Threatminer)

    '''Merge the results from all sources.'''

    if Baidu_res:
        for u in Baidu_res:
            bla = check_black(u)
            if not bla:
                mid.add('http://' + u)
            else:
                print('[+ URL Blacklist] URL hit the blacklist : http://{}'.format(u))
                try:
                    burl = ''
                    for blacurl in black_list:
                        if blacurl in u:
                            burl = blacurl
                    close_old_connections()
                    BLACKURL.objects.create(url='http://{}'.format(u),
                                            title=RequestsTitle('http://{}'.format(u)),
                                            resons='Hit URL blacklist: {}'.format(burl))
                except Exception:
                    pass

    # `mid` is a set, so the original `mid != {}` comparison was always True;
    # a truthiness test is the intended emptiness check.
    if mid:
        result = Get_Alive_Url(list(mid))
        print('[+ ALL API] API : {} total live subdomains captured : {}'.format(domain, len(result)))
    return list(result)
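
Get_Alive_Url is another project helper not shown in this listing; judging from its use here it keeps only URLs that actually answer over HTTP. A rough sketch under that assumption, reusing the Alive_Status set from the surrounding code (the 10-second timeout is a guess):

import requests

def Get_Alive_Url(urls, timeout=10):
    alive = set()
    for u in urls:
        try:
            r = requests.get(u, timeout=timeout, verify=False)
            if r.status_code in Alive_Status:
                # Keep the post-redirect URL, without a trailing slash.
                alive.add(r.url.rstrip('/'))
        except requests.RequestException:
            pass
    return list(alive)
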
Example #4
def Run_Crawl(Domains):
    Domains = ['.' + str(x) for x in Domains]
    # Stagger start-up with a random 90-180 second delay so that workers
    # launched together do not all claim the same pending record.
    for _ in range(9):
        time.sleep(random.randint(10, 20))
    '''
    2019-12-23
    Domains now carry a "monitored" flag, so the logic here was reviewed:
    1. If a URL was scanned before,
    2. it already exists in the URL index table.
    3. If monitoring for its domain is paused later on,
    4. the fixed query below still returns the same records,
    5. so an extra check would be needed here.
    6. But monitoring may be switched back on afterwards,
    7. which means flipping that state twice.
    Conclusion: a subdomain of B may be crawled on site A even when it cannot
    be crawled on site B itself, so crawling a URL a few extra times does no
    real harm. Skipping the crawl is not recommended, so nothing is changed
    here. The check would have looked like this:
    for subd in ALL_DOMAINS:
        if subd in url:
            ins = True
            target_url.get = '是'
            # Set the flag early so the next worker does not pick up the
            # same record.
            target_url.save()
    if ins == False:
        target_url.get = '空'
        target_url.save()
        return
    '''
    try:
        # Claim the first pending record (get == '否' marks "not crawled").
        target_url = URL.objects.filter(get='否')[0]
        url = target_url.url
        target_url.get = '是'
        target_url.save()
        # The flag is flipped before crawling so the next worker does not
        # pick up the same record.
        # (A race-free variant of this claim is sketched after this example.)
    except Exception:
        Except_Log(stat=31, url='|Failed to claim a URL and set its scan state|', error='Failed to fetch a URL to crawl')
        # Claiming failed (no pending rows in the table): wait, reset, retry.
        time.sleep(600)
        ResetCrawl(db=Dbname)
        return

    try:
        All_Urls = Crawl(url)
        if All_Urls:
            All_Urls = set(All_Urls)
            Other_Domains = []
            try:
                # URLs that contain one of the monitored domains.
                Sub_Domains1 = set(
                    [y for x in Domains for y in All_Urls if x in y])
                if Sub_Domains1:
                    with ThreadPoolExecutor(
                            max_workers=pool_count) as pool1:
                        result = pool1.map(Add_Data_To_Url,
                                           list(Sub_Domains1))
                Other_Domains = list(All_Urls - Sub_Domains1)
            except Exception as e:
                Except_Log(stat=11, url='|Failed to collect URLs|', error=str(e))

            if Other_Domains:
                try:
                    for urle in Other_Domains:
                        if '.gov.cn' not in urle and '.edu.cn' not in urle:
                            try:
                                try:
                                    Test_Other_Url = list(
                                        Other_Url.objects.filter(url=urle))
                                except Exception:
                                    close_old_connections()
                                    Test_Other_Url = list(
                                        Other_Url.objects.filter(url=urle))
                                if Test_Other_Url == []:
                                    ip = get_host(urle)
                                    res = Get_Url_Info(urle).get_info()
                                    res_url = res.get('url')
                                    try:
                                        res_title = pymysql.escape_string(
                                            res.get('title'))
                                    except Exception:
                                        res_title = 'Error'
                                    res_power = res.get('power')
                                    res_server = res.get('server')
                                    status = res.get('status')
                                    res_ip = ip
                                    # if int(status) in Alive_Status:
                                    try:
                                        Other_Url.objects.create(
                                            url=res_url,
                                            title=res_title,
                                            power=res_power,
                                            server=res_server,
                                            status=status,
                                            ip=res_ip)
                                    except Exception as e:
                                        Except_Log(stat=33,
                                                   url=url + '|Asset crawl error|',
                                                   error=str(e))
                                        close_old_connections()
                                        Other_Url.objects.create(
                                            url=res_url,
                                            title='Error',
                                            power=res_power,
                                            server=res_server,
                                            status=status,
                                            ip=res_ip)
                            except Exception as e:
                                Except_Log(stat=37,
                                           url=url + '|Asset crawl error|',
                                           error=str(e))
                except Exception as e:
                    Except_Log(stat=36, url=url + '|Asset crawl error|', error=str(e))
        try:
            '''
            2019-12-23
            The crawl above is left unchanged, but for the next level of
            subdomain brute-forcing there is no point in touching domains
            that are no longer on the monitored list.
            '''
            for sub in Domains:
                if sub in url:
                    Br = Brute(url)
                    res = Br.substart()
                    res = list(set(res))
                    if res:
                        if len(res) > 150:
                            # An implausibly large result set almost always
                            # means wildcard DNS: blacklist the whole batch.
                            for r in res:
                                print(
                                    '[+ URL Universal] Auto-filtered wildcard-DNS URL : {}'.format(
                                        r))
                                try:
                                    close_old_connections()
                                    BLACKURL.objects.create(
                                        url=r,
                                        ip=get_host(r),
                                        title=RequestsTitle(r),
                                        resons='Wildcard-DNS auto filter')
                                except Exception:
                                    pass
                        else:
                            with ThreadPoolExecutor(
                                    max_workers=pool_count) as pool2:
                                result = pool2.map(Add_Data_To_Url, list(res))
        except Exception as e:
            Except_Log(stat=65, url=url + '|Next-level subdomain brute force failed|', error=str(e))
    except Exception as e:
        Except_Log(stat=32, url=url + '|URL crawl error|', error=str(e))
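
Run_Crawl claims its work item with a read-modify-write (filter on get='否', then flip the flag and save), which leaves a window in which two workers can grab the same row. Django can make the claim atomic with row locking; a sketch, assuming the same URL model and status values, and a database backend that supports SELECT ... FOR UPDATE SKIP LOCKED (e.g. PostgreSQL or MySQL 8):

from django.db import transaction

def claim_pending_url():
    # Lock and flip exactly one pending row inside a single transaction, so
    # two workers can never claim the same record.
    with transaction.atomic():
        target = (URL.objects.select_for_update(skip_locked=True)
                  .filter(get='否').first())
        if target is None:
            return None
        target.get = '是'
        target.save()
        return target
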
Example #5
def Add_Data_To_Url(url):
    '''
    2019-12-10
        1. Filter the incoming URL against the IP blacklist.
        2. Store the URL across the tables: web assets, URL index, host
           assets, and the per-domain monitoring counters.
    2020-01-14
        1. Added wildcard-DNS filtering rules.
    '''
    # Random jitter (10-40 s in total) to spread the load across workers.
    time.sleep(random.randint(5, 20))
    time.sleep(random.randint(5, 20))
    close_old_connections()
    urlhasdomain = check_black(url, ALL_DOMAINS)
    if not urlhasdomain:
        print('[+ Insert Url] URL is outside the monitored domains : {}'.format(url))
        try:
            close_old_connections()
            BLACKURL.objects.create(url=url,
                                    ip=get_host(url),
                                    title=RequestsTitle(url),
                                    resons='URL is outside the monitored domains')
            return
        except Exception:
            close_old_connections()
            return
    if '.gov.cn' in url or '.edu.cn' in url:
        return
    urlinblackurl = check_black(url, black_url)
    if urlinblackurl:
        print('[+ URL Blacklist] URL hit the blacklist : {}'.format(url))
        try:
            burl = ''
            for blacurl in black_url:
                if blacurl in url:
                    burl = blacurl
            close_old_connections()
            BLACKURL.objects.create(url=url,
                                    ip=get_host(url),
                                    title=RequestsTitle(url),
                                    resons='Hit URL blacklist: {}'.format(burl))
        except Exception:
            pass
        return

    try:
        ip = get_host(url)
        if ip == '获取失败':
            # get_host returns the literal '获取失败' ("fetch failed") on error.
            try:
                BLACKURL.objects.create(url=url,
                                        ip=get_host(url),
                                        title=RequestsTitle(url),
                                        resons='Failed to resolve the URL to an IP')
            except Exception:
                pass
            return
        if ip in black_ip:
            '''IP blacklist triggered.'''
            print('[+ IP Blacklist] IP hit the blacklist : {} --> {}'.format(ip, url))
            try:
                BLACKURL.objects.create(url=url,
                                        ip=get_host(url),
                                        title=RequestsTitle(url),
                                        resons='Hit IP blacklist: {}'.format(ip))
            except Exception:
                pass
            return

        try:
            test_url = list(URL.objects.filter(url=url))
        except Exception:
            try:
                test_url = list(URL.objects.filter(url=url))
            except Exception:
                close_old_connections()
                test_url = list(URL.objects.filter(url=url))

        if test_url != []:
            '''If the URL index table already has this URL, stop here.'''
            return
        '''
        2020-01-14
        1. From here the page is compared against the recorded wildcard-DNS
           baseline to decide whether this URL is a wildcard resolution.
        2. The baseline title, IP, and page content are fetched for the
           comparison.
        3. Titles are compared first: differing titles suggest the URL is not
           a wildcard resolution, but the title alone is unreliable, because
           on Ctrip (or some other large site, the author forgot which) the
           title turns into "enter the verification code ..." when the site
           is polled too fast.
        4. Page content is compared next: if the two pages are too similar,
           the URL is treated as a wildcard resolution.
        5. Why not simply compare IPs? Because e.g. xxadasda.yy.com and
           aedqwawrqw668.sdada.yy.com are clearly both wildcard resolutions,
           yet they resolve to different IPs.
        (A sketch of the content-similarity helper follows this example.)
        '''
        infjx = [x for x in ALL_DOMAINS if x in url]
        if not infjx:
            return
        infjx = infjx[0]
        inftitle, infip, infcontent = DOMAINSINFOS[infjx][
            'title'], DOMAINSINFOS[infjx]['ip'], DOMAINSINFOS[infjx]['content']
        DD = Get_Url_Info(url).get_info()
        comtitle, comip, comcontent = DD['title'], DD['ip'], DD['content']
        # if inftitle != comtitle:
        #     # Different titles would suggest no wildcard resolution
        #     # (roughly 80% accurate), but the check is useless for sites
        #     # such as Anjuke.
        #     pass
        # else:
        if infcontent != 'Error' and comcontent != 'Error':
            if Return_Content_Difflib(infcontent, comcontent):
                try:
                    print('[+ URL Universal] Auto-filtered wildcard-DNS URL : {}'.format(url))
                    close_old_connections()
                    BLACKURL.objects.create(url=url,
                                            ip=get_host(url),
                                            title=RequestsTitle(url),
                                            resons='Wildcard-DNS auto filter')
                    return
                except Exception:
                    return
        else:
            # The baseline is unusable: probe with a junk host prefix and
            # compare that response against this URL's content instead.
            DD1 = Get_Url_Info(url.replace('://', '://yyyyyyyyy')).get_info()
            comtitle1, comip1, comcontent1 = DD1['title'], DD1['ip'], DD1[
                'content']
            if Return_Content_Difflib(comcontent, comcontent1):
                try:
                    print('[+ URL Universal] Auto-filtered wildcard-DNS URL : {}'.format(url))
                    close_old_connections()
                    BLACKURL.objects.create(url=url,
                                            ip=get_host(url),
                                            title=RequestsTitle(url),
                                            resons='Wildcard-DNS auto filter')
                    return
                except Exception:
                    return

        print('[+ Insert Url] Storing URL : {}'.format(url))
        try:
            Test_Other_Url = Other_Url.objects.filter(url=url)
            '''If the web-asset table does not have this record yet, add it.'''
            if list(Test_Other_Url) == []:
                res = Get_Url_Info(url).get_info()
                res_url = res.get('url')
                try:
                    res_title = pymysql.escape_string(res.get('title'))
                except Exception as e:
                    res_title = 'Error'
                    Except_Log(stat=11, url=url + '|Failed to re-encode the page content', error=str(e))
                res_power = res.get('power')
                res_server = res.get('server')
                res_status = res.get('status')
                res_ip = ip
                try:
                    Other_Url.objects.create(url=res_url,
                                             title=res_title,
                                             power=res_power,
                                             server=res_server,
                                             status=res_status,
                                             ip=res_ip)
                except Exception as e:
                    close_old_connections()
                    Except_Log(stat=17, url=url + '|Title or other fields badly encoded', error=str(e))
                    Other_Url.objects.create(url=res_url,
                                             title='Error',
                                             power='Error',
                                             server=res_server,
                                             status=res_status,
                                             ip=res_ip)
        except Exception as e:
            Except_Log(stat=29, url=url + '|Web-asset table error', error=str(e))
        try:
            '''
            Fetch the status again to make sure the record still qualifies
            for storage, keeping the data consistent. The URL is added here
            to the index table and the cleaned-data table.
            '''
            test_url1 = list(URL.objects.filter(url=url))
            '''If the URL index table already has this site, skip the insert.'''
            if test_url1 == []:
                URL.objects.create(url=url, ip=ip)
                '''Add the URL to the URL index table.'''
                try:
                    try:
                        ZHRND = Get_Url_Info(url)
                        Sconten = ZHRND.get_info()['content']
                        if Sconten == 'Error':
                            '''Reaching this branch means fetching the page content failed.'''
                            pass
                        else:
                            try:
                                blackconincon = check_black(Sconten, black_con)
                                if blackconincon:
                                    '''Page-content blacklist triggered.'''
                                    burl = ''
                                    for blacurl in black_con:
                                        if blacurl in Sconten:
                                            burl = blacurl
                                    print(
                                        '[+ Cont Blacklist] Page content hit the blacklist : {}'.
                                        format(url))
                                    try:
                                        close_old_connections()
                                        BLACKURL.objects.create(
                                            url=url,
                                            ip=get_host(url),
                                            title=RequestsTitle(url),
                                            resons='Hit page-content blacklist: {}'.format(burl))
                                    except Exception:
                                        pass
                                    return
                            except Exception:
                                Sconten = '获取失败'  # "fetch failed" placeholder
                        Show_contents = pymysql.escape_string(Sconten)
                        Cont = Content()
                        Cont.url = url
                        Cont.content = Show_contents
                        IP_Res = Get_Ip_Info(ip)
                        Show_cs = IP_Res.get_cs_name(ip)
                        Cont.save()
                        Show_Data.objects.create(url=url,
                                                 ip=ip,
                                                 cs=Show_cs,
                                                 content=Cont)
                    except Exception as e:
                        close_old_connections()
                        Except_Log(stat=4, url=url + '|Foreign-key insert error', error=str(e))
                        Show_contents = 'Error'
                        Cont = Content()
                        Cont.url = url
                        Cont.content = Show_contents
                        IP_Res = Get_Ip_Info(ip)
                        Show_cs = IP_Res.get_cs_name(ip)
                        Cont.save()
                        Show_Data.objects.create(url=url,
                                                 ip=ip,
                                                 cs=Show_cs,
                                                 content=Cont)
                        '''Store the page content for the data-display view.'''
                except Exception as e:
                    Except_Log(stat=8, url=url + '|Foreign-key insert error', error=str(e))

            This_Sub = [x for x in ALL_DOMAINS if x in url]
            '''The monitored main domain this URL belongs to.'''
            try:
                '''Refresh the aggregate counters for that domain.'''
                if This_Sub != []:
                    Domain_Count = Domains.objects.filter(url=This_Sub[0])[0]
                    counts = Other_Url.objects.filter(
                        url__contains=This_Sub[0])
                    Domain_Count.counts = str(counts.count())
                    Domain_Count.save()
            except Exception as e:
                # str(This_Sub) guards the concatenation below: This_Sub is a
                # list, and the original code raised TypeError here.
                Except_Log(stat=15,
                           url=url + '|Failed to resolve the owning domain|' + str(This_Sub),
                           error=str(e))
        except Exception as e:
            Except_Log(stat=22, url=url + '|Failed to add the URL to the index table|', error=str(e))

        try:
            test_ip = list(IP.objects.filter(ip=ip))
        except Exception:
            close_old_connections()
            test_ip = list(IP.objects.filter(ip=ip))
            '''Start adding the IP while keeping the IP records consistent.
            This used to check whether the IP already exists and skip it when
            a scan was already running:
            if test_ip != []:
                test_ip_0 = IP.objects.filter(ip=ip)[0]
                # If the IP exists and is marked as being scanned, stop here.
                if test_ip_0.get == '是' or test_ip_0.get == '中':
                    return'''
        if test_ip == []:
            try:
                IP_Res = Get_Ip_Info(ip)
                area = IP_Res.get_ip_address(ip)
                cs_name = IP_Res.get_cs_name(ip)
                try:
                    IP.objects.create(ip=ip,
                                      servers='None',
                                      host_type='None',
                                      cs=cs_name,
                                      alive_urls='None',
                                      area=area)
                    '''Insert the row first; the detailed data is produced
                    asynchronously and handed to the next process. Scanning
                    the C-segment ports here would drag a slow operation into
                    this synchronous thread, so the port scan lives in the
                    thread that collects the detailed IP information.'''
                except Exception as e:
                    Except_Log(stat=86, url=url + '|Failed to convert the IP area encoding|', error=str(e))
                    IP.objects.create(ip=ip,
                                      servers='None',
                                      host_type='None',
                                      cs=cs_name,
                                      alive_urls='None',
                                      area='获取失败')  # "fetch failed" sentinel

            except Exception as e:
                Except_Log(stat=21, url=url + '|Failed to add the IP record|', error=str(e))

    except Exception as e:
        Except_Log(stat=30, url=url + '|Failed to process the incoming URL|', error=str(e))
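
Return_Content_Difflib is not shown in this listing; its name and call sites imply a difflib-based similarity test that returns True when two page bodies are near-duplicates. A sketch under that assumption, with the 0.8 threshold being a guess rather than the project's actual value:

import difflib

def Return_Content_Difflib(content_a, content_b, threshold=0.8):
    # True when the two pages are similar enough to be treated as the same
    # page, i.e. the candidate URL is most likely a wildcard resolution.
    ratio = difflib.SequenceMatcher(None, content_a, content_b).ratio()
    return ratio >= threshold
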
Example #6
def Add_Data_To_Url(url):
    '''
    2019-12-10
        1. Filter the incoming URL against the IP blacklist.
        2. Store the URL across the tables: web assets, URL index, host
           assets, and the per-domain monitoring counters.
    '''
    # Random jitter (30-120 s in total) to spread the load across workers.
    for _ in range(6):
        time.sleep(random.randint(5, 20))
    close_old_connections()
    print('[+ Insert Url] Storing URL : {}'.format(url))
    if '.gov.cn' in url or '.edu.cn' in url:
        return
    urlinblackurl = check_black(url, black_url)
    if urlinblackurl:
        print('[+ URL Blacklist] URL hit the blacklist : {}'.format(url))
        try:
            burl = ''
            for blacurl in black_url:
                if blacurl in url:
                    burl = blacurl
            close_old_connections()
            BLACKURL.objects.create(url=url,
                                    title=RequestsTitle(url),
                                    resons='Hit URL blacklist: {}'.format(burl))
        except Exception:
            pass
        return

    try:
        ip = get_host(url)
        if ip == '获取失败':
            # get_host returns the literal '获取失败' ("fetch failed") on error.
            try:
                BLACKURL.objects.create(url=url,
                                        title=RequestsTitle(url),
                                        resons='Failed to resolve the URL to an IP')
            except Exception:
                pass
            return
        if ip in black_ip:
            '''IP blacklist triggered.'''
            print('[+ IP Blacklist] IP hit the blacklist : {} --> {}'.format(ip, url))
            try:
                BLACKURL.objects.create(url=url,
                                        title=RequestsTitle(url),
                                        resons='Hit IP blacklist: {}'.format(ip))
            except Exception:
                pass
            return

        try:
            test_url = list(URL.objects.filter(url=url))
        except Exception:
            try:
                test_url = list(URL.objects.filter(url=url))
            except Exception:
                close_old_connections()
                test_url = list(URL.objects.filter(url=url))

        if test_url != []:
            '''If the URL index table already has this URL, stop here.'''
            return

        try:
            Test_Other_Url = Other_Url.objects.filter(url=url)
            '''If the web-asset table does not have this record yet, add it.'''
            if list(Test_Other_Url) == []:
                res = Get_Url_Info(url).get_info()
                res_url = res.get('url')
                try:
                    res_title = pymysql.escape_string(res.get('title'))
                except Exception as e:
                    res_title = 'Error'
                    Except_Log(stat=11, url=url + '|Failed to re-encode the page content', error=str(e))
                res_power = res.get('power')
                res_server = res.get('server')
                res_status = res.get('status')
                res_ip = ip
                try:
                    Other_Url.objects.create(url=res_url,
                                             title=res_title,
                                             power=res_power,
                                             server=res_server,
                                             status=res_status,
                                             ip=res_ip)
                except Exception as e:
                    close_old_connections()
                    Except_Log(stat=17, url=url + '|Title or other fields badly encoded', error=str(e))
                    Other_Url.objects.create(url=res_url,
                                             title='Error',
                                             power='Error',
                                             server=res_server,
                                             status=res_status,
                                             ip=res_ip)
        except Exception as e:
            Except_Log(stat=29, url=url + '|Web-asset table error', error=str(e))
        try:
            '''
            Fetch the status again to make sure the record still qualifies
            for storage, keeping the data consistent. The URL is added here
            to the index table and the cleaned-data table.
            '''
            test_url1 = list(URL.objects.filter(url=url))
            '''If the URL index table already has this site, skip the insert.'''
            if test_url1 == []:
                URL.objects.create(url=url, ip=ip)
                '''Add the URL to the URL index table.'''
                try:
                    try:
                        ZHRND = Get_Url_Info(url)
                        Sconten = ZHRND.get_info()['content']
                        if Sconten == 'Error':
                            '''Reaching this branch means fetching the page content failed.'''
                            pass
                        else:
                            try:
                                blackconincon = check_black(Sconten, black_con)
                                if blackconincon:
                                    '''Page-content blacklist triggered.'''
                                    burl = ''
                                    for blacurl in black_con:
                                        if blacurl in Sconten:
                                            burl = blacurl
                                    print(
                                        '[+ Cont Blacklist] Page content hit the blacklist : {}'.
                                        format(url))
                                    try:
                                        close_old_connections()
                                        BLACKURL.objects.create(
                                            url=url,
                                            title=RequestsTitle(url),
                                            resons='Hit page-content blacklist: {}'.format(burl))
                                    except Exception:
                                        pass
                                    return
                            except Exception:
                                Sconten = '获取失败'  # "fetch failed" placeholder
                        Show_contents = pymysql.escape_string(Sconten)
                        Cont = Content()
                        Cont.url = url
                        Cont.content = Show_contents
                        IP_Res = Get_Ip_Info(ip)
                        Show_cs = IP_Res.get_cs_name(ip)
                        Cont.save()
                        Show_Data.objects.create(url=url,
                                                 ip=ip,
                                                 cs=Show_cs,
                                                 content=Cont)
                    except Exception as e:
                        close_old_connections()
                        Except_Log(stat=4, url=url + '|Foreign-key insert error', error=str(e))
                        Show_contents = 'Error'
                        Cont = Content()
                        Cont.url = url
                        Cont.content = Show_contents
                        IP_Res = Get_Ip_Info(ip)
                        Show_cs = IP_Res.get_cs_name(ip)
                        Cont.save()
                        Show_Data.objects.create(url=url,
                                                 ip=ip,
                                                 cs=Show_cs,
                                                 content=Cont)
                        '''Store the page content for the data-display view.'''
                except Exception as e:
                    Except_Log(stat=8, url=url + '|Foreign-key insert error', error=str(e))

            This_Sub = [x for x in ALL_DOMAINS if x in url]
            '''The monitored main domain this URL belongs to.'''
            try:
                '''Refresh the aggregate counters for that domain.'''
                if This_Sub != []:
                    Domain_Count = Domains.objects.filter(url=This_Sub[0])[0]
                    counts = Other_Url.objects.filter(
                        url__contains=This_Sub[0])
                    Domain_Count.counts = str(counts.count())
                    Domain_Count.save()
            except Exception as e:
                # str(This_Sub) guards the concatenation below: This_Sub is a
                # list, and the original code raised TypeError here.
                Except_Log(stat=15,
                           url=url + '|Failed to resolve the owning domain|' + str(This_Sub),
                           error=str(e))
        except Exception as e:
            Except_Log(stat=22, url=url + '|Failed to add the URL to the index table|', error=str(e))

        try:
            test_ip = list(IP.objects.filter(ip=ip))
        except Exception:
            close_old_connections()
            test_ip = list(IP.objects.filter(ip=ip))
            '''Start adding the IP while keeping the IP records consistent.
            This used to check whether the IP already exists and skip it when
            a scan was already running:
            if test_ip != []:
                test_ip_0 = IP.objects.filter(ip=ip)[0]
                # If the IP exists and is marked as being scanned, stop here.
                if test_ip_0.get == '是' or test_ip_0.get == '中':
                    return'''
        if test_ip == []:
            try:
                IP_Res = Get_Ip_Info(ip)
                area = IP_Res.get_ip_address(ip)
                cs_name = IP_Res.get_cs_name(ip)
                try:
                    IP.objects.create(ip=ip,
                                      servers='None',
                                      host_type='None',
                                      cs=cs_name,
                                      alive_urls='None',
                                      area=area)
                    '''Insert the row first; the detailed data is produced
                    asynchronously and handed to the next process. Scanning
                    the C-segment ports here would drag a slow operation into
                    this synchronous thread, so the port scan lives in the
                    thread that collects the detailed IP information.'''
                except Exception as e:
                    Except_Log(stat=86, url=url + '|Failed to convert the IP area encoding|', error=str(e))
                    IP.objects.create(ip=ip,
                                      servers='None',
                                      host_type='None',
                                      cs=cs_name,
                                      alive_urls='None',
                                      area='获取失败')  # "fetch failed" sentinel

            except Exception as e:
                Except_Log(stat=21, url=url + '|Failed to add the IP record|', error=str(e))

    except Exception as e:
        Except_Log(stat=30, url=url + '|Failed to process the incoming URL|', error=str(e))
        # Note: this retry recurses without any bound; a persistently failing
        # URL keeps re-entering the function (see the bounded sketch below).
        Add_Data_To_Url(url)
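
The tail call above retries on every failure with no limit, so a URL that always fails will recurse until Python's recursion limit is hit. A bounded variant is a one-parameter change; a sketch with the body elided (Except_Log is the project's logger, as above):

def Add_Data_To_Url(url, retries=2):
    try:
        ...  # the function body shown above, unchanged
    except Exception as e:
        Except_Log(stat=30, url=url + '|Failed to process the incoming URL|', error=str(e))
        if retries > 0:
            # Retry at most `retries` more times instead of without bound.
            Add_Data_To_Url(url, retries - 1)
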