Example #1
    def get_ipwhois(self, ip: str, reason: str) -> IPWhoisData:
        """get ip whois"""
        res: IPWhoisData = None
        try:

            url = '{}{}'.format(self._api_ipwhois, ip)

            # Use a static proxy IP to fetch the whois record; modified by judy 2020-10-27
            html = self._ha.getstring(url,
                                      headers="""
            Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
            Accept-Encoding: gzip, deflate
            Accept-Language: en-US,en;q=0.9,zh;q=0.8
            Cache-Control: no-cache
            Host: {}
            Pragma: no-cache
            Proxy-Connection: keep-alive
            Upgrade-Insecure-Requests: 1
            User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"""
                                      .format(self._api_host),
                                      proxies=ProxyMngr.get_static_proxy(),
                                      verify=False)

            if html is None or html == '':
                return res

            jcontent = json.loads(html)
            res: IPWhoisData = self._parse_one_ipwhois(ip, jcontent, reason)
            self._logger.info("Got an IPWhois: ip={} handle={} name={}".format(
                ip, res._handle, res._netname))

        except Exception:
            self._logger.debug("Get ipwhois error: ip:{}, error: {}".format(
                ip, traceback.format_exc()))
        return res
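
A minimal usage sketch for get_ipwhois; the hosting class name IPWhoisClient and its construction are assumptions, only ProxyMngr and IPWhoisData come from the source:

# Hypothetical usage (IPWhoisClient is a stand-in for whatever class
# hosts get_ipwhois in the real codebase).
client = IPWhoisClient()
record = client.get_ipwhois("8.8.8.8", reason="scout task")
if record is not None:
    print(record._handle, record._netname)
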
Example #2
 def __init__(self, task: IscoutTask):
     ScoutPlugBase.__init__(self)
     self.task = task
     self.proxydict = ProxyMngr.get_static_proxy()
     self._now = datetime.datetime.now(
         pytz.timezone("Asia/Shanghai")).date()
     self.source = "twitter"
     # current time as a Unix timestamp
     self.time_now: int = int(
         datetime.datetime.now(pytz.timezone("Asia/Shanghai")).timestamp())
     # the configured time window, in seconds
     self.time_limit = (int(self.task.cmd.stratagyscout.cmdnetworkid.
                            posttime.public_twitter.timerange) * 86400)
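
The two timestamps above define a sliding window: a tweet qualifies only when it was posted within time_limit seconds of time_now. A small sketch of that check, assuming post timestamps are Unix epoch integers:

def post_in_range(post_ts: int, time_now: int, time_limit: int) -> bool:
    # A post qualifies when it falls inside the configured window,
    # i.e. no older than time_limit seconds before time_now.
    return time_now - time_limit <= post_ts <= time_now
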
Example #3
    def get_resp(self, url: str, headers: str):
        failnum = 0
        resp: Response = None
        got = False
        while True:
            try:
                p: ProxyItem = ProxyMngr.get_one_crosswall()
                proxydict = None
                if isinstance(p, ProxyItem):
                    proxydict = p.proxy_dict
                    self._logger.debug(f"proxy ip: {p._ip}, port: {p._port}")
                
                resp: Response = self._ha.get_response(
                    url,
                    verify=False,
                    headers=headers,
                    timeout=30,
                    allow_redirects=False,
                    proxies=proxydict  # pass the fetched proxy; kwarg assumed, mirroring getstring()
                )
                if resp is None:
                    self._logger.debug(f"Cannt connect {url},resp:{None}")
                    failnum += 1
                    continue

                got = True
                break
            except Exception as ex:
                self._logger.debug(f"Request {url} error: {ex}")
                failnum += 1
            finally:
                if not got and failnum >= 3:
                    break

        if resp is None:
            self._logger.debug(
                f"Connect {url} failed three times, got nothing.")

        return resp
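
The loop above is a retry pattern that recurs in later examples. A generic sketch of the same idea with the attempt cap made explicit; all names here are illustrative, not from the source:

import time
from typing import Callable, Optional, TypeVar

T = TypeVar("T")

def with_retries(fetch: Callable[[], Optional[T]], attempts: int = 3,
                 delay: float = 1.0) -> Optional[T]:
    # Call fetch() until it returns a non-None result or attempts run out.
    for _ in range(attempts):
        try:
            result = fetch()
            if result is not None:
                return result
        except Exception:
            pass
        time.sleep(delay)
    return None
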
Example #4
    def get_ipwhois_history(self, ip: str, reason: str) -> Iterator[IPWhoisData]:
        """Yield historical IP whois records for the given address."""
        try:

            url = '{}{}'.format(self._api_ipwhois_history, ip)

            html = self._ha.getstring(url,
                                      headers="""
            Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
            Accept-Encoding: gzip, deflate
            Accept-Language: en-US,en;q=0.9,zh;q=0.8
            Cache-Control: no-cache
            Host: {}
            Pragma: no-cache
            Proxy-Connection: keep-alive
            Upgrade-Insecure-Requests: 1
            User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"""
                                      .format(self._api_host),
                                      proxies=ProxyMngr.get_static_proxy(),
                                      verify=False)

            if html is None or html == '':
                return

            for iw in self._parse_ipwhois_history(ip, html, reason):
                if not isinstance(iw, IPWhoisData):
                    continue
                self._logger.info(
                    "Got an IPWhois: ip={} handle={} name={}".format(
                        ip, iw._handle, iw._netname))
                yield iw

        except Exception:
            self._logger.debug(
                "Get ipwhois history error: ip:{}, error: {}".format(
                    ip, traceback.format_exc()))
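
Since the method is a generator, callers consume the history lazily. A hedged usage sketch (the client object is again an assumption):

# Hypothetical usage: iterate the generator; records are yielded as parsed.
for record in client.get_ipwhois_history("8.8.8.8", reason="scout task"):
    print(record._handle, record._netname)
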
Example #5
    def run_logic_grabber(self, host: str, portinfo: PortInfo, **kwargs):
        try:
            outlog = kwargs.get('outlog')
            log = f"开始扫描漏洞: {WordPress.vuln}"
            self._logger.debug(log)
            outlog(log)
            urls = [
                f"http://{host}/wordpress/wp-content/plugins/wp-file-manager/lib/php/connector.minimal.php",
                f"http://{host}/wp-content/plugins/wp-file-manager/lib/php/connector.minimal.php"
            ]
            if portinfo.ssl_flag:
                urls = [
                    f"https://{host}/wordpress/wp-content/plugins/wp-file-manager/lib/php/connector.minimal.php",
                    f"https://{host}/wp-content/plugins/wp-file-manager/lib/php/connector.minimal.php"
                ]

            for url in urls:
                self._logger.debug(
                    f"Start WordpressFileMangerToolOk url:{url}")
                try:
                    p: ProxyItem = ProxyMngr.get_one_crosswall()
                    proxydict = None
                    if isinstance(p, ProxyItem):
                        proxydict = p.proxy_dict
                        self._logger.debug(
                            f"proxy ip: {p._ip}, port: {p._port}")
                    headers = f"""
                    Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
                    Accept-Encoding: gzip, deflate
                    Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
                    Cache-Control: no-cache
                    Host: {host}
                    Pragma: no-cache
                    Proxy-Connection: keep-alive
                    Upgrade-Insecure-Requests: 1
                    User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36
                    """
                    resp: Response = self._ha.get_response(url,
                                                           headers=headers,
                                                           verify=False,
                                                           timeout=10,
                                                           proxies=proxydict)  # kwarg assumed, as in getstring()
                    if resp is None or resp.status_code != 200:
                        self._logger.debug(
                            f"Cannot connect {url}, resp: {resp.status_code if resp is not None else None}"
                        )
                        continue
                    self._logger.debug(
                        f"Succeed get WordpressFileMangerToolOk, url:{url}")
                    siteinfo: SiteInfo = SiteInfo(url)
                    respheard = ""
                    for k, v in resp.headers.items():
                        respheard += f"{k}:{v}\n"
                    siteinfo.set_httpdata(None, None, respheard, resp.text)
                    wapres = 0
                    try:
                        self._logger.info(
                            f"Start WordpressFileMangerToolOk wappalyzer: {url}"
                        )
                        for com in self.ca.get_alyzer_res(level=1, url=url):
                            wapres += 1
                            siteinfo.set_components(com)
                    except Exception:
                        self._logger.error(
                            "Get WordpressFileMangerToolOk components error")
                    portinfo.set_siteinfo(siteinfo)
                    self._logger.info(
                        f"Stop WordpressFileMangerToolOk wappalyzer: {url} rescount:{wapres}"
                    )
                    log = f"{WordPress.vuln} 漏洞扫描完成"
                    outlog(log)
                except Exception as ex:
                    self._logger.debug(f"Request {url} error: {ex}")
                    continue
        except Exception as ex:
            self._logger.error(f"Wordpress error: {ex}")
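
Throughout these examples, headers are handed to the HTTP wrapper as a triple-quoted "Key: value" block rather than a dict. A sketch of how such a block could be parsed into a mapping, assuming that is roughly what self._ha does internally:

def parse_header_block(block: str) -> dict:
    # Split a "Key: value" multi-line string into a header dict,
    # skipping blank lines. Partition on the first colon only, so
    # values that themselves contain ':' survive intact.
    headers = {}
    for line in block.splitlines():
        line = line.strip()
        if not line or ":" not in line:
            continue
        key, _, value = line.partition(":")
        headers[key.strip()] = value.strip()
    return headers
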
Example #6
    def search(cls,
               query,
               tld='com',
               lang='en',
               tbs='0',
               safe='off',
               num=10,
               start=0,
               stop=None,
               domains=None,
               pause=2.0,
               tpe='',
               country='',
               extra_params=None,
               user_agent=None,
               proxydict: dict = None):
        """
        自己编写的google搜索程序采用代理
        """
        if not isinstance(proxydict, dict):
            proxydict = ProxyMngr.get_static_proxy()
            # cls._logger.debug(f"Google Search recommended to use proxydict")
        hashes = set()
        count = 0
        query = quote_plus(query)
        if not extra_params:
            extra_params = {}
        for builtin_param in cls.url_parameters:
            if builtin_param in extra_params.keys():
                raise ValueError(
                    'GET parameter "%s" is overlapping with '
                    'the built-in GET parameter' % builtin_param)
        sa = requests.Session()
        headers = {
            'accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'en-US,en;q=0.9',
            'cache-control': 'no-cache',
            'pragma': 'no-cache',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'none',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': cls.get_random_user_agent()
        }
        # Fetch the home page first to store its cookies
        sa.headers.update(headers)
        # For testing: without the proxy
        # sa.get(cls.url_home % vars(), timeout=10)
        # sa.get(cls.url_home % vars(), proxies=proxydict, timeout=10)
        if start:
            if num == 10:
                url = cls.url_next_page % vars()
            else:
                url = cls.url_next_page_num % vars()
        else:
            if num == 10:
                url = cls.url_search % vars()
            else:
                url = cls.url_search_num % vars()

        while not stop or count < stop:
            last_count = count
            for k, v in extra_params.items():
                k = quote_plus(k)
                v = quote_plus(v)
                url = url + ('&%s=%s' % (k, v))

            time.sleep(pause)

            html = cls.get_page(url, sa, proxydict=proxydict)
            soup = BeautifulSoup(html, 'html.parser')
            try:
                anchors = soup.find(id='search').findAll('a')
                # Sometimes (depending on the User-agent) there is
                # no id "search" in html response...
            except AttributeError:
                # Remove links of the top bar.
                gbar = soup.find(id='gbar')
                if gbar:
                    gbar.clear()
                anchors = soup.findAll('a')
            # Process every anchored URL.
            for a in anchors:

                # Get the URL from the anchor tag.
                try:
                    link = a['href']
                except KeyError:
                    continue

                # Filter invalid links and links pointing to Google itself.
                link = cls.filter_result(link)
                if not link:
                    continue

                # Discard repeated results.
                h = hash(link)
                if h in hashes:
                    continue
                hashes.add(h)

                # Yield the result.
                yield link

                # Increase the results counter.
                # If we reached the limit, stop.
                count += 1
                if stop and count >= stop:
                    return

            # End if there are no more results.
            # XXX TODO review this logic, not sure if this is still true!
            if last_count == count:
                break

            # Prepare the URL for the next request.
            start += num
            if num == 10:
                url = cls.url_next_page % vars()
            else:
                url = cls.url_next_page_num % vars()
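
A usage sketch for the generator above; the hosting class name GoogleSearch is an assumption, and the proxy argument may be omitted since the method falls back to ProxyMngr.get_static_proxy():

# Hypothetical usage: search() yields de-duplicated result URLs.
for link in GoogleSearch.search("site:example.com", num=10, stop=30):
    print(link)
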
Example #7
    def run_logic_grabber(self, host: str, portinfo: PortInfo, **kwargs):
        """
        2020/09/24
        wsy:这个漏洞的目录应该是/index.php/component/users/
        目前应该是搜集的指定目录的
        """
        try:
            outlog = kwargs.get('outlog')
            log = f"开始扫描漏洞: {JoomlaCareers.vuln}"
            outlog(log)
            failnum = 0
            url = None
            resp: Response = None
            got = False
            iserr = False
            while True:
                try:
                    p: ProxyItem = ProxyMngr.get_one_crosswall()
                    proxydict = None
                    if isinstance(p, ProxyItem):
                        proxydict = p.proxy_dict
                        self._logger.debug(
                            f"proxy ip: {p._ip}, port: {p._port}")
                    url = f"http://{host}/index.php/component/users"
                    if portinfo.ssl_flag:
                        url = f"https://{host}/index.php/component/users"
                    self._logger.debug(f"Start joomlacareers url:{url}")
                    try:
                        resp: Response = self._ha.get_response(
                            url,
                            timeout=10,
                            headers="""
                        accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
                        accept-encoding: gzip, deflate
                        accept-language: en-US,en;q=0.9
                        cache-control: no-cache
                        pragma: no-cache
                        sec-fetch-dest: document
                        sec-fetch-mode: navigate
                        sec-fetch-site: none
                        sec-fetch-user: ?1
                        upgrade-insecure-requests: 1
                        user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36""",
                            verify=False,
                            proxies=proxydict,  # kwarg assumed, as in getstring()
                        )
                    except Exception:
                        iserr = True

                    if iserr or resp is None or resp.status_code != 200:
                        # first attempt failed: retry with the opposite scheme
                        url = f"http://{host}/index.php/component/users"
                        if not portinfo.ssl_flag:
                            url = f"https://{host}/index.php/component/users"
                    else:
                        got = True
                        break

                    p: ProxyItem = ProxyMngr.get_one_crosswall()
                    proxydict = None
                    if isinstance(p, ProxyItem):
                        proxydict = p.proxy_dict
                        self._logger.debug(
                            f"proxy ip: {p._ip}, port: {p._port}")

                    iserr = False
                    try:
                        resp: Response = self._ha.get_response(
                            url,
                            timeout=10,
                            headers="""
                        accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
                        accept-encoding: gzip, deflate
                        accept-language: en-US,en;q=0.9
                        cache-control: no-cache
                        pragma: no-cache
                        sec-fetch-dest: document
                        sec-fetch-mode: navigate
                        sec-fetch-site: none
                        sec-fetch-user: ?1
                        upgrade-insecure-requests: 1
                        user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36""",
                            verify=False,
                            proxies=proxydict,  # kwarg assumed, as in getstring()
                        )
                    except Exception:
                        iserr = True

                    if iserr or resp is None or resp.status_code != 200:
                        failnum += 1
                        continue

                    got = True
                    break

                except Exception as ex:
                    self._logger.trace(f"Get {url} error: {ex}")
                    failnum += 1
                finally:
                    if not got or failnum >= 1:
                        break

            if iserr or resp is None or resp.status_code != 200:
                return
            self._logger.debug(f"Succeed get {JoomlaCareers.vuln}, url:{url}")
            siteinfo: SiteInfo = SiteInfo(url)
            respheader = ""
            for k, v in resp.headers.items():
                respheader += f"{k}:{v}\n"
            siteinfo.set_httpdata(None, None, respheader, resp.text)
            # 将组件信息加入到site里面
            wapres = 0
            try:
                self._logger.info(f"Start joomlacareers wappalyzer: {url}")
                for com in self.ca.get_alyzer_res(level=1, url=url):
                    wapres += 1
                    siteinfo.set_components(com)
            except Exception:
                self._logger.error("Get joomlacareers components error")
            portinfo.set_siteinfo(siteinfo)
            self._logger.info(
                f"Stop joomlacareers wappalyzer: {url} rescount:{wapres}")
            log = f"{JoomlaCareers.vuln} 漏洞扫描完成"
            outlog(log)
        except Exception as ex:
            self._logger.error(f"Joomla careers error: {ex}")
Example #8
    def get_whoisres(self, level, domain, reason):
        """
        访问whois,然后获取结果
        :return:
        """
        # registrar
        re_registrar = re.compile('Registrar:(.+?)<br />')
        re_registraremail = re.compile(
            'Registrar Abuse Contact Email: (.+?)<br />')
        re_registrarphone = re.compile(
            'Registrar Abuse Contact Phone: (.+?)<br />')
        # registrant
        re_registrant_1 = re.compile('Registrant Name:(.+?)<br />')
        re_registrant_2 = re.compile('Registrant:(.+?)<br />')
        re_registrantorg = re.compile('Registrant Organization:(.+?)<br />')
        re_registrantemail = re.compile('Registrant Email:(.+?)<br />')
        re_registrantphone = re.compile('Registrant Phone:(.+?)<br />')
        # registrant addr
        # addr
        re_registrantcountry = re.compile('Registrant Country:(.+?)<br />')
        re_registrantSP = re.compile('Registrant State/Province:(.+?)<br />')
        re_registrantcity = re.compile('Registrant City:(.+?)<br />')
        re_registrantstreet = re.compile('Registrant Street:(.+?)<br />')
        # time
        re_registtime = re.compile('Creation Date:(.+?)<br />')
        re_expiretime = re.compile('Expiration.+?:(.+?)<br />')
        re_infotime = re.compile('Updated Date:(.+?)<br />')
        # dns
        re_dns = re.compile('Name Server:(.+?)<br />')

        url = f'{self.web_url}/{domain.strip()}'
        response = self.ha.get(url, proxies=ProxyMngr.get_static_proxy())
        res_string = response.text

        # required fields
        registrar = None
        registtime = None
        registrar_info = re_registrar.search(res_string)
        if registrar_info:
            registrar = self._filter_a_lable(registrar_info.group(1))

        registtime_info = re_registtime.search(res_string)
        if registtime_info:
            registtime = registtime_info.group(1).replace('T', ' ').replace(
                'Z', '')
        # If the required fields are missing, this lookup found no data; return directly.
        if registrar is None or registtime is None:
            return
        whois = Whois(self.task, level, registrar, registtime)

        registraremail_info = re_registraremail.search(res_string)
        if registraremail_info:
            registraremail = self._filter_a_lable(registraremail_info.group(1))
            whois.registraremail = registraremail
            # yield the email here
            d_email = self._make_email(level, registraremail, reason)
            yield d_email

        registrarphone_info = re_registrarphone.search(res_string)
        if registrarphone_info:
            registrarphone = self.__format_phone(registrarphone_info.group(1))
            whois.registrarphone = registrarphone
            # yield the phone here
            d_phone = self._make_phone(level, registrarphone, reason)
            yield d_phone

        registrant_2_info = re_registrant_2.search(res_string)
        if registrant_2_info:
            whois.registrant = self._filter_a_lable(registrant_2_info.group(1))
        else:
            registrant_1_info = re_registrant_1.search(res_string)
            if registrant_1_info:
                whois.registrant = registrant_1_info.group(1)

        registrantorg_info = re_registrantorg.search(res_string)
        if registrantorg_info:
            whois.registrantorg = self._filter_a_lable(
                registrantorg_info.group(1))

        registrantemail_info = re_registrantemail.search(res_string)
        if registrantemail_info:
            registrantemail = self._filter_a_lable(
                registrantemail_info.group(1))
            whois.registrantemail = registrantemail
            # yield the email here
            dt_email = self._make_email(level, registrantemail, reason)
            yield dt_email

        registrantphone_info = re_registrantphone.search(res_string)
        if registrantphone_info:
            registrantphone = self.__format_phone(
                registrantphone_info.group(1))
            whois.registrantphone = registrantphone
            # yield the phone here as well
            dt_phone = self._make_phone(level, registrantphone, reason)
            yield dt_phone

        # assemble the address
        country = None
        registrantcountry_info = re_registrantcountry.search(res_string)
        if registrantcountry_info:
            country = registrantcountry_info.group(1)
        sp = None
        registrantSP_info = re_registrantSP.search(res_string)
        if registrantSP_info:
            sp = registrantSP_info.group(1)
        city = None
        registrantcity_info = re_registrantcity.search(res_string)
        if registrantcity_info:
            city = registrantcity_info.group(1)
        street = None
        registrantstreet_info = re_registrantstreet.search(res_string)
        if registrantstreet_info:
            street = registrantstreet_info.group(1)
        addr = ''
        if country is not None:
            addr += f'{country}/'
        if sp is not None:
            addr += f'{sp}/'
        if city is not None:
            addr += f'{city}/'
        if street is not None:
            addr += f'{street}'
        whois.registrantaddr = addr

        # finally, the timestamps
        expiretime_info = re_expiretime.search(res_string)
        if expiretime_info:
            whois.expiretime = expiretime_info.group(1).replace('T',
                                                                ' ').replace(
                                                                    'Z', '')
        infotime_info = re_infotime.search(res_string)
        if infotime_info:
            whois.infotime = infotime_info.group(1).replace('T', ' ').replace(
                'Z', '')
        dns_info = re_dns.findall(res_string)
        for el in dns_info:
            whois.set_dns_server(self._filter_a_lable(el))
        yield whois
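
The helper _filter_a_lable is referenced but not shown; since the raw whois page wraps some field values in anchor tags, a plausible sketch is a regex strip like the following (an assumption, not the original implementation):

import re

def filter_a_label(value: str) -> str:
    # Drop <a ...> and </a> markup from a matched whois field
    # and trim surrounding whitespace.
    return re.sub(r"</?a[^>]*>", "", value).strip()
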
Example #9
 def run_logic_grabber(self, host: str, portinfo: PortInfo, **kwargs):
     try:
         outlog = kwargs.get('outlog')
         log = f"开始扫描漏洞: {LotusSmtp.vuln}"
         self._logger.debug(log)
         outlog(log)
         failnum = 0
         url = None
         resp: Response = None
         got = False
         while True:
             url = f"http://{host}/names.nsf"
             if portinfo.ssl_flag:
                 url = f"https://{host}/names.nsf"
             self._logger.debug(f"Start lotussmtp url:{url}")
             try:
                 p: ProxyItem = ProxyMngr.get_one_crosswall()
                 proxydict = None
                 if isinstance(p, ProxyItem):
                     proxydict = p.proxy_dict
                     self._logger.debug(f"proxy ip: {p._ip}, port: {p._port}")
                 headers = f"""
                 Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
                 Accept-Encoding: gzip, deflate
                 Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
                 Cache-Control: no-cache
                 Host: {host}
                 Pragma: no-cache
                 Proxy-Connection: keep-alive
                 Upgrade-Insecure-Requests: 1
                 User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36
                 """
                 resp: Response = self._ha.get_response(
                     url,
                     headers=headers,
                     verify=False,
                     timeout=10,
                     allow_redirects=False,
                     proxies=proxydict  # kwarg assumed, as in getstring()
                 )
                 if resp is None or resp.status_code != 200:
                     self._logger.debug(f"Cannot connect {url}, resp: {resp.status_code if resp is not None else None}")
                     failnum += 1
                     continue
                 got = True
                 break
             except Exception as ex:
                 self._logger.debug(f"Request {url} error: {ex}")
                 failnum += 1
             finally:
                 if not got and failnum >= 3:
                     break
         if resp is None or resp.status_code != 200:
             self._logger.debug(
                 f"Connect {url} failed three times, got nothing, resp: {resp.status_code if resp is not None else None}")
             return
         self._logger.debug(f"Succeed get lotussmtp, url:{url}")
         siteinfo: SiteInfo = SiteInfo(url)
         respheard = ""
         for k, v in resp.headers.items():
             respheard += f"{k}:{v}\n"
         siteinfo.set_httpdata(None, None, respheard, resp.text)
         wapres = 0
         try:
             self._logger.info(f"Start lotussmtp wappalyzer: {url}")
             for com in self.ca.get_alyzer_res(level=1, url=url):
                 wapres += 1
                 siteinfo.set_components(com)
         except Exception:
             self._logger.error(
                 "Get lotussmtp components error"
             )
         portinfo.set_siteinfo(siteinfo)
         self._logger.info(f"Stop lotussmtp wappalyzer: {url} rescount:{wapres}")
         log = f"{LotusSmtp.vuln} 漏洞扫描完成"
         outlog(log)
     except Exception as ex:
         self._logger.error(f"Lotus smtp error: {ex}")
Example #10
    def run_logic_grabber(self, host: str, portinfo: PortInfo, **kwargs):
        try:
            outlog = kwargs.get('outlog')
            log = f"开始扫描漏洞: {HuaweiHg532.vuln}"
            outlog(log)

            self._logger.debug(log)

            failnum = 0
            url = None
            resp: Response = None
            got = False
            while True:
                url = f"http://{host}/ctrlt/DeviceUpgrade_1"

                self._logger.debug(f"Start huawei hg532 url:{url}")

                try:
                    p: ProxyItem = ProxyMngr.get_one_crosswall()
                    proxydict = None
                    if isinstance(p, ProxyItem):
                        proxydict = p.proxy_dict
                        self._logger.debug(
                            f"proxy ip: {p._ip}, port: {p._port}")

                    headers = """
                        'Authorization':'Digest username=dslf-config, realm=HuaweiHomeGateway, nonce=88645cefb1f9ede0e336e3569d75ee30, uri=/ctrlt/DeviceUpgrade_1, response=3612f843a42db38f48f59d2a3597e19c, algorithm=MD5, qop=auth, nc=00000001, cnonce=248d1a2560100669',
                        'User-Agent'        :   'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36',
                        'Accept-Encoding'   :   'gzip, deflate',
                        'Connection'        :   'keep-alive',
                        'Content-Type'      :   'application/x-www-form-urlencoded'
                    """

                    TEMPLATE = '<?xml version="1.0" ?> <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"> <s:Body><u:%s xmlns:u="urn:schemas-upnp-org:service:WANPPPConnection:1">%s</u:%s></s:Body></s:Envelope>'
                    data = TEMPLATE % (
                        'GetSoftwareVersion',
                        '<NewSoftwareVersion></NewSoftwareVersion>',
                        'GetSoftwareVersion')

                    resp: Response = self._ha.get_response(
                        url,
                        req_data=data,
                        headers=headers,
                        verify=False,
                        timeout=10,
                        allow_redirects=False,
                        proxies=proxydict)  # kwarg assumed, as in getstring()
                    if resp is None or resp.status_code != 200:
                        self._logger.debug(
                            f"Cannot connect {url}, resp: {resp.status_code if resp is not None else None}"
                        )
                        failnum += 1
                        continue
                    got = True
                    break
                except Exception as ex:
                    self._logger.debug(f"Request {url} error: {ex}")
                    failnum += 1
                finally:
                    if not got and failnum >= 3:
                        break

            if resp is None or resp.status_code != 200:
                self._logger.debug(
                    f"Connect {url} failed three times, got nothing, resp: {resp.status_code if resp is not None else None}"
                )
                return

            self._logger.debug(f"Succeed get huawei hg532, url:{url}")
            siteinfo: SiteInfo = SiteInfo(url)

            respheard = ""
            for k, v in resp.headers.items():
                respheard += f"{k}:{v}\n"

            siteinfo.set_httpdata(None, None, respheard, resp.text)

            if portinfo.service == 'unknown':
                portinfo.service = 'http'
            portinfo.set_siteinfo(siteinfo)
            log = f"{HuaweiHg532.vuln} 漏洞扫描完成"
            outlog(log)

        except Exception as ex:
            self._logger.error(f"Huawei hg532 vuln error: {ex}")
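
For reference, rendering the SOAP body built above in isolation shows what gets posted to /ctrlt/DeviceUpgrade_1; this uses only the template already present in the code:

# Render the GetSoftwareVersion probe body from the TEMPLATE shown above.
TEMPLATE = ('<?xml version="1.0" ?> <s:Envelope '
            'xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" '
            's:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"> '
            '<s:Body><u:%s xmlns:u="urn:schemas-upnp-org:service:'
            'WANPPPConnection:1">%s</u:%s></s:Body></s:Envelope>')
body = TEMPLATE % ('GetSoftwareVersion',
                   '<NewSoftwareVersion></NewSoftwareVersion>',
                   'GetSoftwareVersion')
print(body)  # the request body for the GetSoftwareVersion probe
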