Example 1
 def loop_connect_site(self, domain, subdomain):
     """循环测试URL"""
     log.write("[UI]start http test")
     self.known_subdomain = []
     # 计数,计算进度条的值
     count = 0
     for sdom in subdomain:
         url_s = "https://%s.%s/" % (sdom, domain)
         url = "http://%s.%s/" % (sdom, domain)
         code = sdl.request_head_s(url_s)
         if sdl.isOK(code):
             self.known_subdomain.append(url_s)
             # 探测到后发送结果
             self.trigger.emit(url_s)
         else:
             code = sdl.request_head(url)
             if sdl.isOK(code):
                 self.known_subdomain.append(url)
                 # 探测到后发送结果
                 self.trigger.emit(url)
         count += 1
         # 计算并发送进度条的值给进度条线程
         self.progressBarValue.emit(self.alg(count, self.subdomainCount))
     # 探测结束后发送结束信号
     self.trigger.emit("finish")
     log.write("[UI]finish connect")
Example 2
 def run(self):
     """重写run函数,执行爆破任务"""
     log.write("[UI]get domain " + self.domain)
     # 测试环境选择直接获取字典文件
     subdomain = sdl.get_dict_contents(self.dict)
     # 计算得到字典中子域名的个数,主要为进度条准备
     self.subdomainCount = len(subdomain)
     log.write("[UI]get subdomain from dictionary " + self.dict)
     self.loop_connect_site(self.domain, subdomain)
Example 3
 def connect_site_to_get_data(self, target_url):
     """Fetch a results page from the API and parse the JSON payload."""
     try:
         html = self.s.get(url=target_url,
                           headers=self.headers).content.decode()
         jdata = json.loads(html)
         self.get_subdomain(jdata)
         return jdata
     except (Timeout, ConnectionError):
         log.write("DNS-virusTotal:Proxy error or Internet error!")
         return None
Example 4
 def get_subdomain(self, jdata):
     """
     从返回的json数据中获取子域名并存储数据
     """
     try:
         for link in jdata['data']:
             self.subdomains.append(link['id'])
     except KeyError:
         log.write(
             "Maybe you do not have enough API quota, or your network is unstable."
         )
Example 5
 def execute(self):
     # Read the domain entered by the user
     self.getDomain()
     if self.domain_is_right():
         self.reset_listView()
         log.write("[" + self.domain + "]")
         if self.brute_checkBox.isChecked():
             self.start_brute()
         self.start_optional_features(proxies, virus_api_key)
     else:
         self.alertError("Invalid domain!")
Example 6
 def start_optional_features(self, proxies, virus_api_key):
     """判断并执行可选的爬虫和DNS解析功能"""
     search_engine = []
     if self.checkBox_1.isChecked():
         log.write("[UI]baidu start")
         search_engine.append("baidu")
     if self.checkBox_2.isChecked():
         log.write("[UI]bing start")
         search_engine.append("bing")
     if self.checkBox_3.isChecked():
         log.write("[UI]google start")
         search_engine.append("google")
     if search_engine:
         self.clicked_task += 1
         self.se_work = GSpider(self.domain, search_engine, proxies)
         # Status-bar messages; signal flow: sBarWork.statusBarValue --> se_work.trigger_tip --> show_status_bar
         self.sBarWork.statusBarValue.connect(self.se_work.trigger_tip)
         self.se_work.trigger_tip.connect(self.show_status_bar)
         self.se_work.start()
         self.se_work.trigger_subdomains.connect(self.show_spider_result)
     if self.checkBox_4.isChecked():
         self.clicked_task += 1
         log.write("[UI]DNS start")
         self.dns_work = GDns(self.domain, virus_api_key, proxies)
         # Status-bar messages; signal flow: sBarWork.statusBarValue --> dns_work.trigger_tip --> show_status_bar
         self.sBarWork.statusBarValue.connect(self.dns_work.trigger_tip)
         self.dns_work.trigger_tip.connect(self.show_status_bar)
         self.dns_work.start()
         self.dns_work.trigger_subdomains.connect(self.show_dns_result)
Example 7
 def run(self):
     self.trigger_tip.emit("start")
     # VirusTotal DNS resolution
     VirusTotal = virusTotal.Client(self.domain, self.virus_api_key)
     results = VirusTotal.run()
     if results:
         self.subdomains.extend(results)
     # ThreatCrowd DNS resolution
     ThreadCrowd = threadcrowd.Client(self.domain, self.proxies)
     self.subdomains.extend(ThreadCrowd.run())
     self.subdomains = deduplicate.remove_duplicate_data(self.subdomains)
     self.trigger_subdomains.emit(self.subdomains)
     log.write("[UI]DNS finished")
     self.trigger_tip.emit("finish")
Example 8
 def is_key_right(self):
     test_url = "https://www.virustotal.com/api/v3/domains/baidu.com/subdomains"
     try:
         html = self.s.get(test_url, headers=headers).text
         jdata = json.loads(html)
         try:
             # An 'error' field in the response means the API key is invalid
             if jdata['error']['code'] == 'WrongCredentialsError':
                 return False
         except KeyError:
             return True
     except (Timeout, ConnectionError):
         log.write("Proxy error or Internet error!")
         return False
Example 9
 def transmit_finished_count(self, unfinished_count):
     """Use the queue's unfinished_tasks attribute to compute the progress value and send it to the progress-bar thread."""
     all_task_count = self.subdomainCount
     finished_count = all_task_count - unfinished_count
     if unfinished_count == 0:
         self.progressBarValue.emit(100)
         # Signal completion once probing is done
         self.trigger.emit("finish")
         log.write("[UI]finish connect")
     elif self.transmited_count != finished_count:
         # Compute the progress value and send it to the progress-bar thread
         self.progressBarValue.emit(
             self.alg(finished_count, self.subdomainCount))
         # Record the value just sent
         self.transmited_count = finished_count
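Note: self.alg is not shown. A plausible sketch that maps a finished count onto a 0-100 progress value (assumed behavior, written as a standalone function):

def alg(finished_count, total_count):
    """Map finished/total onto an integer percentage for the progress bar (hypothetical)."""
    if total_count == 0:
        return 100
    return int(finished_count * 100 / total_count)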
Example 10
 def run(self, total_page=3):
     """
     建议total_page的值不要超过4,因为免费AIP一分钟内只能使用4次API
     """
     if self.is_key_right():
         log.write("virusTotal:API Key is right.Keep going!")
         jdata = self.connect_site_to_get_data(self.base_url)
         for page_num in range(total_page - 1):
             if self.is_have_next(jdata):
                 jdata = self.connect_site_to_get_data(
                     jdata['links']['next'])
             else:
                 break
         return self.subdomains
     else:
         log.write(API_KEY_WRONG_TIP)
Example 11
 def run(self):
     self.trigger_tip.emit("start")
     for client in self.search_engines:
         if client == 'baidu':
             Baidu = baidu.Client(self.domain)
             self.subdomains.extend(Baidu.run())
         elif client == 'google':
             Google = google.Client(self.domain, proxies=self.proxies)
             self.subdomains.extend(Google.run())
         elif client == 'bing':
             Bing = bing.Client(self.domain)
             self.subdomains.extend(Bing.run())
     self.subdomains = deduplicate.remove_duplicate_data(self.subdomains)
     self.trigger_subdomains.emit(self.subdomains)
     log.write("[UI]Spider finished")
     self.trigger_tip.emit("finish")
Example 12
 def run(self):
     """重写run函数,执行爆破任务"""
     log.write("[UI]get domain " + self.domain)
     # 测试环境选择直接获取字典文件
     subdomain = sdl.get_dict_contents(self.dict)
     # 计算得到字典中子域名的个数,主要为进度条准备
     self.subdomainCount = len(subdomain)
     log.write("[UI]get subdomain from dictionary " + self.dict)
     # 创建多线程传送数据所需的队列
     search_queue = queue.Queue(len(subdomain))
     log.write("[UI]finish new queue " + str(self.thread_num))
     self.init_queue(search_queue, subdomain)
     log.write("[UI]Init queue")
     self.multi_queue_connect(self.domain, search_queue, self.thread_num,
                              self.connect_site)
     log.write("[UI]finish connect")
     search_queue.join()
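Note: init_queue and multi_queue_connect are not shown. A minimal sketch of the queue plumbing this example relies on (one plausible implementation, not the project's actual code):

import queue
import threading

def init_queue(search_queue, subdomain):
    """Preload every dictionary entry into the work queue."""
    for sdom in subdomain:
        search_queue.put(sdom)

def multi_queue_connect(domain, search_queue, thread_num, worker):
    """Start thread_num daemon threads that drain the queue and probe each entry."""
    def consume():
        while True:
            try:
                sdom = search_queue.get_nowait()
            except queue.Empty:
                break
            try:
                worker(domain, sdom)
            finally:
                search_queue.task_done()  # lets search_queue.join() return
    for _ in range(thread_num):
        threading.Thread(target=consume, daemon=True).start()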
Example 13
def main(t, d, n, bt, se, dns):
    makeLogDir()
    makeSitesDir()
    domain = t
    print(logo)
    log.write("print logo")
    web_subdomains = []
    if bt:
        dict_name = d
        thread_num = n
        brute(domain, dict_name, thread_num)
    # If the se flag is set, run the search-engine spiders
    if se:
        web_subdomains.extend(start_search_engine(se, domain))
    # If the dns flag is set, run DNS resolution
    if dns:
        web_subdomains.extend(dns_resolution(domain))
    if se or dns:
        data_processing(web_subdomains)
        write_subdomains_to_file(domain+'.txt', web_subdomains)
Example 14
def brute(domain, dict_name, thread_num):
    """
    爆破功能
    """
    subdomain = get_dict_contents(dict_name)
    log.write("get dictionary")
    search_queue = queue.Queue(len(subdomain))
    log.write("finish new queue")
    init_queue(search_queue, subdomain)
    log.write("Init queue")
    multi_queue_connect(domain, search_queue, thread_num, connect_site)
    log.write("finish connect")
    search_queue.join()
    write_into_file(domain + ".txt")
Example 15
 def run(self, total_page=3):
     # A Session reuses one TCP connection across all requests
     s = Session()
     # Fetch total_page pages of results; testing shows each page holds 10 items
     for num in range(total_page):
         self.item_num = num * 10 + 1
         target = self.base_url % (self.domain, self.item_num)
         try:
             html = s.get(url=target, headers=self.headers).content.decode()
             html = BeautifulSoup(html, features="html.parser")
             self.find_subdomain(html)
         except Timeout:
             log.write("spider-bing:Timeout!")
         except ConnectionError:
             log.write("spider-bing:Internet Error!")
         except TypeError:
             log.write("spider-bing:Type Error!")
         except Exception:
             log.write("spider-bing:Unknown Error!")
     return self.subdomains
Example 16
 def run(self, total_page=3):
     # A Session reuses one TCP connection across all requests
     s = Session()
     for num in range(total_page):
         self.page_num = num * 10
         target = self.base_url % (self.domain, self.page_num)
         try:
             html = s.get(target, headers=self.headers).content.decode()
             html = BeautifulSoup(html, features="html.parser")
             self.find_subdomain(html)
         except Timeout:
             log.write("spider-baidu:Timeout!")
         except ConnectionError:
             log.write("spider-baidu:Internet Error!")
         except TypeError:
             log.write("spider-baidu:Type Error!")
         except Exception:
             log.write("spider-baidu:Unknown Error!")
     return self.subdomains
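Note: find_subdomain is not shown. One plausible sketch that pulls hostnames out of the parsed result page and keeps those under the target domain (assumed logic, not the project's actual code):

from urllib.parse import urlparse

def find_subdomain(self, html):
    """Collect hostnames from result links that fall under self.domain (hypothetical)."""
    for a in html.find_all('a'):
        host = urlparse(a.get('href') or '').netloc
        if host.endswith('.' + self.domain) and host not in self.subdomains:
            self.subdomains.append(host)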
Example 17
 def run(self):
     target = self.base_url % self.domain
     try:
         html = get(url=target,
                    headers=self.headers,
                    proxies=self.proxies,
                    verify=False).content.decode()
         jdata = json.loads(html)
         self.get_subdomains(jdata)
     except (Timeout, ConnectionError):
         log.write("DNS-threadcrowd:Proxy error or Internet error!")
     except json.decoder.JSONDecodeError:
         log.write("Response is not valid JSON; the target may sit behind a CDN.")
     except Exception:
         log.write("DNS-threadcrowd:Unknown error!")
     finally:
         return self.subdomains
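Note: get_subdomains is not shown. A hedged sketch, assuming the report JSON carries a top-level 'subdomains' list:

def get_subdomains(self, jdata):
    """Collect entries from the report's 'subdomains' list, if present (assumed response shape)."""
    for sub in jdata.get('subdomains') or []:
        self.subdomains.append(sub)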