Example #1
import hashlib
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED

# config, base, and crawlergoMain are internal modules of the scanner project.
def foxScanDetail(target):
    thread = ThreadPoolExecutor(config.ThreadNum)
    filename = hashlib.md5(target.encode("utf-8")).hexdigest()
    print("Start attsrc foxScan {}\nfilename : {}\n".format(target, filename))
    base.subScan(target, filename)
    # Collect subdomains
    while not config.target_queue.empty():
        current_target = config.target_queue.get()
        # Scan each collected target in turn
        if base.checkBlackList(current_target):
            req_pool = crawlergoMain.crawlergoGet(current_target)
            req_pool.add(current_target)
            i = 0
            all_task = []
            while len(req_pool) != 0:
                # Pop URLs from req_pool one at a time and scan each
                temp_url = req_pool.pop()
                current_filename = hashlib.md5(
                    temp_url.encode("utf-8")).hexdigest()
                i += 1
                one_t = thread.submit(threadPoolDetailScan, temp_url,
                                      current_filename)
                all_task.append(one_t)
                if i == 5 or len(req_pool) == 0:
                    i = 0
                    wait(all_task, return_when=ALL_COMPLETED)
                    all_task = []
    print("InPuT T4rGet {} Sc3n EnD#".format(target))
    return
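The loop above submits work to the ThreadPoolExecutor in batches of five, blocking with wait(..., return_when=ALL_COMPLETED) before starting the next batch. Below is a minimal, self-contained sketch of that idiom using only the standard library; process_url and the sample URLs are placeholders standing in for threadPoolDetailScan and req_pool, not part of the original project.

from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED

def process_url(url):
    # Placeholder for the real per-URL scan (threadPoolDetailScan above)
    print("scanning", url)

def batched_scan(urls, batch_size=5, workers=10):
    pool = ThreadPoolExecutor(workers)
    pending = []
    for n, url in enumerate(urls, start=1):
        pending.append(pool.submit(process_url, url))
        # Flush every batch_size submissions, mirroring the i == 5 check above
        if n % batch_size == 0:
            wait(pending, return_when=ALL_COMPLETED)
            pending = []
    # Wait for the final, possibly short batch
    wait(pending, return_when=ALL_COMPLETED)
    pool.shutdown()

batched_scan(["https://example.com/%d" % i for i in range(12)])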
Example #2
import hashlib

# Hx_config, base, crawlergoMain, WAF, and threadPoolScan are internal to the scanner project.
def foxScan(target):
    filename = hashlib.md5(target.encode("utf-8")).hexdigest()
    print(
        f"{Hx_config.yellow}{Hx_config.green}Start attsrc foxScan {target}\nfilename : {filename}\n{Hx_config.end}"
    )
    base.subScan(target, filename)
    # Convert the target queue to a list of collected subdomains
    target_list = base.from_queue_to_list(Hx_config.target_queue)
    base.ArlScan(name=target, target=target_list)  # Start the ARL scan; the first argument (target) is used as the filename
    print(
        f"{Hx_config.yellow}InPuT T4rGet {target} Sc3n Start!{Hx_config.end}")
    while not Hx_config.target_queue.empty():
        current_target = base.addHttpHeader(Hx_config.target_queue.get())
        try:
            if base.checkBlackList(current_target):
                # Scan each collected target in turn
                req_pool = crawlergoMain.crawlergoGet(
                    current_target)  # Returns the crawlergoGet result set: a collection of URL paths
                req_pool.add(current_target)  # Add the target itself to the set
                req_pool = WAF(req_pool).run_detect()
                base.save(
                    req_pool,
                    filepath=f"{Hx_config.Crawlergo_save_path}{target}.txt",
                    host=current_target)
                tempFilename = hashlib.md5(
                    current_target.encode("utf-8")).hexdigest()
                # URLs dynamically crawled from the target by crawlergoGet are kept in the req_pool set
                threadPoolScan(req_pool, tempFilename, target)
        except Exception:
            # Swallow per-target errors so one failing host does not abort the whole scan
            pass
    print(f"{Hx_config.yellow}InPuT T4rGet {target} Sc3n EnD#{Hx_config.end}")
    return
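base.from_queue_to_list is project-internal and its body is not shown here. Given its name, and the fact that the while-loop after it still consumes Hx_config.target_queue, a plausible sketch drains the queue into a list and re-enqueues each item; the re-enqueue step is an assumption, not confirmed by the source.

import queue

def from_queue_to_list(q):
    # Hypothetical sketch: collect every queued item into a list...
    items = []
    while not q.empty():
        items.append(q.get())
    # ...then push the items back so later consumers still see them
    # (assumed, since the caller keeps reading the queue afterwards).
    for item in items:
        q.put(item)
    return items

q = queue.Queue()
for t in ("a.example.com", "b.example.com"):
    q.put(t)
print(from_queue_to_list(q))  # ['a.example.com', 'b.example.com']; queue left intact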
Example #3
import hashlib

# config, base, and crawlergoMain are internal modules of the scanner project.
def foxScan(target):
    filename = hashlib.md5(target.encode("utf-8")).hexdigest()
    print("Start attsrc foxScan {}\nfilename : {}\n".format(target, filename))
    base.subScan(target, filename)
    # Collect subdomains
    while not config.target_queue.empty():
        current_target = config.target_queue.get()
        if base.checkBlackList(current_target):
            # Scan each collected target in turn
            req_pool = crawlergoMain.crawlergoGet(current_target)
            req_pool.add(current_target)
            # URLs dynamically crawled from the target by crawlergoGet are kept in the req_pool set
            threadPoolScan(req_pool, filename)
    print("InPuT T4rGet {} Sc3n EnD#".format(target))
    return
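base.checkBlackList gates every target before scanning in Examples #1 through #3, but its implementation is not shown. A purely illustrative stand-in, assuming it returns True when a target is safe to scan and that the blacklist is a collection of substrings (both assumptions; the entries below are invented):

def checkBlackList(target, blacklist=("gov.cn", "edu.cn")):
    # Hypothetical reconstruction, not the project's actual logic:
    # True when no blacklisted substring appears in the target.
    return not any(entry in target for entry in blacklist)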
Example #4
import hashlib

# base is an internal module of the scanner project.
def subGet(target):
    filename = hashlib.md5(target.encode("utf-8")).hexdigest()
    print("Start attsrc foxScan {}\nfilename : {}\n".format(target, filename))
    base.subScan(target, filename)
    return
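All four entry points share one calling convention: a single target string, with the output filename derived as the MD5 hex digest of that target. A hypothetical driver loop is sketched below; targets.txt and the commented-out call site are assumptions, not shown in the original project.

import hashlib

def run_all(path="targets.txt"):
    # Hypothetical driver: one target per line in an assumed targets.txt
    with open(path) as fh:
        for line in fh:
            target = line.strip()
            if not target:
                continue
            # Every example derives its output filename the same way:
            filename = hashlib.md5(target.encode("utf-8")).hexdigest()
            print("{} -> {}".format(target, filename))
            # foxScan(target)  # then hand off to one of the entry points above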