Example #1
import hashlib  # used to derive filenames from target hashes below

# The examples additionally rely on project-local modules that are not shown:
# base, Hx_config, config, crawlergoMain, WAF, and threadPoolScan.

def foxScan(target):
    filename = hashlib.md5(target.encode("utf-8")).hexdigest()
    print(
        f"{Hx_config.yellow}Start foxScan {target}\nfilename : {filename}\n{Hx_config.end}"
    )
    base.subScan(target, filename)
    # Convert the queue of collected subdomains into a list
    subdomain_list = base.from_queue_to_list(Hx_config.target_queue)
    base.ArlScan(name=target, target=subdomain_list)  # Start the ARL scan; the first argument is used as the filename
    print(
        f"{Hx_config.yellow}InPuT T4rGet {target} Sc3n Start!{Hx_config.end}")
    while not Hx_config.target_queue.empty():
        current_target = base.addHttpHeader(Hx_config.target_queue.get())
        try:
            if base.checkBlackList(current_target):
                # Scan each collected target in turn
                req_pool = crawlergoMain.crawlergoGet(
                    current_target)  # Returns the crawlergo result set: multiple URL paths
                req_pool.add(current_target)  # Add the target itself to the set
                req_pool = WAF(req_pool).run_detect()
                base.save(
                    req_pool,
                    filepath=f"{Hx_config.Crawlergo_save_path}{target}.txt",
                    host=current_target)
                tempFilename = hashlib.md5(
                    current_target.encode("utf-8")).hexdigest()
                # Page URLs crawled dynamically by crawlergoGet are kept in the req_pool set
                threadPoolScan(req_pool, tempFilename, target)
        except Exception as e:
            # Don't let one failing target silently abort the whole loop
            print(f"foxScan error on {current_target}: {e}")
    print(f"{Hx_config.yellow}InPuT T4rGet {target} Sc3n EnD#{Hx_config.end}")
    return
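
Every example in this section ends by handing the crawled URL set to threadPoolScan, whose definition is not shown. A minimal sketch of what such a helper could look like, built on concurrent.futures; the scan_one worker and the pool size are assumptions, not the project's actual implementation:

from concurrent.futures import ThreadPoolExecutor, as_completed

def threadPoolScan(req_pool, filename, target, max_workers=10):
    # Hypothetical sketch: fan the crawled URLs out to a thread pool.
    def scan_one(url):
        # Placeholder for the per-URL checks the real project runs here
        print(f"[{filename}] scanning {url} (origin: {target})")

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(scan_one, url) for url in req_pool]
        for future in as_completed(futures):
            future.result()  # re-raise any exception from a worker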
Example #2
def pppFoxScan(filename):
    print("Start pppFoxScan,filename is {}".format(filename))
    try:
        with open(filename, 'r') as f:
            lines = f.readlines()
            for line in lines:
                target = line.strip()
                target = base.addHttpHeader(target)
                config.ppp_queue.put(target)
    except Exception as e:
        print(e)
    while not config.ppp_queue.empty():
        current_target = config.ppp_queue.get()
        # Scan each collected target in turn
        currentfilename = hashlib.md5(
            current_target.encode("utf-8")).hexdigest()
        if base.checkBlackList(current_target):
            req_pool = crawlergoMain.crawlergoGet(current_target)
            req_pool.add(current_target)
            # Page URLs crawled dynamically by crawlergoGet are kept in the req_pool set
            threadPoolScan(req_pool, currentfilename, current_target)
        else:
            print("扫描网址在黑名单内,退出")
    print("pppFoxScan End~")
    return
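
These entry points lean on two base helpers that are not shown: addHttpHeader, which normalizes a bare host into a URL, and checkBlackList, which evidently returns True when the target may be scanned. A plausible sketch under those assumptions (the blacklist entries are invented for illustration):

BLACKLIST = ("gov.cn", "edu.cn")  # assumed entries, not from the project

def addHttpHeader(target):
    # Prepend a scheme if the input is a bare host name.
    if not target.startswith(("http://", "https://")):
        target = "http://" + target
    return target

def checkBlackList(target):
    # True means the target is NOT blacklisted and is safe to scan.
    return not any(entry in target for entry in BLACKLIST)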
Example #3
def oneFoxScan(target):
    if base.checkBlackList(target):
        target = base.addHttpHeader(target)
        filename = hashlib.md5(target.encode("utf-8")).hexdigest()
        print("Start foxScan {}\nfilename : {}\n".format(target, filename))
        req_pool = crawlergoMain.crawlergoGet(target)
        # Page URLs crawled dynamically by crawlergoGet are kept in the req_pool set
        req_pool.add(target)
        threadPoolScan(req_pool, filename, target)
    print("InPuT T4rGet {} Sc3n EnD#".format(target))
    return
Example #4
def oneFoxScan(target):
    if base.checkBlackList(target):
        target = base.addHttpHeader(target)
        filename = hashlib.md5(target.encode("utf-8")).hexdigest()
        print(
            f"{Hx_config.yellow}Start foxScan {target}\nfilename : {filename}\n{Hx_config.end}"
        )
        req_pool = crawlergoMain.crawlergoGet(target)
        # Page URLs crawled dynamically by crawlergoGet are kept in the req_pool set
        req_pool.add(target)
        threadPoolScan(req_pool, filename, target)
    else:
        print("扫描网址在黑名单内,退出")
    print(f"{Hx_config.yellow}InPuT T4rGet {target} Sc3n EnD#{Hx_config.end}")
    return
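
All of the entry points above follow one pattern: normalize the target, crawl it with crawlergo, then fan the resulting URL set out to the scan thread pool. A usage sketch, assuming the functions live in a module named scan (the module name is an assumption):

from scan import foxScan, pppFoxScan, oneFoxScan

foxScan("example.com")              # full scan: subdomain collection + ARL + crawl
pppFoxScan("targets.txt")           # batch scan, one target per line in the file
oneFoxScan("http://example.com")    # single target, no subdomain collection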