Example #1
0
 def run(self):
     """
     OneForAll entry point: for every target domain, collect (and
     optionally brute force) subdomains, resolve and request them,
     persist the results and export them from the database.
     """
     print(banner)
     dt = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
     print(f'[*] Starting OneForAll @ {dt}\n')
     logger.log('INFOR', '开始运行OneForAll')
     self.domains = utils.get_domains(self.target)
     if self.domains:
         for self.domain in self.domains:
             collect = Collect(self.domain, export=False)
             collect.run()
             if self.brute:
                 # Brute forcing issues many DNS queries; running it
                 # concurrently often makes requests in other tasks time out.
                 brute = AIOBrute(self.domain)
                 brute.run()
             table_name = self.domain.replace('.', '_')
             db_conn = database.connect_db()
             self.datas = database.get_data(db_conn, table_name).as_dict()
             # BUG FIX: the loop is closed at the end of every iteration,
             # so asyncio.get_event_loop() handed back an already-closed
             # loop for the second and later domains; create a fresh loop
             # per domain instead.
             loop = asyncio.new_event_loop()
             asyncio.set_event_loop(loop)
             self.datas = loop.run_until_complete(resolve.bulk_query_a(self.datas))
             self.datas = loop.run_until_complete(request.bulk_get_request(self.datas, self.port))
             # Small delay before closing the loop so underlying
             # connections can shut down cleanly.
             loop.run_until_complete(asyncio.sleep(0.25))
             loop.close()
             database.clear_table(db_conn, table_name)
             database.save_db(db_conn, table_name, self.datas)
             # Export from the database
             if not self.path:
                 self.path = config.result_save_path.joinpath(f'{self.domain}.{self.format}')
             dbexport.export(table_name, db_conn, self.valid, self.path, self.format, self.output)
             db_conn.close()
     else:
         logger.log('FATAL', '获取域名失败')
     logger.log('INFOR', '结束运行OneForAll')
Example #2
0
    def run(self):
        """
        Entry point of the class.

        Spawns one daemon thread per subdomain collection function,
        waits for them all to finish, optionally exports the results
        from the database, and records the elapsed time.
        """
        begin = time.time()
        logger.log('INFOR', f'开始收集{self.domain}的子域')
        self.get_mod()
        self.import_func()

        # One daemon thread per collection function
        workers = [
            threading.Thread(target=func, args=(self.domain, ), daemon=True)
            for func in self.collect_func
        ]
        # Launch every worker, then block until each has completed
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

        # Export results from the database if requested
        if self.export:
            if not self.path:
                filename = f'{self.domain}.{self.format}'
                self.path = config.result_save_path.joinpath(filename)
            dbexport.export(self.domain, path=self.path, format=self.format)
        self.elapsed = round(time.time() - begin, 1)
Example #3
0
    def main(self):
        """
        Main workflow for one domain: collect (and optionally brute
        force) subdomains, deduplicate them, optionally verify them by
        resolving and requesting, then persist, export and rotate the
        result tables.
        """
        if self.brute is None:
            self.brute = config.enable_brute_module
        if self.verify is None:
            self.verify = config.enable_verify_subdomain
        rename_table = self.domain + '_last'
        collect = Collect(self.domain, export=False)
        collect.run()
        if self.brute:
            # Brute forcing issues many DNS queries; concurrent brute
            # forcing may break network requests in other tasks.
            brute = AIOBrute(self.domain, export=False)
            brute.run()

        db = Database()
        db.copy_table(self.domain, self.domain + '_ori')
        db.remove_invalid(self.domain)
        db.deduplicate_subdomain(self.domain)
        # Export directly when subdomain verification is disabled
        if not self.verify:
            self.valid = None
            dbexport.export(self.domain,
                            valid=self.valid,
                            format=self.format,
                            show=self.show)
            db.drop_table(rename_table)
            db.rename_table(self.domain, rename_table)
            db.close()  # BUG FIX: connection was leaked on this early return
            return
        # Start verifying subdomains
        self.datas = db.get_data(self.domain).as_dict()
        loop = asyncio.get_event_loop()
        asyncio.set_event_loop(loop)

        # Resolve domain A records
        task = resolve.bulk_query_a(self.datas)
        self.datas = loop.run_until_complete(task)

        # Save resolution results
        resolve_table = self.domain + '_res'
        db.drop_table(resolve_table)
        db.create_table(resolve_table)
        db.save_db(resolve_table, self.datas, 'resolve')

        # Request the domains over HTTP
        task = request.bulk_get_request(self.datas, self.port)
        self.datas = loop.run_until_complete(task)
        # Small delay before closing the loop so underlying connections
        # can shut down cleanly
        loop.run_until_complete(asyncio.sleep(0.25))
        loop.close()  # BUG FIX: the event loop was never closed

        db.clear_table(self.domain)
        db.save_db(self.domain, self.datas)

        # Export from the database
        dbexport.export(self.domain,
                        valid=self.valid,
                        format=self.format,
                        show=self.show)
        db.drop_table(rename_table)
        db.rename_table(self.domain, rename_table)
        db.close()
Example #4
0
    def run(self, rx_queue=None):
        """
        Entry point: brute force subdomains (optionally recursively) for
        every target domain, drain the result queue into the database
        and optionally export.

        :param rx_queue: optional queue that brute tasks push
                         (source, results) tuples into; a fresh queue is
                         created when none is given
        """
        self.domains = utils.get_domains(self.target)
        while self.domains:
            self.domain = self.domains.pop()
            start = time.time()
            db = Database()
            db.create_table(self.domain)
            if not rx_queue:
                rx_queue = queue.Queue()
            logger.log('INFOR', f'开始执行{self.source}模块爆破域名{self.domain}')
            logger.log('INFOR', f'使用{self.process}进程乘{self.coroutine}协程')
            # Fuzz mode does not use recursive brute forcing
            if self.recursive_brute and not self.fuzz:
                logger.log('INFOR', f'开始递归爆破{self.domain}的第1层子域')
            loop = asyncio.get_event_loop()
            asyncio.set_event_loop(loop)
            loop.run_until_complete(self.main(self.domain, rx_queue))

            # Recursively brute force the next layer of subdomains
            # (fuzz mode does not use recursion)
            if self.recursive_brute and not self.fuzz:
                for layer_num in range(1, self.recursive_depth):
                    # Layer 1 was already brute forced above, so the actual
                    # recursion depth here is layer_num + 1
                    logger.log('INFOR', f'开始递归爆破{self.domain}的'
                               f'第{layer_num + 1}层子域')
                    for subdomain in self.subdomains.copy():
                        # Only recurse into subdomains exactly one label
                        # deeper per completed layer
                        if subdomain.count('.') - self.domain.count(
                                '.') == layer_num:
                            loop.run_until_complete(
                                self.main(subdomain, rx_queue))
            # Drain the queue and persist every result batch
            while not rx_queue.empty():
                source, results = rx_queue.get()
                # Save the batch into the database
                db.save_db(self.domain, results, source)

            end = time.time()
            self.elapsed = round(end - start, 1)
            logger.log('INFOR', f'结束执行{self.source}模块爆破域名{self.domain}')
            length = len(self.subdomains)
            logger.log(
                'INFOR', f'{self.source}模块耗时{self.elapsed}秒'
                f'发现{self.domain}的域名{length}个')
            logger.log(
                'DEBUG', f'{self.source}模块发现{self.domain}的域名:\n'
                f'{self.subdomains}')
            # Export from the database
            # NOTE(review): self.path is only assigned when empty, so every
            # domain after the first is exported to the first domain's
            # path — confirm this is intended
            if self.export:
                if not self.path:
                    name = f'{self.domain}_brute.{self.format}'
                    self.path = config.result_save_path.joinpath(name)
                dbexport.export(self.domain,
                                valid=self.valid,
                                path=self.path,
                                format=self.format,
                                show=self.show)
Example #5
0
    def run(self):
        """
        Entry point: brute force subdomains (optionally recursively) for
        every target domain and export the results.
        """
        logger.log('INFOR', f'Start running {self.source} module')
        if self.check_env:
            utils.check_env()
        self.domains = utils.get_domains(self.target, self.targets)
        for self.domain in self.domains:
            self.results = list()  # reset per domain
            all_subdomains = list()
            self.check_brute_params()
            if self.recursive_brute:
                logger.log(
                    'INFOR', f'Start recursively brute the 1 layer subdomain'
                    f' of {self.domain}')
            valid_subdomains = self.main(self.domain)

            all_subdomains.extend(valid_subdomains)

            # Recursively brute force the next layer of subdomains
            # (fuzz mode does not use recursion)
            if self.recursive_brute:
                for layer_num in range(1, self.recursive_depth):
                    # Layer 1 was already brute forced above, so the actual
                    # recursion depth here is layer_num + 1
                    logger.log(
                        'INFOR',
                        f'Start recursively brute the {layer_num + 1} '
                        f'layer subdomain of {self.domain}')
                    # NOTE(review): all_subdomains is extended while being
                    # iterated; newly appended (deeper) entries fail the
                    # num == layer_num check, but confirm this is intended
                    for subdomain in all_subdomains:
                        self.place = '*.' + subdomain
                        # Only recurse into subdomains exactly one label
                        # deeper per completed layer
                        num = subdomain.count('.') - self.domain.count('.')
                        if num == layer_num:
                            valid_subdomains = self.main(subdomain)
                            all_subdomains.extend(valid_subdomains)

            logger.log(
                'INFOR',
                f'Finished {self.source} module to brute {self.domain}')
            # NOTE(review): self.path is only assigned when empty, so
            # domains after the first reuse the first domain's export
            # path — confirm this is intended
            if not self.path:
                name = f'{self.domain}_brute_result.{self.format}'
                self.path = settings.result_save_dir.joinpath(name)
            # Export from the database
            if self.export:
                dbexport.export(self.domain,
                                type='table',
                                alive=self.alive,
                                limit='resolve',
                                path=self.path,
                                format=self.format)
Example #6
0
    def main(self):
        """
        Main workflow for one domain: collect (and optionally brute
        force) subdomains, deduplicate, resolve and request them, then
        persist, export and rotate the result tables.
        """
        collect = Collect(self.domain, export=False)
        collect.run()
        if self.brute:
            # Brute forcing issues many DNS queries; concurrent brute
            # forcing may break network requests in other tasks.
            brute = AIOBrute(self.domain, export=False)
            brute.run()

        db = Database()
        db.copy_table(self.domain, self.domain+'_ori')
        db.remove_invalid(self.domain)
        db.deduplicate_subdomain(self.domain)
        self.datas = db.get_data(self.domain).as_dict()
        loop = asyncio.get_event_loop()
        asyncio.set_event_loop(loop)

        # Resolve domain A records
        task = resolve.bulk_query_a(self.datas)
        self.datas = loop.run_until_complete(task)

        # Save resolution results
        resolve_table = self.domain + '_res'
        db.drop_table(resolve_table)
        db.create_table(resolve_table)
        db.save_db(resolve_table, self.datas, 'resolve')

        # Request the domains over HTTP
        task = request.bulk_get_request(self.datas, self.port)
        self.datas = loop.run_until_complete(task)
        # Small delay before closing the loop so underlying connections
        # can shut down cleanly
        loop.run_until_complete(asyncio.sleep(0.25))
        loop.close()

        db.clear_table(self.domain)
        db.save_db(self.domain, self.datas)

        # Export from the database
        if not self.path:
            name = f'{self.domain}.{self.format}'
            self.path = config.result_save_path.joinpath(name)
        dbexport.export(self.domain, db.conn, self.valid, self.path,
                        self.format, self.output)

        # Rotate: current results become the "last" table
        rename_table = self.domain + '_last'
        db.drop_table(rename_table)
        db.rename_table(self.domain, rename_table)
Example #7
0
    def run(self):
        """
        Class entrance: run every collection module in its own daemon
        thread, bounded by the configured per-module timeout, then
        export the results and record the elapsed time.
        """
        begin = time.time()
        logger.log('INFOR', f'Start collecting subdomains of {self.domain}')
        self.get_mod()
        self.import_func()

        # Build one daemon thread per collection module
        workers = []
        for func_obj, func_name in self.collect_funcs:
            workers.append(threading.Thread(target=func_obj,
                                            name=func_name,
                                            args=(self.domain, ),
                                            daemon=True))
        # Kick off every worker
        for worker in workers:
            worker.start()
        for worker in workers:
            # Join each thread with a timeout: worst case the main thread
            # blocks for len(workers) * module_thread_timeout seconds.
            # Timed-out threads are daemons, so they die with the process.
            worker.join(settings.module_thread_timeout)

        # Report any module that is still running after its timeout
        for worker in workers:
            if worker.is_alive():
                logger.log('ALERT', f'{worker.name} module thread timed out')

        # Export
        if self.export:
            if not self.path:
                filename = f'{self.domain}.{self.format}'
                self.path = settings.result_save_dir.joinpath(filename)
            dbexport.export(self.domain,
                            type='table',
                            path=self.path,
                            format=self.format)
        self.elapse = round(time.time() - begin, 1)
Example #8
0
    def run(self):
        """
        Entry point: brute force subdomains (optionally recursively) for
        every target domain and export the results.
        """
        logger.log('INFOR', f'开始执行{self.source}模块')
        if self.check_env:
            utils.check_env()
        self.domains = utils.get_domains(self.target)
        # NOTE(review): shared across all target domains, so subdomains from
        # a previous domain are re-scanned in later iterations — confirm
        all_subdomains = list()
        for self.domain in self.domains:
            self.check_brute_params()
            if self.recursive_brute:
                logger.log('INFOR', f'开始递归爆破{self.domain}的第1层子域')
            valid_subdomains = self.main(self.domain)
            all_subdomains.extend(valid_subdomains)

            # Recursively brute force the next layer of subdomains
            # (fuzz mode does not use recursion)
            if self.recursive_brute:
                for layer_num in range(1, self.recursive_depth):
                    # Layer 1 was already brute forced above, so the actual
                    # recursion depth here is layer_num + 1
                    logger.log('INFOR', f'开始递归爆破{self.domain}的'
                               f'第{layer_num + 1}层子域')
                    for subdomain in all_subdomains:
                        self.place = '*.' + subdomain
                        # Only recurse into subdomains exactly one label
                        # deeper per completed layer
                        num = subdomain.count('.') - self.domain.count('.')
                        if num == layer_num:
                            valid_subdomains = self.main(subdomain)
                            all_subdomains.extend(valid_subdomains)

            logger.log('INFOR', f'结束执行{self.source}模块爆破域名{self.domain}')
            # NOTE(review): self.path is only assigned when empty, so domains
            # after the first reuse the first domain's export path — confirm
            if not self.path:
                name = f'{self.domain}_brute_result.{self.format}'
                self.path = config.result_save_dir.joinpath(name)
            # Export from the database
            if self.export:
                dbexport.export(self.domain,
                                alive=self.alive,
                                limit='resolve',
                                path=self.path,
                                format=self.format)
Example #9
0
    def run(self):
        """
        Class entry point: run every subdomain collection module in its
        own daemon thread with a per-thread timeout, then export the
        results from the database and record the elapsed time.
        """
        begin = time.time()
        logger.log('INFOR', f'开始收集{self.domain}的子域')
        self.get_mod()
        self.import_func()

        # Build one daemon thread per collection module
        workers = []
        for func_obj, func_name in self.collect_funcs:
            workers.append(threading.Thread(target=func_obj,
                                            name=func_name,
                                            args=(self.domain,),
                                            daemon=True))
        # Kick off every worker
        for worker in workers:
            worker.start()
        for worker in workers:
            # Join with a timeout: worst case the main thread blocks for
            # len(workers) * module_thread_timeout seconds. Timed-out
            # threads are daemons and die with the main thread.
            worker.join(config.module_thread_timeout)

        # Report any module still running after its timeout
        for worker in workers:
            if worker.is_alive():
                logger.log('ALERT', f'{worker.name}模块线程发生超时')

        # Export from the database if requested
        if self.export:
            if not self.path:
                filename = f'{self.domain}.{self.format}'
                self.path = config.result_save_path.joinpath(filename)
            dbexport.export(self.domain, path=self.path, format=self.format)
        self.elapsed = round(time.time() - begin, 1)
Example #10
0
    def run(self, rx_queue=None):
        """
        Class entry point: run collection modules in threads,
        post-process the results table and optionally export it.

        :param rx_queue: optional result queue shared with the
                         collection threads; created here when omitted
        """
        start = time.time()
        self.get_mod()
        self.import_func()

        if not rx_queue:
            rx_queue = queue.Queue(maxsize=len(self.collect_func))  # result queue
        threads = []
        # Create one thread per subdomain collection function
        for collect_func in self.collect_func:
            thread = threading.Thread(target=collect_func,
                                      args=(self.domain, rx_queue),
                                      daemon=True)
            threads.append(thread)
        # Start all threads
        for thread in threads:
            thread.start()
        # Wait for all threads to finish
        for thread in threads:
            thread.join()

        # Post-process the collected data in the database
        db_conn = database.connect_db()
        table_name = self.domain.replace('.', '_')
        database.create_table(db_conn, table_name)
        database.copy_table(db_conn, table_name)
        database.deduplicate_subdomain(db_conn, table_name)
        database.remove_invalid(db_conn, table_name)
        db_conn.close()
        # Export from the database
        if self.export:
            if not self.path:
                self.path = config.result_save_path.joinpath(
                    f'{self.domain}.{self.format}')
            dbexport.export(table_name, path=self.path, format=self.format)
        end = time.time()
        self.elapsed = round(end - start, 1)
Example #11
0
    def export(self, table):
        """
        Export data from the database, then rotate the result tables.

        :param table: name of the table to export
        :return: the exported data
        :rtype: list
        """
        database = Database()
        exported = dbexport.export(table, alive=self.alive, format=self.format)
        database.drop_table(self.new_table)
        database.rename_table(self.domain, self.new_table)
        database.close()
        return exported
Example #12
0
    def export(self, table):
        """
        Export data from the database, then rotate the result tables.

        :param table: name of the table to export
        :return: the exported data
        :rtype: list
        """
        database = Database()
        exported = dbexport.export(table, type='table',
                                   alive=self.alive, format=self.format)
        database.drop_table(self.new_table)
        database.rename_table(self.domain, self.new_table)
        database.close()
        return exported
Example #13
0
    def main(self):
        """
        Main workflow for one domain: collect (and optionally brute
        force) subdomains, deduplicate, then optionally resolve and
        request them, persisting and exporting results along the way,
        and finally check for subdomain takeover.
        """
        if self.brute is None:
            self.brute = config.enable_brute_module
        if self.dns is None:
            self.dns = config.enable_dns_resolve
        if self.req is None:
            self.req = config.enable_http_request
        old_table = self.domain + '_last_result'
        new_table = self.domain + '_now_result'
        collect = Collect(self.domain, export=False)
        collect.run()
        if self.brute:
            # Brute forcing issues many DNS queries; concurrent brute
            # forcing may break network requests in other tasks.
            brute = AIOBrute(self.domain, export=False)
            brute.run()

        db = Database()
        original_table = self.domain + '_original_result'
        db.copy_table(self.domain, original_table)
        db.remove_invalid(self.domain)
        db.deduplicate_subdomain(self.domain)

        old_data = []
        # Database preprocessing when this is not the first collection run
        if db.exist_table(new_table):
            db.drop_table(old_table)  # drop the previous run's table if present
            db.rename_table(new_table, old_table)  # new table becomes the old one
            old_data = db.get_data(old_table).as_dict()

        # Export directly without resolving subdomains
        if not self.dns:
            # Export from the database
            dbexport.export(self.domain,
                            valid=self.valid,
                            format=self.format,
                            show=self.show)
            db.drop_table(new_table)
            db.rename_table(self.domain, new_table)
            db.close()
            return

        self.data = db.get_data(self.domain).as_dict()

        # Mark newly discovered subdomains
        self.data = utils.mark_subdomain(old_data, self.data)

        # Get the event loop
        loop = asyncio.get_event_loop()
        asyncio.set_event_loop(loop)

        # Resolve subdomains
        task = resolve.bulk_resolve(self.data)
        self.data = loop.run_until_complete(task)

        # Save resolution results
        resolve_table = self.domain + '_resolve_result'
        db.drop_table(resolve_table)
        db.create_table(resolve_table)
        db.save_db(resolve_table, self.data, 'resolve')

        # Export directly without requesting subdomains
        if not self.req:
            # Export from the database
            dbexport.export(resolve_table,
                            valid=self.valid,
                            format=self.format,
                            show=self.show)
            db.drop_table(new_table)
            db.rename_table(self.domain, new_table)
            db.close()
            return

        # Request subdomains over HTTP
        task = request.bulk_request(self.data, self.port)
        self.data = loop.run_until_complete(task)
        self.datas.extend(self.data)
        # Small delay before closing the loop so underlying connections
        # can shut down cleanly
        loop.run_until_complete(asyncio.sleep(0.25))
        count = utils.count_valid(self.data)
        logger.log('INFOR', f'经验证{self.domain}有效子域{count}个')

        # Save request results
        db.clear_table(self.domain)
        db.save_db(self.domain, self.data, 'request')

        # Export from the database
        dbexport.export(self.domain,
                        valid=self.valid,
                        format=self.format,
                        show=self.show)
        db.drop_table(new_table)
        db.rename_table(self.domain, new_table)
        db.close()

        # Subdomain takeover check
        if self.takeover:
            subdomains = set(map(lambda x: x.get('subdomain'), self.data))
            takeover = Takeover(subdomains)
            takeover.run()
Example #14
0
    def main(self):
        """
        Main workflow for one domain: collect (and optionally brute
        force) subdomains, deduplicate, optionally verify them by
        resolving and requesting, then persist, export and rotate the
        result tables, and finally check for subdomain takeover.
        """
        if self.brute is None:
            self.brute = config.enable_brute_module
        if self.verify is None:
            self.verify = config.enable_verify_subdomain
        old_table = self.domain + '_last'
        new_table = self.domain + '_now'
        collect = Collect(self.domain, export=False)
        collect.run()
        if self.brute:
            # Brute forcing issues many DNS queries; concurrent brute
            # forcing may break network requests in other tasks.
            brute = AIOBrute(self.domain, export=False)
            brute.run()

        db = Database()
        db.copy_table(self.domain, self.domain+'_ori')
        db.remove_invalid(self.domain)
        db.deduplicate_subdomain(self.domain)

        old_data = []
        # Database preprocessing when this is not the first collection run
        if db.exist_table(new_table):
            db.drop_table(old_table)  # drop the previous run's table if present
            db.rename_table(new_table, old_table)  # new table becomes the old one
            old_data = db.get_data(old_table).as_dict()

        # Export directly when subdomain verification is disabled
        # NOTE(review): this branch returns without db.close() — confirm
        # whether the connection leak is intentional
        if not self.verify:
            # Export from the database
            self.valid = None
            dbexport.export(self.domain, valid=self.valid,
                            format=self.format, show=self.show)
            db.drop_table(new_table)
            db.rename_table(self.domain, new_table)
            return
        # Start verifying subdomains
        self.data = db.get_data(self.domain).as_dict()

        # Mark newly discovered subdomains
        self.data = utils.mark_subdomain(old_data, self.data)

        loop = asyncio.get_event_loop()
        asyncio.set_event_loop(loop)

        # Resolve domain A records
        task = resolve.bulk_query_a(self.data)
        self.data = loop.run_until_complete(task)

        # Save resolution results
        resolve_table = self.domain + '_res'
        db.drop_table(resolve_table)
        db.create_table(resolve_table)
        db.save_db(resolve_table, self.data, 'resolve')

        # Request the domains over HTTP
        task = request.bulk_get_request(self.data, self.port)
        self.data = loop.run_until_complete(task)
        # Small delay before closing the loop so underlying connections
        # can shut down cleanly
        loop.run_until_complete(asyncio.sleep(0.25))

        db.clear_table(self.domain)
        db.save_db(self.domain, self.data)

        # Export from the database
        dbexport.export(self.domain, valid=self.valid,
                        format=self.format, show=self.show)
        db.drop_table(new_table)
        db.rename_table(self.domain, new_table)
        db.close()
        # Subdomain takeover check

        if self.takeover:
            subdomains = set(map(lambda x: x.get('subdomain'), self.data))
            takeover = Takeover(subdomains)
            takeover.run()
Example #15
0
#!/usr/bin/env python3
# coding=utf-8
"""
Example
"""

from oneforall import OneForAll
from dbexport import export


def oneforall(target):
    """Run a full OneForAll scan against *target* with brute forcing,
    HTTP requesting and subdomain takeover checking all enabled."""
    scanner = OneForAll(target=target)
    scanner.brute = True
    scanner.req = True
    scanner.takeover = True
    scanner.run()


if __name__ == '__main__':
    # Script entry: scan the example domain, then export its results.
    TARGET = 'freebuf.com'
    oneforall(target=TARGET)
    export(target=TARGET)