Example #1
async def aio_resolve(subdomain_list, process_num, coroutine_num):
    """
    异步解析子域A记录

    :param list subdomain_list: 待解析的子域列表
    :param int process_num: 解析进程数
    :param int coroutine_num: 每个解析进程下的协程数
    :return: 解析结果
    """
    m = Manager()
    done_obj = m.Value('done', 0)  # create a value that can be shared across processes
    loop = asyncio.get_event_loop()
    loop.run_in_executor(None, resolve_progress_func,
                         done_obj, len(subdomain_list))
    wrapped_resolve_func = functools.partial(do_resolve, done_obj)
    result_list = list()
    # On macOS a multiprocessing queue can hold at most 2**15 - 1 = 32767 items
    # https://stackoverflow.com/questions/5900985/multiprocessing-queue-maxsize-limit-is-32767
    if sys.platform == 'darwin':
        split_subdomain_list = utils.split_list(subdomain_list, 32767)
        for current_subdomain_list in split_subdomain_list:
            async with aiomp.Pool(processes=process_num,
                                  childconcurrency=coroutine_num) as pool:
                result = await pool.map(wrapped_resolve_func,
                                        current_subdomain_list)
                result_list.extend(result)
        return result_list
    async with aiomp.Pool(processes=process_num,
                          childconcurrency=coroutine_num) as pool:
        result_list = await pool.map(wrapped_resolve_func, subdomain_list)
        return result_list
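The `do_resolve` and `resolve_progress_func` helpers referenced above are not part of the snippet. A minimal sketch of what they might look like, assuming `do_resolve` performs a single async A-record query with aiodns and `resolve_progress_func` polls the shared counter from a thread (both bodies are assumptions, not the original implementation):

import time
import aiodns

async def do_resolve(done_obj, subdomain):
    # Resolve one subdomain's A record, then bump the shared progress counter.
    resolver = aiodns.DNSResolver()
    try:
        answers = await resolver.query(subdomain, 'A')
        result = (subdomain, [answer.host for answer in answers])
    except aiodns.error.DNSError:
        result = (subdomain, None)
    done_obj.value += 1  # Manager().Value is visible across worker processes
    return result

def resolve_progress_func(done_obj, total):
    # Runs in the default thread executor; reports progress until all tasks finish.
    while done_obj.value < total:
        print(f'resolved {done_obj.value}/{total}', end='\r')
        time.sleep(1)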
Example #2
async def main(urls, backs):

    all_tasks = []
    for b in backs:
        for u in urls:
            all_tasks.append(u + b)
    for b in backup_suffix:
        for u in urls:
            try:
                all_tasks.append(u + '/' + u.split('//')[1].split('.')[1] + b)
            except IndexError:
                pass
            try:
                all_tasks.append(u + '/' + u.split('//')[1] + b)
            except IndexError:
                pass
            try:
                all_tasks.append(u + '/' + u.split('.', 1)[1].replace('/', '') + b)
            except IndexError:
                pass
    print('Number of targets: {}'.format(len(all_tasks)))
    time.sleep(2)
    if len(all_tasks) > 2000000:
        print('The target list is very large; the scan may run out of memory')
    async with aiomultiprocess.Pool() as pool:
        await pool.map(run, all_tasks)
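The `run` worker passed to `pool.map` is not shown in this snippet or in the related backup-scanner examples below. A minimal sketch under the assumption that it probes each candidate URL over HTTP and reports hits (the real implementation may differ):

import aiohttp

async def run(url):
    # Probe one candidate backup URL; report it if the server answers 200.
    try:
        timeout = aiohttp.ClientTimeout(total=10)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.get(url) as resp:
                if resp.status == 200:
                    print('Found: {}'.format(url))
    except Exception:
        pass  # unreachable hosts are simply skipped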
Example #3
 async def main(self, domain, rx_queue):
     if not self.fuzz:  # fuzz mode skips detecting whether the domain uses wildcard DNS
         self.enable_wildcard, self.wildcard_ips, self.wildcard_ttl = detect_wildcard(
             domain)
     tasks = self.gen_tasks(domain)
     logger.log('INFOR', f'Brute forcing subdomains of {domain}')
     for task in tqdm.tqdm(tasks,
                           desc='Progress',
                           smoothing=1.0,
                           ncols=80):  # ncols is an int terminal width
         async with aiomp.Pool(processes=self.process,
                               initializer=init_worker,
                               childconcurrency=self.coroutine) as pool:
             try:
                 results = await pool.map(resolve.aiodns_query_a, task)
             except KeyboardInterrupt:
                 logger.log('ALERT', 'Brute force aborted, exiting')
                 pool.terminate()  # shut down the pool, stop the workers, drop unfinished tasks
                 self.save_json()
                 self.gen_result()
                 rx_queue.put(self.results)
                 return
             self.deal_results(results)
             self.save_json()
             self.gen_result()
             rx_queue.put(self.results)
Example #4
 async def get_result_from_dns(self, subhosts):
     res = set()
     async with aiomultiprocess.Pool(
             processes=processes,
             childconcurrency=childconcurrency) as pool:
         # To saturate the CPU, raise the process and coroutine counts above
         results = await pool.map(self.Aio_Subdomain, subhosts)
     for result in results:
         subdomain, answers = result
         if answers is not None and subdomain is not None:
             # res.add(subdomain)
             if answers != self.FakeDomain_IP:
                 # wildcard resolution is ruled out here; fine, but not strictly necessary
                 res.add(subdomain)
             else:
                 try:
                     close_old_connections()
                     BLACKURL.objects.create(url='http://' + subdomain,
                                             ip=str(answers),
                                             title=RequestsTitle('http://' +
                                                                 subdomain),
                                             resons='wildcard-DNS filtered or the URL is unreachable')
                 except Exception:
                     pass
                 res.add(self.FakeDomain_IP)
     return list(res)
Example #5
 async def main(self, domain, rx_queue):
     if not self.fuzz:  # fuzz mode skips detecting whether the domain uses wildcard DNS
         self.enable_wildcard, self.wildcard_ips, self.wildcard_ttl \
             = detect_wildcard(domain)
     tasks = self.gen_tasks(domain)
     logger.log('INFOR', f'Brute forcing subdomains of {domain}')
     # for task in tqdm.tqdm(tasks, total=len(tasks),
     #                       desc='Progress'):
     m = Manager()
     pr_queue = m.Queue()
     loop = asyncio.get_event_loop()
     loop.run_in_executor(None, progress, pr_queue, len(tasks))
     wrapped_query = functools.partial(aiodns_query_a, pr_queue)
     async with aiomp.Pool(processes=self.process,
                           initializer=init_worker,
                           childconcurrency=self.coroutine) as pool:
         try:
             results = await pool.map(wrapped_query, tasks)
         except KeyboardInterrupt:
             logger.log('ALERT', 'Brute force aborted, exiting')
             pool.terminate()  # shut down the pool, stop the workers, drop unfinished tasks
             self.save_json()
             self.gen_result()
             rx_queue.put(self.results)
             return
         self.deal_results(results)
         self.save_json()
         self.gen_result()
         rx_queue.put(self.results)
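`init_worker` is not shown in Examples #3 and #5. A common pattern, and a plausible sketch here (an assumption), is to have worker processes ignore SIGINT so that Ctrl-C reaches only the parent, which then runs the `KeyboardInterrupt` branch above:

import signal

def init_worker():
    # Workers ignore SIGINT; the parent catches KeyboardInterrupt,
    # calls pool.terminate(), and saves the partial results.
    signal.signal(signal.SIGINT, signal.SIG_IGN)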
Example #6
async def main():
    tasks = []
    while not q.empty():
        url = q.get()
        tasks.append(url)
    async with aiomultiprocess.Pool() as pool:
        result = await pool.map(fetch, tasks)
Example #7
    async def run(indicators: dict) -> None:
        """Accept a dict containing yara indicators and write out to the OUTPUT_DIR specified by chirp.common.

        :param indicators: A NamespaceDict containing parsed yara indicator files.
        :type indicators: dict
        """
        if not indicators:
            return

        CONSOLE("[cyan][YARA][/cyan] Entered yara plugin.")

        files = [i["indicator"]["files"] for i in indicators]
        files = "\\**" if "\\**" in files else ", ".join(files)

        if files == "\\**":
            blame = [
                i["name"] for i in indicators
                if i["indicator"]["files"] == "\\**"
            ]
            CONSOLE(
                "[cyan][YARA][/cyan] Enumerating the entire filesystem due to {}... this is going to take a while."
                .format(blame))

        report = {
            indicator["name"]: build_report(indicator)
            for indicator in indicators
        }

        hits = 0
        run_args = []

        # Normalize every path, for every path
        try:
            run_args = [(a, b, indicators)
                        for a, b in enumerate(normalize_paths(files), 1)]
            async with aiomp.Pool() as pool:
                try:
                    async for result in pool.map(_run, tuple(run_args)):
                        if result:
                            report[result["namespace"]]["matches"].append(
                                result)
                            hits += 1
                except KeyboardInterrupt:
                    pass
        except IndexError:
            pass

        count = len(run_args)

        CONSOLE("[cyan][YARA][/cyan] Done. Processed {} files.".format(count))
        CONSOLE(
            "[cyan][YARA][/cyan] Found {} hit(s) for yara indicators.".format(
                hits))

        with open(os.path.join(OUTPUT_DIR, "yara.json"), "w+") as writeout:
            writeout.write(
                json.dumps(
                    {r: report[r]
                     for r in report if report[r]["matches"]}))
Example #8
    async def test_map_reduce(self):

        numbers = [1, 2, 3]
        expected = sum(numbers) * 2

        async with amp.Pool() as pool:
            result = await pool.map_reduce(mapper, reducer, numbers)
            self.assertEqual(result, expected)
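`mapper` and `reducer` are not shown. Given that `expected = sum(numbers) * 2` and the other tests in this suite use a doubling `mapper`, plausible definitions (assumptions) are:

async def mapper(value):
    return value * 2

async def reducer(current, value):
    # Pairwise accumulator; a reducer taking an iterable is equally plausible.
    return current + value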
Example #9
    async def test_raise(self):
        result = await amp.Worker(target=raise_fn,
                                  name="test_process",
                                  initializer=do_nothing)
        self.assertIsInstance(result, RuntimeError)

        async with amp.Pool(2) as pool:
            with self.assertRaises(ProxyException):
                await pool.apply(raise_fn, args=())
Example #10
    async def test_pool_uvloop(self):
        try:
            import uvloop

            async with amp.Pool(2, loop_initializer=uvloop.new_event_loop) as pool:
                had_uvloop = await pool.apply(check_uvloop)
                self.assertTrue(had_uvloop)

        except ModuleNotFoundError:
            self.skipTest("uvloop not available")
Example #11
 async def get_result_from_dns(self, subhosts):
     res = set()
     async with aiomultiprocess.Pool(processes=8, childconcurrency=8) as pool:
         # To saturate the CPU, raise the process and coroutine counts above
         results = await pool.map(self.Aio_Subdomain, subhosts)
     for result in results:
         subdomain, answers = result
         if answers is not None and subdomain is not None:
             res.add(subdomain)
             print((subdomain, answers))
     return list(res)
Example #12
    async def test_pool_exception_handler(self):
        exc_q = get_context().Queue()
        handler = exc_q.put_nowait

        async with amp.Pool(2, exception_handler=handler) as pool:
            with self.assertRaises(ProxyException):
                await pool.apply(raise_fn, args=())

            exc = exc_q.get_nowait()
            self.assertIsInstance(exc, RuntimeError)
            self.assertEqual(exc.args, ("raising", ))
Example #13
 async def run(self):
     """Run the downloader with multiprocessing and multithreading."""
     pool_tasks = []
     async with aiomultiprocess.Pool(processes=4,
                                     maxtasksperchild=64,
                                     childconcurrency=8,
                                     queuecount=2) as pool:
         for call in self.calls_list:
             pool_tasks.append(pool.apply(self._get_call, args=[call]))
         for download in tqdm(asyncio.as_completed(pool_tasks),
                              total=len(pool_tasks)):
             await download
Example #14
    async def test_initializer(self):
        p = amp.Process(target=sleepy,
                        name="test_process",
                        initializer=do_nothing)
        p.start()
        await p.join()

        result = 10
        async with amp.Pool(2, initializer=initializer,
                            initargs=(result, )) as pool:
            self.assertEqual(await pool.apply(get_dummy_constant, args=()),
                             result)
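`initializer` and `get_dummy_constant` are not shown; a sketch consistent with the assertion above (an assumption) sets a module-level value in each worker at startup and reads it back later:

DUMMY_CONSTANT = None

def initializer(value):
    # Runs once in each worker process before it accepts tasks.
    global DUMMY_CONSTANT
    DUMMY_CONSTANT = value

async def get_dummy_constant():
    return DUMMY_CONSTANT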
Example #15
async def main(urls, backs):
    # async with aiomultiprocess.Process() as process:
    #     for u in urls:
    #         for b in backs:
    #             await process()
    all_tasks = []
    for b in backs:
        for u in urls:
            all_tasks.append(u + b)
    print('Number of targets: {}'.format(len(all_tasks)))
    async with aiomultiprocess.Pool() as pool:
        await pool.map(run, all_tasks)
Example #16
    async def test_pool_concurrency(self):
        results = []
        for sleep, tasks, processes, concurrency in PERF_SETS:
            with Timer() as timer:
                async with amp.Pool(processes, childconcurrency=concurrency) as pool:
                    await pool.map(sleepy, (sleep for _ in range(tasks)))

            results.append((sleep, tasks, processes, concurrency, timer.result))

        print()
        for result in results:
            print(*result)
Example #17
    async def test_pool(self):
        values = list(range(10))
        results = [await mapper(i) for i in values]

        async with amp.Pool(2, maxtasksperchild=5) as pool:
            self.assertEqual(pool.process_count, 2)
            self.assertEqual(len(pool.processes), 2)

            self.assertEqual(await pool.apply(mapper, (values[0],)), results[0])
            self.assertEqual(await pool.map(mapper, values), results)
            self.assertEqual(
                await pool.starmap(starmapper, [values[:4], values[4:]]),
                [results[:4], results[4:]],
            )
Example #18
    async def test_pool_closed(self):
        pool = amp.Pool(2)
        pool.close()

        with self.assertRaisesRegex(RuntimeError, "pool is closed"):
            await pool.apply(two)

        with self.assertRaisesRegex(RuntimeError, "pool is closed"):
            await pool.map(mapper, [1, 2, 3])

        with self.assertRaisesRegex(RuntimeError, "pool is closed"):
            await pool.starmap(starmapper, [[1, 2, 3], [1, 2, 3]])

        pool.terminate()
Example #19
    async def test_pool_map(self):
        values = list(range(0, 20, 2))
        expected = [k * 2 for k in values]

        async with amp.Pool(2) as pool:
            obj = pool.map(mapper, values)
            self.assertIsInstance(obj, amp.PoolResult)
            results = await obj
            self.assertEqual(results, expected)

            obj = pool.map(mapper, values)
            self.assertIsInstance(obj, amp.PoolResult)
            idx = 0
            async for result in obj:
                self.assertEqual(result, expected[idx])
                idx += 1
Example #20
 async def get_result_from_dns(self, subhosts):
     res = set()
     async with aiomultiprocess.Pool(
             processes=processes,
             childconcurrency=childconcurrency) as pool:
         # To saturate the CPU, raise the process and coroutine counts above
         results = await pool.map(self.Aio_Subdomain, subhosts)
     for result in results:
         subdomain, answers = result
         if answers is not None and subdomain is not None:
             if answers != self.FakeDomain_IP:
                 # wildcard resolution is ruled out here
                 res.add(subdomain)
             else:
                 res.add(self.FakeDomain_IP)
     return list(res)
Example #21
    async def test_spawn_method(self):
        self.assertEqual(amp.core.context.get_start_method(), "spawn")

        async def inline(x):
            return x

        with self.assertRaises(AttributeError):
            await amp.Worker(target=inline, args=(1, ), name="test_inline")

        result = await amp.Worker(target=two, name="test_global")
        self.assertEqual(result, 2)

        values = list(range(10))
        results = [await mapper(i) for i in values]
        async with amp.Pool(2) as pool:
            self.assertEqual(await pool.map(mapper, values), results)
Example #22
    async def test_pool_starmap(self):
        values = list(range(0, 20, 2))
        expected = [k * 2 for k in values]

        async with amp.Pool(2) as pool:
            obj = pool.starmap(starmapper, [values] * 5)
            self.assertIsInstance(obj, amp.PoolResult)
            results = await obj
            self.assertEqual(results, [expected] * 5)

            obj = pool.starmap(starmapper, [values] * 5)
            self.assertIsInstance(obj, amp.PoolResult)
            count = 0
            async for results in obj:
                self.assertEqual(results, expected)
                count += 1
            self.assertEqual(count, 5)
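Both starmap tests are consistent with a `starmapper` that doubles each positional argument; a plausible definition (an assumption):

async def starmapper(*values):
    return [value * 2 for value in values]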
Example #23
async def aio_resolve(subdomain_list, process_num, coroutine_num):
    """
    异步解析子域A记录

    :param list subdomain_list: 待解析的子域列表
    :param int process_num: 解析进程数
    :param int coroutine_num: 每个解析进程下的协程数
    :return: 解析结果
    """
    m = Manager()
    pr_queue = m.Queue()
    loop = asyncio.get_event_loop()
    loop.run_in_executor(None, resolve_progress, pr_queue, len(subdomain_list))
    wrapped_query = functools.partial(aio_query, pr_queue)
    async with aiomp.Pool(processes=process_num,
                          childconcurrency=coroutine_num) as pool:
        results = await pool.map(wrapped_query, subdomain_list)
        return results
Example #24
async def run(indicators: dict) -> None:
    """Accept a dict containing events indicators and writes out to the OUTPUT_DIR specified by chirp.common.

    :param indicators: A dict containing parsed events indicator files.
    :type indicators: dict
    """
    if not indicators:
        return
    hits = 0
    num_logs = 0
    logging.debug("Entered events plugin.")
    event_types = {
        indicator["indicator"]["event_type"]
        for indicator in indicators
    }
    report = {
        indicator["name"]: build_report(indicator)
        for indicator in indicators
    }
    run_args = [(event_type, indicators, report, num_logs)
                for event_type in event_types]
    async with aiomp.Pool() as pool:
        try:
            async for i in pool.map(_run, tuple(run_args)):
                _rep = i[0]
                num_logs += i[1]
                for k, v in _rep.items():
                    try:
                        report[k]["_search_criteria"] = v["_search_criteria"]
                    except KeyError:
                        pass
                    report[k]["matches"] += v["matches"]
        except KeyboardInterrupt:
            pass

    hits = sum(len(v["matches"]) for _, v in report.items())
    logging.log(EVENTS,
                "Read {} logs, found {} matches.".format(num_logs, hits))
    with open(os.path.join(OUTPUT_DIR, "events.json"), "w+") as writeout:
        writeout.write(
            json.dumps({r: report[r]
                        for r in report if report[r]["matches"]}))
Example #25
    async def test_spawn_context(self):
        with self.assertRaises(ValueError):
            amp.set_context("foo")

        async def inline(x):
            return x

        amp.set_context("spawn")

        with self.assertRaises(AttributeError):
            p = amp.Worker(target=inline, args=(1, ), name="test_inline")
            p.start()
            await p.join()

        p = amp.Worker(target=two, name="test_global")
        p.start()
        await p.join()

        values = list(range(10))
        results = [await mapper(i) for i in values]
        async with amp.Pool(2) as pool:
            self.assertEqual(await pool.map(mapper, values), results)

        self.assertEqual(p.result, 2)
Example #26
async def main(urls):
    async with aiomultiprocess.Pool() as pool:
        await pool.map(run, urls)
Example #27
async def main(urls, backs):

    all_tasks = set()
    for b in backs:
        for u in urls:
            # Load the plain wordlist: /a.rar, /2016.rar
            all_tasks.add(u + b)
            # http://www.langzi.fun/a.rar
            try:
                # Custom directory + plain wordlist
                all_tasks.add(u + '/' + u.split('//')[1].split('.')[1] + b)
                # http://www.langzi.fun/langzi/a.rar
            except IndexError:
                pass
            for m in backup_dir:
                all_tasks.add(u + '/' + m + b)
                # Load directories
                # http://www.langzi.fun/www/a.rar

    for b in first_back_:
        # Dynamically load custom suffixes, e.g. /a.zip, /a.rar, /a.iso
        for u in urls:
            all_tasks.add(u + b)

    for b in backup_suffix:
        # Generate probe URLs derived from the domain name
        for u in urls:
            try:
                all_tasks.add(u + '/' + u.split('//')[1].split('.')[1] + b)
                # http://www.langzi.fun/langzi.rar
            except IndexError:
                pass
            for m in backup_dir:
                try:
                    # Load directories
                    all_tasks.add(u + '/' + m + '/' +
                                  u.split('//')[1].split('.')[1] + b)
                    # http://www.langzi.fun/www/langzi.rar
                except IndexError:
                    pass
            try:
                all_tasks.add(u + '/' + u.split('//')[1] + b)
                # http://www.langzi.fun/www.langzi.fun.rar
            except IndexError:
                pass
            try:
                all_tasks.add(u + '/' + u.split('.', 1)[1].replace('/', '') +
                              b)
                # http://www.langzi.fun/langzi.rar
            except IndexError:
                pass
            try:
                all_tasks.add(u + '/' + u.split('//')[1].split('.')[1] + '/' +
                              u.split('//')[1].split('.')[1] + b)
                # http://www.langzi.fun/langzi/langzi.rar
            except IndexError:
                pass

    all_tasks = list(all_tasks)
    print('Number of targets: {}'.format(len(all_tasks)))
    time.sleep(2)
    if len(all_tasks) > 3000000:
        print('The target list is very large; the scan may exhaust memory and crash the machine')
    print('Starting scan...')
    async with aiomultiprocess.Pool() as pool:
        await pool.map(run, all_tasks)
Example #28
async def main(urls):
    async with aiomultiprocess.Pool() as pool:
        result = await pool.map(run, urls)
    return result
Example #29
 async def main(self, urls):
     async with aiomultiprocess.Pool(processes=8, childconcurrency=8) as pool:
         result = await pool.map(self.check_url_alive, urls)
     return [x for x in result if x is not None]
Example #30
async def main():
    tasks = range(800, 1001)
    async with aiomultiprocess.Pool() as pool:
        result = await pool.map(tq, tasks)
    print(result)
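`tq` is not shown. For reference, a self-contained variant of this example, assuming `tq` is a trivial squaring coroutine (aiomultiprocess uses the spawn start method, so the entry-point guard is required):

import asyncio
import aiomultiprocess

async def tq(n):
    # Placeholder worker; the original tq is not part of the snippet.
    return n * n

async def main():
    tasks = range(800, 1001)
    async with aiomultiprocess.Pool() as pool:
        result = await pool.map(tq, tasks)
    print(result)

if __name__ == '__main__':
    asyncio.run(main())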