Example #1
def schedule_green_jobs(fns,
                        concurrency=DEFAULT_THREADS,  # module-level constant in the source project
                        progress=None,
                        total=None):
    import gevent.pool
    from tqdm import tqdm  # defined at module scope in the source project

    if total is None:
        try:
            total = len(fns)
        except TypeError:  # generators don't have len
            pass

    pbar = tqdm(total=total, desc=progress, disable=(not progress))
    results = []

    # wrap each fn so a finished job ticks the progress bar and stores its result
    def updatefn(fn):
        def realupdatefn():
            res = fn()
            pbar.update(1)
            results.append(res)

        return realupdatefn

    pool = gevent.pool.Pool(concurrency)
    for fn in fns:
        pool.spawn(updatefn(fn))

    pool.join()
    pool.kill()
    pbar.close()

    return results
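A minimal usage sketch for the function above (fetch and the URLs are hypothetical; assumes gevent and tqdm are installed and DEFAULT_THREADS is defined as in the source module):

import functools

def fetch(url):
    return len(url)  # stand-in for real work

urls = ["https://a.example", "https://b.example"]
fns = [functools.partial(fetch, u) for u in urls]
results = schedule_green_jobs(fns, concurrency=2, progress="fetching")
print(results)  # one entry per task, in completion order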
Example #2
def startBruteforce(sClient, pool, wordlist):
    # apply_async already registers the spawned greenlet with the pool
    with open(wordlist, "r") as words:
        for word in words:
            pool.apply_async(
                isSecretSaslValid,
                args=(sClient, word.strip(), "sparkSaslUser", True),
                callback=checkResult,
            )

    pool.join(timeout=30)  # bounded wait for the remaining candidates
    pool.kill()
    whine("Could not find the secret", "warn")
Example #3
def schedule_green_jobs(fns,
                        concurrency=DEFAULT_THREADS,  # module-level constant in the source project
                        progress=None,
                        total=None,
                        count_return=False):
    import gevent.pool
    from tqdm import tqdm  # defined at module scope in the source project

    if total is None:
        try:
            total = len(fns)
        except TypeError:  # generators don't have len
            pass

    desc = progress if isinstance(progress, str) else None

    if isinstance(progress, tqdm):
        pbar = progress
    else:
        pbar = tqdm(total=total, desc=desc, disable=(not progress))

    results = []
    exceptions = []

    def add_exception(greenlet):
        nonlocal exceptions
        try:
            greenlet.get()
        except Exception as err:
            exceptions.append(err)

    def updatefn(fn):
        def realupdatefn():
            res = fn()
            pbar.update(res if count_return else 1)
            results.append(res)

        return realupdatefn

    pool = gevent.pool.Pool(concurrency)
    for fn in fns:
        greenlet = pool.spawn(updatefn(fn))
        greenlet.link_exception(add_exception)

    pool.join()
    pool.kill()
    pbar.close()

    if exceptions:
        raise_multiple(exceptions)  # helper from the source project that re-raises collected errors

    return results
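The error collection above rests on Greenlet.link_exception, which fires its callback only when a greenlet dies with an error; a minimal sketch (gevent also prints the failure traceback to stderr):

import gevent

def boom():
    raise ValueError("nope")

errors = []
g = gevent.spawn(boom)
g.link_exception(lambda glt: errors.append(glt.exception))
g.join()
gevent.sleep(0)  # yield once so the link callback can run
print(errors)    # [ValueError('nope')]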
Example #4
def restore_ARP(targets, router_IP):
    router_MAC = getmacbyip(router_IP)
    pool.kill()  # kill the original spoofing greenlets (module-level pool)

    # send ARP packets re-associating the router IP with its real MAC

    def fix(target):
        ARP_pkt = (
            Ether(dst=BROADCAST) /
            ARP(op="is-at", hwsrc=router_MAC, psrc=router_IP, pdst=target))
        sendp(ARP_pkt, verbose=False)

    gevent.joinall([gevent.spawn(fix, target) for target in targets],
                   timeout=10)
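The packet built above is a gratuitous ARP reply; a standalone sketch with hypothetical addresses (BROADCAST in the source is the Ethernet broadcast MAC, and sending requires root):

from scapy.all import ARP, Ether, sendp

pkt = (Ether(dst="ff:ff:ff:ff:ff:ff") /
       ARP(op="is-at",                  # ARP reply
           hwsrc="aa:bb:cc:dd:ee:ff",   # router's real MAC (hypothetical)
           psrc="192.168.1.1",          # router IP (hypothetical)
           pdst="192.168.1.50"))        # victim whose cache we repair (hypothetical)
sendp(pkt, verbose=False)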
Example #5
        def _validate_proxy_list(self, proxies, timeout=90):
            valid_proxies = []

            def save_result(p):
                if p and len(str(p)):
                    valid_proxies.append(p)

            # under multi-process operation, each process validates only its own slice
            if self.process_count > 1:
                num_per_part = int((len(proxies) + self.process_count - 1) /
                                   self.process_count)
                start = self.process_seq * num_per_part
                end = (self.process_seq + 1) * num_per_part
                proxies = proxies[start:end]

            # validate in batches of `step` proxies
            total_num = len(proxies)
            step = 500
            pool = gevent.pool.Pool(500)
            for i in range(0, total_num, step):
                group = proxies[i:i + step]
                for proxy in group:
                    pool.apply_async(self._validate_one_proxy,
                                     args=(proxy, 'http'),
                                     callback=save_result)
                    pool.apply_async(self._validate_one_proxy,
                                     args=(proxy, 'https'),
                                     callback=save_result)
                    pool.apply_async(self._validate_one_proxy,
                                     args=(proxy, 'socks4'),
                                     callback=save_result)
                    pool.apply_async(self._validate_one_proxy,
                                     args=(proxy, 'socks5'),
                                     callback=save_result)

                is_empty = pool.join(timeout=timeout)
                if not is_empty:
                    # validation did not finish within the timeout
                    print("**** validation is not done!")

                pool.kill()

                # persist the validated proxies right away
                self.save_proxies(valid_proxies)
                valid_proxies = []
                print("progress: {}/{}".format(i, total_num))
                time.sleep(1)

            return valid_proxies  # already flushed batch-by-batch, so this is empty
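The example above leans on a detail worth calling out: in recent gevent releases, Pool.join(timeout=...) returns True only if the pool emptied before the timeout expired. A minimal sketch:

import gevent
import gevent.pool

pool = gevent.pool.Pool(2)
pool.spawn(gevent.sleep, 5)   # a deliberately slow task
emptied = pool.join(timeout=1)
print(emptied)                # False: the sleeper is still running
pool.kill()                   # discard whatever is left, as above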
Example #6
def keep_fqsocks_busy():
    while True:
        pool = gevent.pool.Pool(size=16)
        greenlets = []
        for i in range(100):
            greenlets.append(pool.apply_async(check_twitter_access))
        # drain the pool: wait up to 10s per greenlet until none remain
        while len(pool) > 0:
            for greenlet in list(pool):
                try:
                    greenlet.join(timeout=10)
                except:
                    pass
        try:
            pool.kill()
        except:
            pass
Example #7
def keep_fqsocks_busy():
    goagent.GoAgentProxy.GOOGLE_HOSTS = ["goagent-google-ip.fqrouter.com"]
    goagent.GoAgentProxy.refresh(fqsocks.mandatory_proxies)
    while True:
        pool = gevent.pool.Pool(size=16)
        greenlets = []
        for i in range(100):
            greenlets.append(pool.apply_async(check_baidu_access))
        # drain the pool: wait up to 10s per greenlet until none remain
        while len(pool) > 0:
            for greenlet in list(pool):
                try:
                    greenlet.join(timeout=10)
                except:
                    pass
        try:
            pool.kill()
        except:
            pass
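The hand-rolled drain loop in the two examples above can usually be collapsed into a single bounded join; a sketch with a hypothetical check function:

import gevent
import gevent.pool

def check():
    gevent.sleep(0.1)  # stand-in for check_twitter_access / check_baidu_access

pool = gevent.pool.Pool(size=16)
for _ in range(100):
    pool.apply_async(check)
if not pool.join(timeout=10):  # True once the pool has drained
    pool.kill()                # give up on any stragglers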
Example #8
        def data_generator(model_class, pids, results):
            """
            Greenlet: Launches and manages a pool of workers that check
            conditions on all the items.
            """
            try:
                pool = gevent.pool.Pool(10)

                collect_results.set()  # tell the generator to start waiting on results

                # Get the list of id's only first
                # TODO: Checking across all instances is only supported on LDAP models
                #ids = model_class.objects.all().values_list('id', flat=True)
                ids = model_class.provider.ids()

                for dev_id in ids:
                    try:
                        #item = model_class.objects.get(id=dev_id)
                        item = model_class.provider.get(dev_id)
                    except Exception:  # already covers model_class.DoesNotExist
                        continue

                    async_result = gevent.event.AsyncResult()
                    pool.spawn(check_conditions, item, pids).link(async_result)
                    results.append(async_result)

                pool.join()
            except gevent.GreenletExit:
                log.info("GreenletExit - clean up pool")
            except:
                log.exception("unexpected death")
            finally:
                log.error("Killing pools")
                pool.kill()  # the socket being written to may go away, so clean up
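The spawn-then-link pattern above works because an AsyncResult is itself a valid link target: the linked greenlet's return value (or exception) is stored on it. A minimal sketch:

import gevent.event
import gevent.pool

pool = gevent.pool.Pool(10)
result = gevent.event.AsyncResult()
pool.spawn(lambda: 42).link(result)
pool.join()
print(result.get())  # 42; get() re-raises if the greenlet had failed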
Example #9
def quit_handler():
    server.stop()  # server, pool, and agg live at module scope in the source
    pool.kill()
    agg.flush()


def downloader(shard, downloads_folder, session_factory):
    info("Another cycle of sansara")
    concurrency = 3
    pool = gevent.pool.Pool(concurrency)
    input_queue = gevent.queue.Queue(concurrency)
    output_queue = gevent.queue.Queue()

    done_tasks = set()
    tasks_set = set()
    tasks_lock = gevent.lock.RLock()
    for i in range(concurrency):
        pool.spawn(worker,
                   input_queue,
                   output_queue,
                   tasks_lock,
                   tasks_set,
                   done_tasks,
                   timeout=60)

    session = next(session_factory)
    tree = html.fromstring(session.get('http://rghost.net/main').text)
    latest_links_elements = tree.xpath(
        "//div[contains(concat(' ', normalize-space(@class), ' '), ' main-column ')]/ul/li/a"
    )
    latest_link = max(el.attrib['href'] for el in latest_links_elements)
    latest_num = int(latest_link[1:])
    latest_num = shard.real_to_sharded(latest_num)

    task_retries = {}
    suspicious = set()

    MAX_RETRIES = 2

    def add_job(n, force=False):
        if force:
            with tasks_lock:
                done_tasks.discard(n)
        input_queue.put((download_sharded, n, shard, downloads_folder,
                         next(session_factory)))

    start = time.time()

    while time.time() < start + 90:
        for i in range(concurrency):
            try:
                out = output_queue.get_nowait()
            except gevent.queue.Empty:
                break
            else:
                n, result = out
                try:
                    result = result.get()
                except NotExistsYet:
                    if n in suspicious:
                        suspicious.discard(n)
                        warning("File %d seems to be instantly 404", n)
                        continue
                    if latest_num > n:
                        suspicious.add(n)
                    elif latest_num == n:
                        gevent.sleep(shard.sub_total * 0.5)
                    add_job(n, force=True)
                except (Exception, gevent.Timeout):
                    error("Failed to download %d", n, exc_info=True)
                    retries = task_retries.get(n, 0)
                    if retries <= MAX_RETRIES:
                        info("Retrying %d", n)
                        task_retries[n] = task_retries.get(n, 0) + 1
                        add_job(n, force=True)
                    else:
                        info("%d has reached max retry tries", n)
                        task_retries.pop(n, 0)
                    latest_num = max(latest_num, n + 1)
                else:
                    latest_num = max(latest_num, n + 1)
        add_job(latest_num)
        gevent.sleep(0.1)

    pool.kill()
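The downloader above feeds its workers through gevent queues; a self-contained sketch of that pattern (the squaring job and poison-pill shutdown are hypothetical simplifications of the source's worker):

import gevent.pool
import gevent.queue

def worker(inq, outq):
    for n in inq:               # iteration ends when a StopIteration pill arrives
        outq.put(n * n)

inq = gevent.queue.Queue()
outq = gevent.queue.Queue()
pool = gevent.pool.Pool(3)
for _ in range(3):
    pool.spawn(worker, inq, outq)
for n in range(5):
    inq.put(n)
for _ in range(3):
    inq.put(StopIteration)      # one pill per worker
pool.join()
print(sorted(outq.queue))       # [0, 1, 4, 9, 16]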