def _filter_transactions(self, transactions: List[Transaction]) -> List[Transaction]:
    """Filters all transactions in parallel. The results may not have the same order."""
    # Fan one filter task per transaction out over a greenlet group and
    # keep only the entries the task accepted (rejected ones come back None).
    task_group = pool.Group()
    task_results = task_group.imap_unordered(
        self._filter_transaction_task, transactions)
    return filter_array(lambda item: item is not None, task_results)
def __init__(self, name=None):
    """Initialise the object, generating a random name when none is given.

    Args:
        name: Optional identifier. When ``None``, a random UUID string
            is used instead.
    """
    self.name = name
    self.modules = {}   # module name -> module object (populated later)
    self.refcount = 0   # reference counter, starts at zero
    self.pool = pool.Group()
    # Fix: compare against None with `is`, not `==` (PEP 8); `==` can
    # invoke an arbitrary __eq__ on custom name objects.
    if self.name is None:
        self.name = str(uuid4())
def _handle_transactions(self, transactions: List[Transaction]) -> None:
    """Handle all transactions concurrently; re-raise the first handler error.

    Bug fix: the original spawned ``lambda: self._handle_transaction(tr)``.
    Python closures bind ``tr`` late (by reference), so by the time the
    greenlets actually run, several (or all) of them could see the loop's
    final transaction. Passing the callable and its argument to ``spawn``
    binds the value at spawn time.
    """
    handle_task_group = pool.Group()
    for tr in transactions:
        handle_task_group.spawn(self._handle_transaction, tr)
    # Block until every handler finished; propagate any handler exception.
    handle_task_group.join(raise_error=True)
def __init__(self, name, group=None):
    """Initialise service bookkeeping and register in the class registry.

    Bug fix: the original signature was ``group=pool.Group()``. Default
    arguments are evaluated once at definition time, so every instance
    created without an explicit group silently shared a single Group.
    ``None`` now means "create a fresh Group for this instance".

    Args:
        name: service name.
        group: optional greenlet group to run under; a new ``pool.Group``
            is created when omitted.
    """
    self.name = name
    self._commands = {}    # cmd_id : command
    self._greenlets = {}   # cmd_id : greenlet
    self._decorators = {}  # cmd_id : callable
    self._group = group if group is not None else pool.Group()
    self._queue = queue.Queue()
    # Weak reference so the registry does not keep dead services alive.
    # NOTE(review): assumes `self.services` is a class-level list — confirm.
    self.services.append(weakref.ref(self))
def __init__(self, name, group=None):
    """Initialise command/value bookkeeping for this service.

    Bug fix: the original default ``group=pool.Group()`` was evaluated
    once at definition time, so every instance built without an explicit
    group shared a single Group object. ``None`` now means "make a fresh
    Group".

    Args:
        name: service name, forwarded to the superclass.
        group: optional greenlet group to run under; a new ``pool.Group``
            is created when omitted.
    """
    super().__init__(name)
    self._commands = {}    # cmd_id : command
    self._greenlets = {}   # cmd_id : greenlet
    self._values = {}      # cmd_id : values
    self._decorators = {}  # cmd_id : callable
    self._group = group if group is not None else pool.Group()
    self._queue = queue.Queue()
def test_basic(self):
    """len(group) drops as each spawned sleep greenlet finishes."""
    group = pool.Group()
    group.spawn(gevent.sleep, timing.LARGE_TICK)
    self.assertEqual(len(group), 1, group)
    group.spawn(gevent.sleep, timing.LARGE_TICK * 5)
    self.assertEqual(len(group), 2, group)
    # After ~2 ticks the first greenlet has finished, the second has not.
    gevent.sleep(timing.LARGE_TICK * 2 + timing.LARGE_TICK_MIN_ADJ)
    self.assertEqual(len(group), 1, group)
    gevent.sleep(timing.LARGE_TICK * 5 + timing.LARGE_TICK_MIN_ADJ)
    self.assertFalse(group)
def __init__(self, host, port, login_id, auth_key):
    """Record connection parameters; no network activity happens here."""
    self.host = host
    self.port = port
    self.login_id = login_id
    self.auth_key = auth_key
    self._socket = None          # opened later, on connect
    self._group = pool.Group()   # owns this client's greenlets
    self.handlers = {}
    # Kept as replaceable attributes (e.g. for stubbing in tests).
    # NOTE(review): assumes self._send_queue is created elsewhere before
    # _send is first called — confirm against the connect/setup path.
    self._send = lambda _: self._send_queue.put(_)
    self.sleep = lambda _: gevent.sleep(_)
def test_kill_noblock(self):
    """kill(block=False) returns at once; members die on the next switch."""
    group = pool.Group()
    for pause in (DELAY, DELAY * 2):
        group.spawn(gevent.sleep, pause)
    assert len(group) == 2, group
    group.kill(block=False)
    # Non-blocking kill: both greenlets are still registered until we yield.
    assert len(group) == 2, group
    gevent.sleep(0.0001)
    self.assertFalse(group)
    self.assertEqual(len(group), 0)
def test_kill_noblock(self):
    """A non-blocking kill only takes effect after yielding to the hub."""
    group = pool.Group()
    group.spawn(gevent.sleep, DELAY)
    group.spawn(gevent.sleep, DELAY * 2)
    assert len(group) == 2, group
    group.kill(block=False)
    # Nothing has run yet, so the members are still there.
    assert len(group) == 2, group
    gevent.sleep(0.0001)
    # One trip through the hub is enough to reap both greenlets.
    assert len(group) == 0, group
    assert not group, group
def test_basic(self):
    """Group membership shrinks as the spawned sleeps expire."""
    # AppVeyor timers are coarse; widen the delay there.
    DELAY = 0.05 if not greentest.RUNNING_ON_APPVEYOR else 0.1
    group = pool.Group()
    group.spawn(gevent.sleep, DELAY)
    assert len(group) == 1, group
    group.spawn(gevent.sleep, DELAY * 2.)
    assert len(group) == 2, group
    # At 1.5 * DELAY only the longer sleep is still running.
    gevent.sleep(DELAY * 3. / 2.)
    assert len(group) == 1, group
    gevent.sleep(DELAY)
    assert not group, group
def test_waitall(self):
    """join() blocks until the slowest member (~2 * DELAY) has finished."""
    group = pool.Group()
    group.spawn(gevent.sleep, DELAY)
    group.spawn(gevent.sleep, DELAY * 2)
    assert len(group) == 2, group
    begin = time.time()
    group.join(raise_error=True)
    elapsed = time.time() - begin
    assert not group, group
    assert len(group) == 0, group
    # The wait time must be dominated by the longer sleep.
    assert DELAY * 1.9 <= elapsed <= DELAY * 2.5, (elapsed, DELAY)
def test_waitall(self):
    """join(raise_error=True) waits for the slowest greenlet, then empties."""
    group = pool.Group()
    for pause in (DELAY, DELAY * 2):
        group.spawn(gevent.sleep, pause)
    assert len(group) == 2, group
    begin = time.time()
    group.join(raise_error=True)
    elapsed = time.time() - begin
    self.assertFalse(group)
    self.assertEqual(len(group), 0)
    # Elapsed time is bounded by the longer sleep plus slack.
    self.assertTimeWithinRange(elapsed, DELAY * 1.9, DELAY * 2.5)
def test_kill_block(self):
    """A blocking kill() empties the group well before the sleeps expire."""
    group = pool.Group()
    group.spawn(gevent.sleep, DELAY)
    group.spawn(gevent.sleep, DELAY * 2)
    assert len(group) == 2, group
    begin = time.time()
    group.kill()
    self.assertFalse(group)
    self.assertEqual(len(group), 0)
    elapsed = time.time() - begin
    # Killing must be near-instant, not bounded by the sleep durations.
    assert elapsed < DELAY * 0.8, elapsed
def test_kill_block(self):
    """kill() with blocking semantics reaps every member immediately."""
    group = pool.Group()
    for pause in (DELAY, DELAY * 2):
        group.spawn(gevent.sleep, pause)
    assert len(group) == 2, group
    begin = time.time()
    group.kill()
    assert not group, group
    assert len(group) == 0, group
    elapsed = time.time() - begin
    # Far faster than letting either sleep run to completion.
    assert elapsed < DELAY * 0.8, elapsed
def test_basic(self):
    """len(group) tracks live greenlets as their sleeps run out."""
    DELAY = 0.05
    group = pool.Group()
    group.spawn(gevent.sleep, DELAY)
    assert len(group) == 1, group
    group.spawn(gevent.sleep, DELAY * 2.)
    assert len(group) == 2, group
    # Halfway between the two deadlines: exactly one member left.
    gevent.sleep(DELAY * 3. / 2.)
    assert len(group) == 1, group
    gevent.sleep(DELAY)
    assert not group, group
def __init__(self, name):
    """Initialise command bookkeeping and start the result-collector greenlet.

    Args:
        name: service name, forwarded to the superclass.
    """
    super().__init__(name)
    self._commands = {}      # cmd_id : command
    self._wake_objects = {}  # cmd_id : obj
    self._greenlets = {}     # cmd_id : greenlet
    self._values = {}        # cmd_id : values
    self._decorators = {}    # cmd_id : callable
    self._group = pool.Group()
    self._result_queue = queue.Queue()
    # Background consumer: runs _get_results inside this service's group
    # so it is killed together with the service's other greenlets.
    self._result_greenlet = self._group.start(
        async_utils.Greenlet(self._get_results))
def __init__(self, client_id='', hostname='96.126.125.171', port=443):
    """Set up tunnel-client state; the socket is not opened here.

    NOTE(review): the default hostname is a hard-coded public IP —
    confirm this is intentional and not a leftover test endpoint.
    """
    self.client_id = client_id
    self.hostname = hostname
    self.port = port
    self.socket = None        # connected lazily
    self._tunnels = {}
    self._handlers = {}
    self._group = pool.Group()
    self._outbox = queue.Queue()
    self.logger = logging.getLogger(self.__module__)
def test_basic(self):
    """Group length tracks finished sleeps, tolerating libuv timer flakes."""
    DELAY = 0.05 if not greentest.RUNNING_ON_APPVEYOR else 0.1
    group = pool.Group()
    group.spawn(gevent.sleep, DELAY)
    self.assertEqual(len(group), 1, group)
    group.spawn(gevent.sleep, DELAY * 2.)
    self.assertEqual(len(group), 2, group)
    gevent.sleep(DELAY * 3. / 2.)
    try:
        self.assertEqual(len(group), 1, group)
    except AssertionError:
        # libuv timers can fire late; treat this as a known race.
        reraiseFlakyTestRaceConditionLibuv()
    gevent.sleep(DELAY)
    self.assertFalse(group)
def __init__(self, idx, ip, port):
    """Record the peer endpoint and create the client's sync primitives."""
    super(CTcpClient, self).__init__()
    self.idx = idx
    self.sockfd = None        # populated once a TCP connection is made
    self.need_close = False   # set when a shutdown is requested
    self.ip, self.port = ip, port
    self.client_ip, self.client_port = "", "0"
    self.connect_pool = pool.Group()
    self._lock = Semaphore(value=1)  # serialises access, binary semaphore
    self.packet_queue = Queue()
def grep_expression(containers_ids=None, expression=None):
    """Search *expression* in every container's log concurrently.

    Returns a mapping ``container_id -> {'name': ..., 'data': [...]}`` where
    ``data`` is filled in-place by ``search_expression``, or ``None`` when
    either argument is missing/empty.
    """
    if not containers_ids or not expression:
        return None
    result = {}
    group = pool.Group()
    for container_id in containers_ids:
        log_path = get_path_to_log(container_id)
        entry = {'name': get_container_name(container_id), 'data': []}
        result[container_id] = entry
        # Each worker appends its matches into this container's 'data' list.
        group.apply_async(search_expression, (log_path, expression),
                          {'result': entry['data']})
    group.join()
    return result
def write_files(self, bucket_name, file_names):
    """Upload all files concurrently; log aggregate size and summed speed."""
    def upload(file_name):
        # One upload per greenlet; returns per-file stats.
        duration, speed, size = self.write_file(bucket_name, file_name)
        return (file_name, duration, speed, size)

    speed_sum = 0
    size_sum = 0
    group = pool.Group()
    # imap_unordered yields results as uploads complete, in any order.
    for _, _, speed, size in group.imap_unordered(upload, file_names):
        speed_sum += speed
        size_sum += size
    group.join()
    logger.info("uploaded %sMiB, total speed: %.2fMiB/s",
                size_sum / MiB, speed_sum)
def __init__(self, app_settings):
    """Wire the default bot application together from *app_settings*.

    Args:
        app_settings: mapping read for 'dispatcher_pool_size' (default 100)
            and 'enable_logging' (default False); also forwarded whole to
            the superclass.
    """
    pool_size = app_settings.get('dispatcher_pool_size', 100)
    logging_enabled = app_settings.get('enable_logging', False)
    # The same dispatcher instance is shared by the engine and the
    # plugin manager below.
    dispatcher = DispatcherEngine(
        d.MessageDispatcher(),
        enable_logging=logging_enabled,
        size=pool_size)
    super(DefaultKaarmeBotApp, self).__init__(
        network_client_provider=NetworkClientGreenlet,
        dispatcher=dispatcher,
        concurrency_manager=pool.Group(),
        message_parser=irc.parse_message,
        plugin_manager=pl.PluginManager(dispatcher),
        logger=logger,
        app_settings=app_settings)
def main(): tb = time.time() p = pool.Group() for n in xrange(100): pars = { 'host': '127.0.0.1', 'service': 'www%d' % n, 'state': 'down', 'metric_f': 10000 } c = p.spawn(_s, pars) p.join() print time.time() - tb
def test_service_creation_data_race(self):
    """Create many services concurrently and verify none are lost.

    Fixes: the original passed ``name`` as a second argument to ``print``
    (logging-style), so the literal ``%s`` was printed instead of being
    interpolated; the unused locals ``robot``/``service`` inside the
    helper (and the trivial ``get_client`` indirection) are removed.
    """
    def create_service(name):
        data = {'ip': '127.0.0.1'}
        self.cl.services.create(
            'github.com/threefoldtech/0-robot/node/0.0.1',
            name, data=data)
        print("service %s created" % name)

    N = 200
    group = pool.Group()
    expected_secrets = ["greenlet_%d" % i for i in range(N)]
    # map() spawns one greenlet per name and waits for all of them,
    # exercising the concurrent-creation path.
    group.map(create_service, expected_secrets)
    group.join()
    robot = self.cl
    # Every concurrently-created service must be findable afterwards.
    assert len(robot.services.find()) == N
def test_kill_fires_once(self):
    """Repeated kills of the same greenlet/group must deliver the kill
    exception to each Undead at most once per explicit kill."""
    u1 = Undead()
    u2 = Undead()
    p1 = gevent.spawn(u1)
    p2 = gevent.spawn(u2)

    def check(count1, count2):
        # Both greenlets must still be alive (the kills are absorbed —
        # neither is dead afterwards) with exactly the expected shot counts.
        assert p1, p1
        assert p2, p2
        assert not p1.dead, p1
        assert not p2.dead, p2
        self.assertEqual(u1.shot_count, count1)
        self.assertEqual(u2.shot_count, count2)

    gevent.sleep(0.01)
    s = pool.Group([p1, p2])
    assert len(s) == 2, s
    check(0, 0)
    # Non-blocking killone: nothing is delivered until we yield to the hub.
    s.killone(p1, block=False)
    check(0, 0)
    gevent.sleep(0)
    check(1, 0)
    # Repeating killone on the same greenlet must not fire again.
    s.killone(p1)
    check(1, 0)
    s.killone(p1)
    check(1, 0)
    # Multiple non-blocking group kills coalesce into a single shot at p2.
    s.kill(block=False)
    s.kill(block=False)
    s.kill(block=False)
    check(1, 0)
    gevent.sleep(DELAY)
    check(1, 1)
    # A blocking kill can never complete (both members survive their kill),
    # so with_timeout must give up and return the sentinel object.
    X = object()
    kill_result = gevent.with_timeout(
        DELAY, s.kill, block=True, timeout_value=X)
    assert kill_result is X, repr(kill_result)
    assert len(s) == 2, s
    check(1, 1)
    # Finally terminate both with an error type that does end them.
    p1.kill(SpecialError)
    p2.kill(SpecialError)
def __init__(self):
    """Start with no clients and an empty connection group."""
    self.current_idx, self.client_list = 0, {}
    self.connect_pool = pool.Group()
def create_connection(
    address,
    timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
    dns_timeout=None,
    source_address=None,
    use_happyeyeballs=True,
    prepare=None,
):
    """Open a TCP connection to *address*, racing IPv6 against IPv4
    ("happy eyeballs" style) unless disabled or the host is a literal IP.

    Args:
        address: ``(host, port, ...)`` tuple; extra elements are ignored.
        timeout: overall deadline; the socket module's hidden default
            sentinel means "no timeout".
        dns_timeout: per-family getaddrinfo timeout (None = unbounded).
        source_address: optional local address to bind before connecting.
        use_happyeyeballs: when False (or host is a literal IP address),
            delegate to the plain ``_create_connection`` path.
        prepare: optional callback invoked with each socket before connect.

    Returns:
        A connected socket.

    Raises:
        socket.error: when all DNS lookups or all connect attempts failed.
        socket.timeout: when the overall deadline expires.
    """
    _log.debug("create_connection %r", address)
    (host, port, *_) = address
    # A literal IP needs no DNS race; fall back to the plain code path.
    try:
        ipaddress.ip_address(host)
        use_happyeyeballs = False
    except ValueError:
        pass
    if not use_happyeyeballs:
        # TODO: a bit problematic we use socket's hidden timeout sentinel
        # as our default, but it hasn't changed for 12 years so we're probably
        # gonna be fine; maybe!
        return _create_connection(address, timeout=timeout,
                                  source_address=source_address,
                                  prepare=prepare)
    group = pool.Group()
    # TODO: OK, I'm gonna be honest: this system of greenlet orchestration
    # is really, uhh, let's just say not good; the proper way of implementing
    # this would be something like curio's TaskGroup: a Group that tracks the
    # completion states of its members.
    #
    # Bus message protocol (op, payload):
    # (0, (family, addr)) = success (gai)
    # (1, sock) = success (result)
    # (-1, (family, None, exc)) = fail (gai)
    # (-2, (family, addr, exc)) = fail (connect)
    bus = queue.Queue()

    def _do_gai(family, proto=0, flags=0):
        # Resolve host/port for one address family and publish every
        # resulting address on the bus; errors become (-1, ...) records.
        _log.debug("_do_gai: started family=%s, proto=%d, flags=%s",
                   family, proto, flags)
        try:
            addrs = gevent.with_timeout(
                dns_timeout,
                socket.getaddrinfo,
                host,
                port,
                family,
                socket.SOCK_STREAM,
                proto,
                flags,
            )
            _log.debug("_do_gai: finished family=%s, addrs=%r",
                       family, addrs)
            # Pop from the tail, so addresses are published in reverse
            # getaddrinfo order.
            while addrs:
                (*_, addr) = addrs.pop()
                bus.put((0, (family, addr)))
        except _Cancel:
            _log.debug("_do_gai: cancelled family=%s", family)
        except gevent.Timeout:
            bus.put((-1, (family, None,
                          socket.gaierror(-errno.ETIMEDOUT, "Timed out"))))
        except Exception as e:
            bus.put((-1, (family, None, e)))

    # One resolver per family; the IPv6 query also asks for V4-mapped
    # addresses.
    dns_attempts = 2
    group.apply_async(_do_gai, args=(socket.AF_INET6, 0, socket.AI_V4MAPPED))
    group.apply_async(_do_gai, args=(socket.AF_INET, ))

    def _do_connect(family, addr):
        _log.debug("_do_connect: started family=%s, addr=%s", family, addr)
        # TODO: god I hate the flow of logic in this proc
        sock = socket.socket(family, socket.SOCK_STREAM)
        if source_address:
            sock.bind(source_address)
        if prepare:
            prepare(sock)
        try:
            sock.connect(addr)
            _log.debug(
                "_do_connect: finished family=%s, addr=%s, socket=%r",
                family,
                addr,
                sock,
            )
        except _Cancel:
            _log.debug("_do_connect: cancelled family=%s, addr=%s",
                       family, addr)
        except Exception as e:
            bus.put((-2, (family, addr, e)))
        except:
            # Non-Exception BaseException: close the fd and propagate.
            sock.close()
            raise
        else:
            # Success: hand the connected socket to the consumer loop.
            return bus.put((1, sock))
        # Handled failure paths fall through here and release the fd.
        sock.close()

    do_later = queue.Queue()
    started_ipv6 = event.Event()

    def _laterlet():
        # Staggers IPv4 connects: wait up to RESOLVE_DELAY for IPv6 to
        # start, then (if it did) sleep CONNECT_DELAY before draining the
        # deferred-work queue. Runs until killed with _Cancel.
        try:
            stagger = started_ipv6.wait(timeout=RESOLVE_DELAY)
            if stagger:
                gevent.sleep(CONNECT_DELAY)
            for cb, args, kwds in do_later:
                group.apply_async(cb, args, kwds)
        except _Cancel:
            pass

    group.apply_async(_laterlet)
    if timeout is socket._GLOBAL_DEFAULT_TIMEOUT:
        timeout = None
    started = time.monotonic()
    conn_attempts = 0
    errors = []
    t = timeout
    try:
        # Consumer loop: react to bus messages until a socket arrives,
        # every attempt has failed, or the overall deadline expires.
        while True:
            # TODO: technically this is not right:
            # we take dns query times into account as timeout
            # no good; though there's no better way of solving it
            # without restructuring the entire algo
            if t is not None:
                t = max(MIN_TIMEOUT, timeout - (time.monotonic() - started))
            op, rest = bus.get(timeout=t)
            _log.debug("bus get op %d with payload %s", op, rest)
            _log.debug("error states = %r", errors)
            if op == 1:
                # First successful connect wins.
                return rest
            elif op == -1:
                errors.append(rest)
                dns_attempts -= 1
                if dns_attempts <= 0:
                    raise socket.error(errors)
                continue
            elif op == -2:
                errors.append(rest)
                conn_attempts -= 1
                if conn_attempts <= 0:
                    raise socket.error(errors)
                continue
            # op == 0: a resolved address; schedule a connect attempt.
            family, addr = rest
            conn_attempts += 1
            if family == socket.AF_INET:
                # IPv4 is deferred through _laterlet to give IPv6 a
                # head start.
                do_later.put((_do_connect, (socket.AF_INET, addr), {}))
            else:
                started_ipv6.set()
                group.apply_async(_do_connect, (socket.AF_INET6, addr))
    except queue.Empty:
        raise socket.timeout("timed out")
    finally:
        # Cancel every outstanding resolver/connect/stagger greenlet.
        group.kill(_Cancel)
def __init__(self):
    """Initialise base client state plus gevent synchronisation objects."""
    NetworkClient.__init__(self)
    self.shutdown_event = event.Event()
    self.connection_group = pool.Group()  # greenlets owned by this client
    self.socket = None
    self.lock = gevent.lock.RLock()
def test_killall_subclass(self):
    """Group.kill must also work on Greenlet subclasses."""
    failing = GreenletSubclass.spawn(lambda: 1 / 0)
    sleeping = GreenletSubclass.spawn(lambda: gevent.sleep(10))
    group = pool.Group([failing, sleeping])
    group.kill()