async def read(reader: StreamReader, e: asyncio.Event):
    while not e.is_set():
        got = await tcp_recv(reader, DELIMIT)
        print(f"recv >> {got}")
        if KILL in got:
            e.set()
class ProgressBarActor:
    counter: int
    delta: int
    event: Event

    def __init__(self) -> None:
        self.counter = 0
        self.delta = 0
        self.event = Event()

    def update(self, num_items_completed: int) -> None:
        """Updates the ProgressBar with the incremental number of items that
        were just completed.
        """
        self.counter += num_items_completed
        self.delta += num_items_completed
        self.event.set()

    async def wait_for_update(self) -> Tuple[int, int]:
        """Blocking call.

        Waits until somebody calls `update`, then returns a tuple of the
        number of updates since the last call to `wait_for_update`, and the
        total number of completed items.
        """
        await self.event.wait()
        self.event.clear()
        saved_delta = self.delta
        self.delta = 0
        return saved_delta, self.counter

    def get_counter(self) -> int:
        """Returns the total number of complete items."""
        return self.counter
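# A minimal usage sketch (assumed, not part of the original snippet): one
# coroutine drives `update` while another awaits `wait_for_update` until all
# items are reported complete.
import asyncio

async def _progress_demo(total: int = 5) -> None:
    actor = ProgressBarActor()

    async def worker() -> None:
        for _ in range(total):
            await asyncio.sleep(0.01)
            actor.update(1)

    async def reporter() -> None:
        done = 0
        while done < total:
            delta, done = await actor.wait_for_update()
            print(f"progress: {done}/{total} (+{delta})")

    await asyncio.gather(worker(), reporter())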
async def test_pkg_loader_consolidates_concurrent_fetch(executor):
    loop = get_event_loop()
    pkg_ref, contents = load_some_bytes()
    evt1 = Event()
    evt2 = Event()

    class MockPackageService:
        def __init__(self):
            self.call_count = 0

        async def get_package(self, package_id: "PackageRef") -> bytes:
            self.call_count += 1
            if package_id != pkg_ref:
                raise Exception
            evt1.set()
            await evt2.wait()
            return contents

        async def list_package_ids(self) -> "AbstractSet[PackageRef]":
            # we don't expect this method to be called in the test
            raise Exception

    conn = MockPackageService()
    lookup = MultiPackageLookup()
    loader = PackageLoader(lookup, conn)

    # first, call the PackageLoader.load coroutine
    fut1 = ensure_future(loader.load(pkg_ref))

    # wait until we are definitely in the MockPackageService.get_package call
    # in one of PackageLoader's background threads
    await loop.run_in_executor(executor, lambda: evt1.wait())

    # now schedule a _second_ PackageLoader.load coroutine; because the first
    # one is still in progress, this should NOT result in a second call to
    # MockPackageService.get_package
    fut2 = ensure_future(loader.load(pkg_ref))

    # allow coroutines some time to screw things up
    await sleep(0.1)

    # make sure that neither call to PackageLoader.load has actually come back yet
    assert not fut1.done()
    assert not fut2.done()

    # now unblock MockPackageService.get_package, which will return our bytes
    evt2.set()

    # grab the results from both async PackageLoader.load coroutine calls
    pkg1 = await fut1
    pkg2 = await fut2

    # we should have only called get_package once
    assert conn.call_count == 1

    # the two Package objects that come back should be identical; creating
    # Package objects is expensive, but they are also immutable, so the two
    # calls should return the same instance as an optimization
    assert pkg1 is pkg2
class StreamsLimit:
    def __init__(self, limit=None, *, loop):
        self._limit = limit
        self._current = 0
        self._loop = loop
        self._release = Event(loop=loop)

    def reached(self):
        if self._limit is not None:
            return self._current >= self._limit
        else:
            return False

    async def wait(self):
        # TODO: use FIFO queue for waiters
        if self.reached():
            self._release.clear()
            await self._release.wait()

    def acquire(self):
        self._current += 1

    def release(self):
        self._current -= 1
        if not self.reached():
            self._release.set()

    def set(self, value: Optional[int]):
        assert value is None or value >= 0, value
        self._limit = value
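# A minimal usage sketch (assumed): a caller first waits for capacity, then
# acquires a slot, and releases it when the stream is done; release() wakes a
# pending wait() once the count drops below the limit again.
async def _with_stream_slot(limit: StreamsLimit) -> None:
    await limit.wait()
    limit.acquire()
    try:
        ...  # use the stream
    finally:
        limit.release()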
class UpdateToken:
    def __init__(self, config_vk):
        self.log = Log("UpdateToken")
        self.config = config_vk
        self.client_id = self.config.client_id
        self.finished = Event()

    async def __call__(self) -> None:
        async with RedirectServer() as server:
            redirect_address = server.redirect_address
            url = "https://oauth.vk.com/authorize" \
                  f"?client_id={self.client_id}" \
                  "&display=page" \
                  f"&redirect_uri={redirect_address}" \
                  "&scope=friends,wall,offline,groups" \
                  "&response_type=token" \
                  "&v=5.103"
            webbrowser.open_new(url)
            data = await server()
            self.config.token = data['access_token']
            self.config.user_id = data['user_id']
            self.config.update()
            self.log.info("Token updated")
            self.finished.set()
async def test_will_wait_for_cancel_before_end(qtbot, task_runner):
    event_1 = Event()
    event_2 = Event()
    event_3 = Event()
    event_4 = Event()
    result = Result()

    def _task():
        result.set(42)
        event_1.set()
        while not event_2.is_set():
            pass
        result.set(24)
        event_3.set()
        task_runner.raiseForCancelled()
        while not event_4.is_set():
            pass
        result.set(7)

    with qtbot.waitSignal(task_runner.taskCancelled, timeout=10000):
        task_runner.run_task(_task)
        await wait_for(event_1.wait(), 2)
        task_runner.cancel()
        event_2.set()
        await wait_for(event_3.wait(), 2)
        event_4.set()

    assert result.value == 24
async def producer(queue: asyncio.Queue, event: asyncio.Event) -> None:
    for i in itertools.cycle([0, 1, 0, 2, 0, 0, 3, 0, 0, 4]):
        print("Running producer...")
        if i:
            await queue.put(i)
            event.set()
        await asyncio.sleep(1)
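# A hypothetical consumer counterpart (an assumption, not in the original):
# it sleeps on the event until the producer signals that items were queued,
# then drains the queue.
import asyncio

async def consumer(queue: asyncio.Queue, event: asyncio.Event) -> None:
    while True:
        await event.wait()
        event.clear()
        while not queue.empty():
            item = queue.get_nowait()
            print(f"Consumed {item}")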
class ProgressActor:
    def __init__(self):
        self.progress = {}
        self.event = Event()

    def update(self, key, delta=1):
        if key in self.progress:
            self.progress[key] += delta
            self.event.set()
        else:
            self.progress[key] = delta
            self.progress = {
                key: self.progress[key]
                for key in sorted(self.progress.keys())
            }
            self.event.set()

    def update_multi(self, keys, delta=1):
        for key in keys:
            self.update(key, delta)

    async def wait_for_update(self):
        await self.event.wait()
        self.event.clear()
        return self.progress
class Garage:
    def __init__(self, sledge: Sledge):
        self._sledge = sledge
        self._lock = Lock()
        self._new_sledge = Event()

    @property
    def sledges(self) -> Iterable[Sledge]:
        """All sledges of the garage"""
        yield self._sledge

    async def wait_new_sledge(self):
        """Wait for a "new" sledge coming back from delivery"""
        await self._new_sledge.wait()

    @asynccontextmanager
    async def use_sledge(self) -> AsyncIterator[Sledge]:
        """Use a sledge of the garage"""
        async with self._lock:
            yield self._sledge

    async def deliver(self):
        """Send a sledge in delivery"""
        async with self.use_sledge() as sledge:
            try:
                await sledge.deliver()
                self._new_sledge.set()
                self._new_sledge.clear()
            except (ReindeerHungry, SledgeEmpty):
                pass
class CPUThrottle:
    def __init__(self, max_cpus):
        self._max_cpus = max_cpus
        self._cpus_allocated = 0
        self._cpus_allocated_changed = Event()

    @property
    def cpus_allocated(self):
        return self._cpus_allocated

    @cpus_allocated.setter
    def cpus_allocated(self, v):
        self._cpus_allocated = v
        self._cpus_allocated_changed.set()

    @asynccontextmanager
    async def jobs_limit(self, cpus):
        while self.cpus_allocated + cpus > self._max_cpus:
            log.info(
                f"CPU allocation exhausted, allocated {self.cpus_allocated}/{self._max_cpus}"
            )
            await self._cpus_allocated_changed.wait()
            self._cpus_allocated_changed.clear()
        try:
            self.cpus_allocated += cpus
            log.info(
                f"allocated {cpus}, total allocation {self.cpus_allocated}")
            yield
        finally:
            # make sure we count down,
            # even if the job failed or was cancelled
            self.cpus_allocated -= cpus
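# A minimal usage sketch (assumed): each job enters jobs_limit so that the sum
# of allocated CPUs never exceeds max_cpus; asyncio.sleep stands in for the
# actual workload.
import asyncio

async def _run_throttled(throttle: CPUThrottle, cpus: int) -> None:
    async with throttle.jobs_limit(cpus):
        await asyncio.sleep(1)  # stand-in for the real job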
class Connection:
    """
    Holds connection state (write_ready), and manages
    H2Connection <-> Transport communication
    """
    def __init__(self, connection: H2Connection, transport: Transport, *,
                 loop: AbstractEventLoop) -> None:
        self._connection = connection
        self._transport = transport
        self._loop = loop

        self.write_ready = Event(loop=self._loop)
        self.write_ready.set()

        self.outbound_streams_limit = StreamsLimit(loop=self._loop)

    def feed(self, data):
        return self._connection.receive_data(data)

    def pause_writing(self):
        self.write_ready.clear()

    def resume_writing(self):
        self.write_ready.set()

    def create_stream(self, stream_id=None):
        return Stream(self, self._connection, self._transport, stream_id,
                      loop=self._loop)

    def flush(self):
        self._transport.write(self._connection.data_to_send())

    def close(self):
        self._transport.close()
class EventLock(Lock):
    """
    This lock contains an asyncio.Event instance which is automatically set
    and 'broadcast' on release.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.has_unlocked = Event()

    # ==========================================================================dd==
    #   PUBLIC METHODS
    # ==========================================================================dd==

    async def acquire(self) -> bool:
        locked = await super().acquire()
        if locked:
            self.has_unlocked.clear()
        return locked

    def release(self):
        super().release()
        self.has_unlocked.set()

    async def is_unlocked_or_wait(self):
        if self.locked():
            await self.has_unlocked.wait()
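# A minimal usage sketch (assumed): a coroutine that does not need ownership
# of the lock can still block until the current holder releases it.
async def _wait_until_released(lock: EventLock) -> None:
    await lock.is_unlocked_or_wait()
    # at this point the lock has been released at least once (or was never held)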
class PricesManager(Initializable):
    MARK_PRICE_TIMEOUT = 60

    def __init__(self):
        super().__init__()
        self.logger = get_logger(self.__class__.__name__)
        self.mark_price = 0
        self.prices_initialized_event = Event()

    async def initialize_impl(self):
        self.__reset_prices()

    def set_mark_price(self, mark_price):
        self.mark_price = mark_price
        self.prices_initialized_event.set()

    async def get_mark_price(self, timeout=MARK_PRICE_TIMEOUT):
        await wait_for(self.prices_initialized_event.wait(), timeout)
        return self.mark_price

    def __reset_prices(self):
        self.mark_price = 0

    @staticmethod
    def calculate_mark_price_from_recent_trade_prices(recent_trade_prices):
        return sum(recent_trade_prices) / len(recent_trade_prices)
class PricesManager(Initializable):
    MARK_PRICE_TIMEOUT = 60

    def __init__(self):
        super().__init__()
        self.logger = get_logger(self.__class__.__name__)
        self.mark_price = 0

        # warning: should only be created in the async loop thread
        self.prices_initialized_event = Event()

    async def initialize_impl(self):
        self.__reset_prices()

    def set_mark_price(self, mark_price):
        self.mark_price = mark_price
        self.prices_initialized_event.set()

    async def get_mark_price(self, timeout=MARK_PRICE_TIMEOUT):
        if not self.prices_initialized_event.is_set():
            await wait_for(self.prices_initialized_event.wait(), timeout)
        return self.mark_price

    def __reset_prices(self):
        self.mark_price = 0
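# A minimal usage sketch (assumed, works with either PricesManager variant
# above): a feed callback publishes the first mark price while a consumer
# blocks in get_mark_price until it arrives or the timeout expires.
async def _consume_mark_price(manager: PricesManager) -> None:
    price = await manager.get_mark_price()  # waits up to MARK_PRICE_TIMEOUT
    print(f"mark price: {price}")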
async def _discover_refresh_stream(self, event: Event) -> None:
    """Discovers or refreshes a discovered CrowdStrike Falcon stream in a loop.

    Sleeps for 25 minutes (expiry time is 30 minutes) between operations.

    Args:
        event (Event): Asynchronous event object to set or clear its internal flag.
    """
    client = Client(base_url=self.base_url, app_id=self.app_id,
                    verify_ssl=self.verify_ssl, proxy=self.proxy)
    while True:
        if client.refresh_stream_url:
            # We already discovered an event stream, need to refresh it
            demisto.debug('Starting stream refresh')
            client.refresh_stream_session()
            demisto.debug('Finished stream refresh')
        else:
            # We have no event stream, need to discover
            await client.set_access_token(self.refresh_token)
            demisto.debug('Starting stream discovery')
            discover_stream_response = client.discover_stream()
            demisto.debug('Finished stream discovery')
            resources = discover_stream_response.get('resources', [])
            if not resources:
                raise ValueError(f'Did not get event stream resources - {str(discover_stream_response)}')
            resource = resources[0]
            self.data_feed_url = resource.get('dataFeedURL')
            self.session_token = resource.get('sessionToken', {}).get('token')
            refresh_url = resource.get('refreshActiveSessionURL')
            client.refresh_stream_url = refresh_url
        event.set()
        await sleep(MINUTES_25)
        event.clear()
class Restarter(Startable):
    @inject
    def __init__(self, server: UiServer, config: Config):
        self._server = server
        self._config = config
        self._old_options = config.getServerOptions()
        self._restarted = Event()

    async def start(self):
        self._config.subscribe(self.trigger)

    async def check(self):
        if self._old_options == self._config.getServerOptions():
            # no restart is necessary because the server didn't change
            return
        self._old_options = self._config.getServerOptions()
        try:
            # Restart the server
            logger.info("Restarting Web-UI server")
            await self._server.run()
            self._restarted.set()
        except Exception as e:
            logger.error("Problem while restarting the Web-UI server " +
                         logger.formatException(e))

    def trigger(self):
        create_task(self.check(), name="Web-UI Restarter")

    async def waitForRestart(self):
        await self._restarted.wait()
        self._restarted.clear()
async def send_task(
    s_sender: asyncio.StreamWriter,
    q: asyncio.Queue,
    e: asyncio.Event,
    delimiter: bytes,
    timeout=None,
):
    print("[SEND][INFO] Started")
    try:
        while True:
            try:
                n = await asyncio.wait_for(q.get(), timeout)
                q.task_done()
            except asyncio.TimeoutError:
                if e.is_set():
                    print(SharedData.bold("[SEND][INFO] Event set!"))
                    return
            else:
                try:
                    await tcp_send(n, s_sender, delimiter, timeout)
                except asyncio.TimeoutError:
                    # really just want to use logging and dump logs in other thread..
                    print(SharedData.red("[SEND][CRIT] Connection Broken!"))
                    break
    except Exception:
        print(SharedData.bold("[SEND][CRIT] Stopping SEND!"))
        e.set()
        raise
class Buffer:
    def __init__(self, stream_id, connection, h2_connection, *,
                 loop: AbstractEventLoop) -> None:
        self._stream_id = stream_id
        self._connection = connection
        self._h2_connection = h2_connection

        self._chunks = []  # type: List[bytes]
        self._size = 0
        self._read_size = None
        self._ready_event = Event(loop=loop)
        self._eof = False

    def _ack(self, size):
        if size:
            self._h2_connection.acknowledge_received_data(
                size, self._stream_id)
            self._connection.flush()

    def append(self, data):
        size = len(data)
        self._chunks.append(data)
        self._size += size
        if self._read_size is not None:
            self._ack(min(max(size - self._size + self._read_size, 0), size))
            if self._size >= self._read_size:
                self._ready_event.set()

    def eof(self):
        self._eof = True
        self._ready_event.set()

    async def read(self, size):
        if size < 0:
            raise ValueError('Size can not be negative')
        elif size == 0:
            return b''
        else:
            if self._size < size and not self._eof:
                self._read_size = size
                self._ready_event.clear()
                self._ack(self._size)
                await self._ready_event.wait()
                self._read_size = None
            elif self._size >= size:
                self._ack(size)
            else:
                assert self._eof
                self._ack(self._size)

            data, self._chunks = _slice(self._chunks, size)
            data_bytes = b''.join(data)
            data_size = len(data_bytes)
            self._size -= data_size
            if 0 < data_size < size:
                # TODO: proper exception
                raise Exception('Incomplete data, {} instead of {}'.format(
                    data_size, size))
            return data_bytes
async def recv_task(
    s_receiver: asyncio.StreamReader,
    q: asyncio.Queue,
    e: asyncio.Event,
    delimiter: bytes,
    timeout=None,
):
    print("[RECV][INFO] Started")
    try:
        while True:
            try:
                data = await tcp_recv(s_receiver, delimiter, timeout)
            except asyncio.TimeoutError:
                print('[RECV][WARN] TIMEOUT')
                if e.is_set():
                    print(SharedData.bold("[RECV][INFO] Event set!"))
                    return
            except asyncio.IncompleteReadError:
                print(SharedData.red("[RECV][CRIT] Disconnected!"))
                e.set()
                return
            else:
                await q.put(data)
    except Exception:
        print(SharedData.bold("[RECV][CRIT] Stopping RECV!"))
        e.set()
        raise
class RPC:
    def __init__(self, rpc_id, socket: WebSocketResponse,
                 command_name: str, args: list = None):
        self.id = rpc_id
        self.command_name = command_name
        self.args = args if args is not None else []
        self.return_data = None
        self._response_event = Event()
        self._resolved = False
        self._socket = socket

    async def __call__(self):
        """This function calls the RPC, sending the json to the bike,
        and waits for the response event."""
        await self._socket.send_json({
            "jsonrpc": "2.0",
            "id": self.id,
            "method": self.command_name,
            "params": self.args,
        })
        await self._response_event.wait()
        return self.return_data

    async def resolve(self, return_data):
        """Resolves the RPC, setting its return data and giving control
        back to the caller."""
        if self._resolved:
            raise ValueError("This RPC has already been resolved.")
        self._resolved = True
        self.return_data = return_data
        self._response_event.set()
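# A minimal usage sketch (assumed): the caller awaits the RPC while a separate
# receive loop resolves it when the matching response arrives. `pending_rpcs`
# is a hypothetical registry mapping rpc id -> RPC instance.
async def _handle_response(pending_rpcs: dict, message: dict) -> None:
    rpc = pending_rpcs[message["id"]]
    await rpc.resolve(message["result"])  # wakes the coroutine awaiting rpc()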
async def _do_mock_response(request_type: Type[BaseEvent],
                            response: BaseEvent,
                            event_bus: EndpointAPI,
                            ready: asyncio.Event) -> None:
    ready.set()
    async for req in event_bus.stream(request_type):
        await event_bus.broadcast(response, req.broadcast_config())
        break
class Connection:
    """
    Holds connection state (write_ready), and manages
    H2Connection <-> Transport communication
    """
    def __init__(
        self,
        connection: H2Connection,
        transport: Transport,
        *,
        loop: AbstractEventLoop,
    ) -> None:
        self._connection = connection
        self._transport = transport
        self._loop = loop

        self.write_ready = Event(loop=self._loop)
        self.write_ready.set()

        self.stream_close_waiter = Event(loop=self._loop)

    def feed(self, data: bytes) -> List[H2Event]:
        return self._connection.receive_data(data)  # type: ignore

    def ack(self, stream_id: int, size: int) -> None:
        if size:
            self._connection.acknowledge_received_data(size, stream_id)
            self.flush()

    def pause_writing(self) -> None:
        self.write_ready.clear()

    def resume_writing(self) -> None:
        self.write_ready.set()

    def create_stream(
        self,
        *,
        stream_id: Optional[int] = None,
        wrapper: Optional[Wrapper] = None,
    ) -> 'Stream':
        return Stream(self, self._connection, self._transport,
                      loop=self._loop, stream_id=stream_id, wrapper=wrapper)

    def flush(self) -> None:
        data = self._connection.data_to_send()
        if data:
            self._transport.write(data)

    def close(self) -> None:
        if hasattr(self, '_transport'):
            self._transport.close()
            # remove cyclic references to improve memory usage
            del self._transport
            if hasattr(self._connection, '_frame_dispatch_table'):
                del self._connection._frame_dispatch_table
class Poster:
    def __init__(self, term, x, y, duration=2, content='', color='white'):
        self.term = term
        self.duration = duration
        self.x, self.y = x, y
        self.color = color
        self.content = content
        self.event = Event()
        self.event.set()

    async def display(self, msg):
        await self.event.wait()
        self.event.clear()
        self.content = msg
        self.redraw()
        await sleep(self.duration)
        self.event.set()

    def redraw(self):
        if not self.content or len(self.content.strip()) == 0:
            return
        with self.term.hidden_cursor(), self.term.location(*self.get_pos()):
            echo(use_color(self.term, self.color, f' {self.content} '))

    def get_pos(self):
        width = len(self.content) + 2
        x = self.x if self.x >= 0 else self.term.width + self.x - width
        y = self.y if self.y >= 0 else self.term.height + self.y
        return x, y
async def listen_notify(
    self, event: asyncio.Event, channels: Iterable[str]
) -> None:
    pool = await self._get_pool()
    # We need to acquire a dedicated connection, and use the listen
    # query
    if pool.maxsize == 1:
        logger.warning(
            "Listen/Notify capabilities disabled because maximum pool size "
            "is set to 1",
            extra={"action": "listen_notify_disabled"},
        )
        return
    while True:
        async with pool.acquire() as connection:
            for channel_name in channels:
                await self._execute_query_connection(
                    connection=connection,
                    query=self._make_dynamic_query(
                        query=sql.queries["listen_queue"],
                        channel_name=channel_name,
                    ),
                )
            # Initial set() lets caller know that we're ready to listen
            event.set()
            await self._loop_notify(event=event, connection=connection)
async def serve(self, shutdown_event):
    '''Start the RPC server and wait for the mempool to synchronize.

    Then start serving external clients.
    '''
    if not (0, 7, 1) <= aiorpcx_version < (0, 8):
        raise RuntimeError('aiorpcX version 0.7.x required with x >= 1')

    env = self.env
    min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
    self.logger.info(f'software version: {electrumx.version}')
    self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
    self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
    self.logger.info(f'event loop policy: {env.loop_policy}')
    self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')

    notifications = Notifications()
    daemon = env.coin.DAEMON(env)
    BlockProcessor = env.coin.BLOCK_PROCESSOR
    bp = BlockProcessor(env, daemon, notifications)
    mempool = MemPool(env.coin, daemon, notifications, bp.lookup_utxos)
    chain_state = ChainState(env, daemon, bp)
    session_mgr = SessionManager(env, chain_state, mempool, notifications,
                                 shutdown_event)

    caught_up_event = Event()
    serve_externally_event = Event()
    synchronized_event = Event()

    async with TaskGroup() as group:
        await group.spawn(session_mgr.serve(serve_externally_event))
        await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
        await caught_up_event.wait()
        await group.spawn(mempool.keep_synchronized(synchronized_event))
        await synchronized_event.wait()
        serve_externally_event.set()
async def test_can_queue_tasks(qtbot, task_runner):
    event_0 = Event()
    event_1 = Event()
    result_1 = Result()
    event_2 = Event()
    result_2 = Result()

    def _task(start_event):
        def _():
            while not start_event.is_set():
                pass
            return 42
        return _

    handler_1 = _handler(event_1, result_1)
    handler_2 = _handler(event_2, result_2)

    with qtbot.waitSignal(task_runner.taskCompleted, timeout=10000):
        task_runner.run_task(_task(event_0), on_completed=handler_1)
        task_runner.run_task(_task(event_1), on_completed=handler_2)
        event_0.set()
    with qtbot.waitSignal(task_runner.taskCompleted, timeout=10000):
        pass

    await wait_for(event_2.wait(), 2)
    assert result_1.value == 42
    assert result_2.value == 42
async def subscribe(self, block_height, event: Event):
    self.__exception = None
    try:
        # set websocket payload maxsize to 4MB.
        async with websockets.connect(
                self.__target_uri,
                max_size=4 * conf.MAX_TX_SIZE_IN_BLOCK) as websocket:
            event.set()
            logging.debug("Websocket connection is completed.")
            request = Request("node_ws_Subscribe",
                              height=block_height,
                              peer_id=ChannelProperty().peer_id)
            await websocket.send(json.dumps(request))
            await self.__subscribe_loop(websocket)
    except InvalidStatusCode as e:
        if not self.__tried_with_old_uri:
            await self.try_subscribe_to_past_uri(block_height, event)
            return
        logging.warning(
            f"websocket subscribe {type(e)} exception, caused by: {e}\n"
            f"This target({self.__rs_target}) may not support websocket yet.")
        raise NotImplementedError
    except Exception as e:
        traceback.print_exc()
        logging.error(
            f"websocket subscribe exception, caused by: {type(e)}, {e}")
        raise ConnectionError
class TimeTicker:
    """An event source which supports queue based notification"""

    def __init__(self) -> None:
        self.shutdown_event = Event()
        self.listeners: List[Queue] = []

    async def start(self) -> None:
        """Start generating events"""
        while not self.shutdown_event.is_set():
            now = datetime.now()
            for listener in self.listeners:
                await listener.put(now)
            try:
                await asyncio.wait_for(self.shutdown_event.wait(), timeout=1)
            except asyncio.TimeoutError:
                pass
            except:  # pylint: disable=bare-except
                LOGGER.exception('Cancelled')

    def stop(self):
        """Stop the event source"""
        self.shutdown_event.set()

    def add_listener(self) -> Queue:
        """Add a listener to the event source"""
        LOGGER.debug('Adding a listener')
        listener: Queue = Queue()
        self.listeners.append(listener)
        return listener

    def remove_listener(self, listener: Queue) -> None:
        """Remove a listener from the event source"""
        self.listeners.remove(listener)
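# A minimal usage sketch (assumed): run the ticker as a background task,
# consume a few timestamps from a listener queue, then shut it down.
import asyncio

async def _consume_ticks() -> None:
    ticker = TimeTicker()
    queue = ticker.add_listener()
    task = asyncio.create_task(ticker.start())
    for _ in range(3):
        print(f"tick: {await queue.get()}")
    ticker.stop()
    await task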
async def _websocket_loop(self, start_event: asyncio.Event) -> None:
    _LOGGER.debug("Connecting WS to %s", self.url)
    self._headers = await self._auth(self._should_reset_auth)
    session = self._get_session()

    # catch any and all errors for Websocket so we can clean up correctly
    try:
        self._ws_connection = await session.ws_connect(
            self.url, ssl=self.verify, headers=self._headers)
        start_event.set()
        await self._reset_timeout()
        async for msg in self._ws_connection:
            if not self._process_message(msg):
                break
            await self._reset_timeout()
    except ClientError as e:
        _LOGGER.exception("Websocket disconnect error: %s", e)
    finally:
        _LOGGER.debug("Websocket disconnected")
        self._increase_failure()
        self._cancel_timeout()
        if self._ws_connection is not None and not self._ws_connection.closed:
            await self._ws_connection.close()
        if not session.closed:
            await session.close()
        self._ws_connection = None
        # make sure event does not timeout
        start_event.set()
class StreamLog:
    def __init__(self):
        self.stream = []
        self.written = Event()
        self.open = True

    def __aiter__(self):
        return self.retrieve()

    def put(self, **data):
        # logger.debug('[%s]: %s', channel, line.replace('\n', ''))
        if not self.open:
            raise EOFError("StreamLog is closed")
        if 'time' not in data:
            data['time'] = datetime.utcnow()
        self.stream.append(dict(**data))
        self.written.set()
        self.written.clear()

    def close(self):
        self.open = False
        self.written.set()

    def retrieve_partial(self):
        return list(self.stream)

    async def retrieve(self):
        for record in self.stream:
            yield record
        while self.open:
            last = len(self.stream)
            await self.written.wait()
            for record in self.stream[last:]:
                yield record
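# A minimal usage sketch (assumed): a reader follows the log with `async for`,
# which first replays the existing records and then tails new ones until
# close() is called.
async def _tail(log: StreamLog) -> None:
    async for record in log:
        print(record)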
class CompoundQueue(GeneratorQueue):
    stop_event = None
    ready = None
    loop = None
    queues = None

    def __init__(self, queues, loop):
        self.ready = Event(loop=loop)
        self.stop_event = Event(loop=loop)
        self.queues = queues
        self.loop = loop

    async def start(self):
        if self.stop_event.is_set():
            raise QueueError("Socket already stopped.")
        await self.do_action("start")
        self.ready.set()

    @dies_on_stop_event
    async def get(self):
        raise NotImplementedError()

    @dies_on_stop_event
    async def put(self, data):
        await self.setup()
        await self.ready.wait()
        await self.do_action("put", (data,))

    async def setup(self):
        """Setup the client."""
        if not self.ready.is_set():
            await self.start()

    async def stop(self):
        """Stop queue."""
        self.ready.clear()
        self.stop_event.set()
        await self.do_action("stop")

    async def do_action(self, name, args=()):
        coroutines = [getattr(i, name) for i in self.queues]
        tasks = [i(*args) for i in coroutines]
        await wait(tasks, loop=self.loop)
def _perform_heartbeat_loop(self):
    if self._heartbeat_call is not None:
        # TODO: cancel call
        pass

    cancellation_event = Event()
    state_payload = self._subscription_state.state_payload()
    presence_channels = self._subscription_state.prepare_channel_list(False)
    presence_groups = self._subscription_state.prepare_channel_group_list(False)

    if len(presence_channels) == 0 and len(presence_groups) == 0:
        return

    try:
        heartbeat_call = (Heartbeat(self._pubnub)
                          .channels(presence_channels)
                          .channel_groups(presence_groups)
                          .state(state_payload)
                          .cancellation_event(cancellation_event)
                          .future())

        envelope = yield from heartbeat_call

        heartbeat_verbosity = self._pubnub.config.heartbeat_notification_options
        if envelope.status.is_error():
            if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL or \
                    heartbeat_verbosity == PNHeartbeatNotificationOptions.FAILURES:
                self._listener_manager.announce_status(envelope.status)
        else:
            if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL:
                self._listener_manager.announce_status(envelope.status)
    except PubNubAsyncioException as e:
        pass
        # TODO: check correctness
        # if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
        #     self._start_subscribe_loop()
        # else:
        #     self._listener_manager.announce_status(e.status)
    finally:
        cancellation_event.set()
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()
        self.error_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()
        elif status.is_error():
            self.error_queue.put_nowait(status.error_data.exception)

    def message(self, pubnub, message):
        self.message_queue.put_nowait(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put_nowait(presence)

    @asyncio.coroutine
    def _wait_for(self, coro):
        scc_task = asyncio.ensure_future(coro)
        err_task = asyncio.ensure_future(self.error_queue.get())

        yield from asyncio.wait([scc_task, err_task],
                                return_when=asyncio.FIRST_COMPLETED)

        if err_task.done() and not scc_task.done():
            if not scc_task.cancelled():
                scc_task.cancel()
            raise err_task.result()
        else:
            if not err_task.cancelled():
                err_task.cancel()
            return scc_task.result()

    @asyncio.coroutine
    def wait_for_connect(self):
        if not self.connected_event.is_set():
            yield from self._wait_for(self.connected_event.wait())
        else:
            raise Exception("instance is already connected")

    @asyncio.coroutine
    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            yield from self._wait_for(self.disconnected_event.wait())
        else:
            raise Exception("instance is already disconnected")

    @asyncio.coroutine
    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:
                env = yield from self._wait_for(self.message_queue.get())
                if env.channel in channel_names:
                    return env
                else:
                    continue
            finally:
                self.message_queue.task_done()

    @asyncio.coroutine
    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:
                env = yield from self._wait_for(self.presence_queue.get())
                if env.channel in channel_names:
                    return env
                else:
                    continue
            finally:
                self.presence_queue.task_done()
class Actor(object):
    """
    Main actor model.

    Args:
        inbox (GeneratorQueue): Inbox to consume from.
        outbox (GeneratorQueue): Outbox to publish to.
        loop: Event loop.
    """
    running = False
    _force_stop = False

    def __init__(self, inbox, outbox, loop=None):
        self.inbox = inbox
        self.outbox = outbox
        if not loop:
            loop = get_event_loop()
        self._loop = loop
        self._pause_lock = Lock(loop=self._loop)
        self._stop_event = Event(loop=self._loop)
        self._test = None
        self.__testy = None
        self.on_init()

    @property
    def paused(self):
        """Indicate if actor is paused."""
        return self._pause_lock.locked()

    async def start(self):
        """Main public entry point to start the actor."""
        await self.initialize()
        await self._start()

    async def initialize(self):
        """Initialize the actor before starting."""
        await self.on_start()
        if self._force_stop:
            return
        if not self.running:
            self.running = True

    async def _start(self):
        """Run the event loop and force the on_stop event."""
        try:
            await self._run()
        finally:
            await self.on_stop()

    async def resume(self):
        """Resume the actor."""
        await self.on_resume()
        self._pause_lock.release()

    async def pause(self):
        """Pause the actor."""
        await self._pause_lock.acquire()
        await self.on_pause()

    async def _block_if_paused(self):
        """Block on the pause lock."""
        if self.paused:
            await self._pause_lock.acquire()
            self._pause_lock.release()

    async def _run(self):
        """Main event loop."""
        while self.running:
            await self._block_if_paused()
            await self._process()

    async def publish(self, data):
        """Push data to the outbox."""
        await self.outbox.put(data)

    async def stop(self):
        """Stop the actor."""
        self.inbox = None
        self.outbox = None
        self.running = False
        self._force_stop = True
        self._stop_event.set()
        try:
            self._pause_lock.release()
        except RuntimeError:
            pass

    async def _process(self):
        """Process incoming messages."""
        if not self.inbox:
            return
        pending = {self.inbox.get(), self._stop_event.wait()}
        result = await get_first_completed(pending, self._loop)
        if self.running:
            await self.on_message(result)

    async def on_message(self, data):
        """Called when the actor receives a message."""
        raise NotImplementedError

    def on_init(self):
        """Called after the actor class is instantiated."""
        pass

    async def on_start(self):
        """Called before the actor starts ingesting the inbox."""
        pass

    async def on_stop(self):
        """Called after actor dies."""
        pass

    async def on_pause(self):
        """Called before the actor is paused."""
        pass

    async def on_resume(self):
        """Called before the actor is resumed."""
        pass
class Channel(object):
    """
    A Channel is a closable queue. A Channel is considered "finished" when
    it is closed and drained (unlike a queue which is "finished" when the
    queue is empty)
    """

    def __init__(self, maxsize=0, *, loop=None):
        if loop is None:
            self._loop = get_event_loop()
        else:
            self._loop = loop

        if not isinstance(maxsize, int) or maxsize < 0:
            raise TypeError("maxsize must be an integer >= 0 (default is 0)")

        self._maxsize = maxsize

        # Futures.
        self._getters = deque()
        self._putters = deque()

        # "finished" means channel is closed and drained
        self._finished = Event(loop=self._loop)
        self._close = Event(loop=self._loop)

        self._init()

    def _init(self):
        self._queue = deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._queue.append(item)

    def _wakeup_next(self, waiters):
        # Wake up the next waiter (if any) that isn't cancelled.
        while waiters:
            waiter = waiters.popleft()
            if not waiter.done():
                waiter.set_result(None)
                break

    def __repr__(self):
        return '<{} at {:#x} maxsize={!r} qsize={!r}>'.format(
            type(self).__name__, id(self), self._maxsize, self.qsize())

    def __str__(self):
        return '<{} maxsize={!r} qsize={!r}>'.format(
            type(self).__name__, self._maxsize, self.qsize())

    def qsize(self):
        """Number of items in the channel buffer."""
        return len(self._queue)

    @property
    def maxsize(self):
        """Number of items allowed in the channel buffer."""
        return self._maxsize

    def empty(self):
        """Return True if the channel is empty, False otherwise."""
        return not self._queue

    def full(self):
        """Return True if there are maxsize items in the channel.

        Note: if the Channel was initialized with maxsize=0 (the default),
        then full() is never True.
        """
        if self._maxsize <= 0:
            return False
        else:
            return self.qsize() >= self._maxsize

    @coroutine
    def put(self, item):
        """Put an item into the channel.

        If the channel is full, wait until a free slot is available before
        adding the item. If the channel is closed or closing, raise
        ChannelClosed.

        This method is a coroutine.
        """
        while self.full() and not self._close.is_set():
            putter = Future(loop=self._loop)
            self._putters.append(putter)
            try:
                yield from putter
            except ChannelClosed:
                raise
            except:
                putter.cancel()  # Just in case putter is not done yet.
                if not self.full() and not putter.cancelled():
                    # We were woken up by get_nowait(), but can't take
                    # the call. Wake up the next in line.
                    self._wakeup_next(self._putters)
                raise
        return self.put_nowait(item)

    def put_nowait(self, item):
        """Put an item into the channel without blocking.

        If no free slot is immediately available, raise ChannelFull.
        """
        if self.full():
            raise ChannelFull
        if self._close.is_set():
            raise ChannelClosed
        self._put(item)
        self._wakeup_next(self._getters)

    @coroutine
    def get(self):
        """Remove and return an item from the channel.

        If the channel is empty, wait until an item is available.

        This method is a coroutine.
        """
        while self.empty() and not self._close.is_set():
            getter = Future(loop=self._loop)
            self._getters.append(getter)
            try:
                yield from getter
            except ChannelClosed:
                raise
            except:
                getter.cancel()  # Just in case getter is not done yet.
                if not self.empty() and not getter.cancelled():
                    # We were woken up by put_nowait(), but can't take
                    # the call. Wake up the next in line.
                    self._wakeup_next(self._getters)
                raise
        return self.get_nowait()

    def get_nowait(self):
        """Remove and return an item from the channel.

        Return an item if one is immediately available, else raise
        ChannelEmpty.
        """
        if self.empty():
            if self._close.is_set():
                raise ChannelClosed
            else:
                raise ChannelEmpty
        item = self._get()
        if self.empty() and self._close.is_set():
            # if empty _after_ we retrieved an item AND marked for closing,
            # set the finished flag
            self._finished.set()
        self._wakeup_next(self._putters)
        return item

    @coroutine
    def join(self):
        """Block until the channel is closed and drained"""
        yield from self._finished.wait()

    def close(self):
        """Marks the channel as closed and throws a ChannelClosed in all
        pending putters"""
        self._close.set()
        # cancel putters
        for putter in self._putters:
            putter.set_exception(ChannelClosed())
        # cancel getters that can't ever return (as no more items can be added)
        while len(self._getters) > self.qsize():
            getter = self._getters.pop()
            getter.set_exception(ChannelClosed())
        if self.empty():
            # already empty, mark as finished
            self._finished.set()

    def closed(self):
        """Returns True if the Channel is marked as closed"""
        return self._close.is_set()

    @coroutine
    def __aiter__(self):  # pragma: no cover
        """Returns an async iterator (self)"""
        return self

    @coroutine
    def __anext__(self):  # pragma: no cover
        try:
            data = yield from self.get()
        except ChannelClosed:
            raise StopAsyncIteration
        else:
            return data

    def __iter__(self):
        return iter(self._queue)
class AsyncConnectionPool:
    """Object manages asynchronous connections.

    :param int size: size (number of connections) of the pool.
    :param float queue_timeout: timeout when a client is waiting for a
        connection from the pool
    :param loop: event loop, if not passed then the default will be used
    :param config: MySql connection config see
        `doc. <http://dev.mysql.com/doc/connector-python/en/connector-python-connectargs.html>`_
    :raise ValueError: if the `size` is inappropriate
    """

    def __init__(self, size=1, queue_timeout=15.0, *, loop=None, **config):
        assert size > 0, 'DBPool.size must be greater than 0'
        if size < 1:
            raise ValueError("DBPool.size is less than 1, "
                             "connections won't be established")
        self._pool = set()
        self._busy_items = set()
        self._size = size
        self._pending_futures = deque()
        self._queue_timeout = queue_timeout
        self._loop = loop or asyncio.get_event_loop()
        self.config = config
        self._shutdown_event = Event(loop=self._loop)
        self._shutdown_event.set()

    @property
    def queue_timeout(self):
        """Number of seconds to wait for a connection from the pool before a
        TimeoutError occurs

        :rtype: float
        """
        return self._queue_timeout

    @queue_timeout.setter
    def queue_timeout(self, value):
        """Sets a timeout for :attr:`queue_timeout`

        :param float value: number of seconds
        """
        if not isinstance(value, (float, int)):
            raise ValueError('Float or integer type expected')
        self._queue_timeout = value

    @property
    def size(self):
        """Size of pool

        :rtype: int
        """
        return self._size

    def __len__(self):
        """Number of allocated pool slots

        :rtype: int
        """
        return len(self._pool)

    @property
    def free_count(self):
        """Number of free pool slots

        :rtype: int
        """
        return self.size - len(self._busy_items)

    @asyncio.coroutine
    def get(self):
        """Coroutine. Returns an opened connection from the pool.

        If the coroutine is invoked when all connections have been issued,
        then the caller will block until some connection is released.

        Also, the class provides a context manager for getting a connection
        and automatically freeing it. Example:

        >>> with (yield from pool) as cnx:
        >>>     ...

        :rtype: AsyncMySQLConnection
        :raise: concurrent.futures.TimeoutError()
        """
        cnx = None
        yield from self._shutdown_event.wait()

        for free_client in self._pool - self._busy_items:
            cnx = free_client
            self._busy_items.add(cnx)
            break
        else:
            if len(self) < self.size:
                cnx = AsyncMySQLConnection(loop=self._loop)
                self._pool.add(cnx)
                self._busy_items.add(cnx)
                try:
                    yield from cnx.connect(**self.config)
                except:
                    self._pool.remove(cnx)
                    self._busy_items.remove(cnx)
                    raise

        if not cnx:
            queue_future = Future(loop=self._loop)
            self._pending_futures.append(queue_future)
            try:
                cnx = yield from asyncio.wait_for(queue_future,
                                                  self.queue_timeout,
                                                  loop=self._loop)
                self._busy_items.add(cnx)
            except TimeoutError:
                raise TimeoutError('Database pool is busy')
            finally:
                try:
                    self._pending_futures.remove(queue_future)
                except ValueError:
                    pass

        return cnx

    def release(self, connection):
        """Frees connection. After that the connection can be issued
        by :func:`get`.

        :param AsyncMySQLConnection connection: a connection received
            from :func:`get`
        """
        if len(self._pending_futures):
            f = self._pending_futures.popleft()
            f.set_result(connection)
        else:
            self._busy_items.remove(connection)

    @asyncio.coroutine
    def shutdown(self):
        """Coroutine. Closes all connections and purges the queue of waiters
        for a connection.
        """
        self._shutdown_event.clear()
        try:
            for cnx in self._pool:
                yield from cnx.disconnect()
            for f in self._pending_futures:
                f.cancel()
            self._pending_futures.clear()
            self._pool = set()
            self._busy_items = set()
        finally:
            self._shutdown_event.set()

    def __enter__(self):
        raise RuntimeError(
            '"yield from" should be used as context manager expression')

    def __exit__(self, *args):
        # This must exist because __enter__ exists, even though that
        # always raises; that's how the with-statement works.
        pass

    @asyncio.coroutine
    def __iter__(self):
        cnx = yield from self.get()
        return ContextManager(self, cnx)