Example #1
async def fire_event(condition: Condition):
    while True:
        await asyncio.sleep(5)
        print('About to notify, acquiring condition lock...')
        async with condition:
            print('Lock acquired, notifying all workers.')
            condition.notify_all()
        print('Notification finished, releasing lock.')
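
For context, fire_event only does useful work if other coroutines block on the same condition. A minimal companion sketch (the wait_for_event name and body are assumptions, not part of the original example):

import asyncio
from asyncio import Condition

async def wait_for_event(condition: Condition, worker_id: int):
    # wait() releases the condition's lock while blocked, so fire_event
    # can acquire it and call notify_all().
    async with condition:
        print(f'Worker {worker_id} waiting...')
        await condition.wait()
        print(f'Worker {worker_id} notified.')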
Example #2
class AsyncIteratorPool(collabc.AsyncGenerator, AsyncPool[T]):
    """An asynchronous pool that wraps another async iterator."""
    def __init__(self, base: AsyncIterator[T]):
        self._base = base
        self._basegen = isinstance(base, collabc.AsyncGenerator)
        self._yl = deque()
        self._rl = deque()
        self._ac = None
        self._stopped = False

    async def asend(self, value: OpT) -> OpT:
        if self._stopped: raise StopAsyncIteration
        if self._ac is None: self._ac = ACondition()
        if value is None:
            async with self._ac:
                if len(self._rl) == 0:
                    try:
                        if self._basegen:
                            yv = await self._base.asend(None)
                        else:
                            yv = await self._base.__anext__()
                        self._yl.append(yv)
                        return yv
                    except (Exception, GeneratorExit) as exc:
                        self._stopped = True
                        self._ac.notify_all()
                        raise StopAsyncIteration from exc
                else:
                    yv = self._rl.popleft()
                    self._yl.append(yv)
                    return yv
        else:
            async with self._ac:
                if value in self._yl:
                    self._yl.remove(value)
                    self._rl.append(value)

    async def athrow(self, typ, val=None, tb=None):
        try:
            if self._basegen:
                return await self._base.athrow(typ, val, tb)
            else:
                return await super().athrow(typ, val, tb)
        except (Exception, GeneratorExit) as exc:
            self._stopped = True
            if self._ac is not None:
                await self._ac.acquire()
                self._ac.notify_all()
                self._ac.release()
            raise StopAsyncIteration from exc
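
A usage sketch for the pool above (assumed; AsyncPool, OpT, and ACondition come from the surrounding project, so this runs only alongside them): asend(None) pulls the next value from the wrapped iterator, while asend(value) hands a previously yielded value back to the pool.

import asyncio

async def numbers():
    for i in range(3):
        yield i

async def demo():
    pool = AsyncIteratorPool(numbers())
    first = await pool.asend(None)   # pull a fresh value from the base iterator
    await pool.asend(first)          # return it to the pool for redistribution
    again = await pool.asend(None)   # returned values are served before new ones
    assert again == first

asyncio.run(demo())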
Example #3
class SetBasedAsyncPopPool(collabc.AsyncGenerator, AsyncPopulationPool[H]):
    """An asynchronous population pool backed by a set."""
    def __init__(self, ivals: Optional[AbstractSet[H]] = None):
        self._set: Set[H] = set() if ivals is None else set(ivals)
        self._ac = None
        self._stopped = False

    async def apopulate(self, val: H, *args: H) -> None:
        #Can't populate a closed pool:
        if self._stopped: raise StopAsyncIteration
        if self._ac is None: self._ac = ACondition()
        if not args:
            self._set.add(val)
            count = 1
        else:
            argset = set(args)
            argset.add(val)
            count = len(argset)
            self._set |= argset
        async with self._ac:
            self._ac.notify(count)

    async def asend(self, value: Optional[H]) -> Optional[H]:
        if self._stopped: raise StopAsyncIteration
        if self._ac is None: self._ac = ACondition()
        if value is None:
            async with self._ac:
                while len(self._set) == 0:
                    await self._ac.wait()
                    if self._stopped: raise StopAsyncIteration
                return self._set.pop()
        else:
            if value not in self._set:
                self._set.add(value)
                await self._ac.acquire()
                self._ac.notify()
                self._ac.release()

    async def athrow(self, typ, val=None, tb=None) -> None:
        try:
            return await super().athrow(typ, val, tb)
        except (Exception, GeneratorExit) as exc:
            self._stopped = True
            if self._ac is not None:
                await self._ac.acquire()
                self._ac.notify_all()
                self._ac.release()
            raise StopAsyncIteration from exc
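
A usage sketch (assumed): apopulate() feeds the pool, and asend(None) blocks on the internal condition until a value becomes available.

import asyncio

async def demo():
    pool = SetBasedAsyncPopPool()

    async def producer():
        await asyncio.sleep(0.1)
        await pool.apopulate('a', 'b')

    asyncio.ensure_future(producer())
    value = await pool.asend(None)  # blocks until apopulate() notifies
    print(value)

asyncio.run(demo())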
Example #4
async def manipulate_condition(condition: asyncio.Condition):
    print("starting manipulate_condition")

    # Briefly delay the start of the consumers
    await asyncio.sleep(0.1)

    for i in range(1, 3):
        async with condition:
            print(f"notifying {i} condumers")
            condition.notify(i)
        await asyncio.sleep(0.1)

    async with condition:
        print("notifying remaining consumers")
        condition.notify_all()
    print("ending manupulate_condition")
Example #5
async def condition_chain(
    source: asyncio.Condition,
    target: asyncio.Condition,
) -> None:
    """
    A condition chain is a "clean" hack to attach one condition to another.

    It is a "clean" (not "dirty") hack to wake up the webhook configuration
    managers when either the resources are revised (as seen in the insights),
    or a new client config is yielded from the webhook server.
    """
    async with source:
        while True:
            await source.wait()
            async with target:
                target.notify_all()
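
A usage sketch (assumed): run the chain as a background task, so notifying the source condition transparently wakes waiters on the target.

import asyncio

async def demo():
    source, target = asyncio.Condition(), asyncio.Condition()
    chain = asyncio.ensure_future(condition_chain(source, target))

    async def on_target():
        async with target:
            await target.wait()
            print('target notified via the chain')

    waiter = asyncio.ensure_future(on_target())
    await asyncio.sleep(0.1)       # let both tasks start waiting
    async with source:
        source.notify_all()        # wakes the chain, which notifies target
    await waiter
    chain.cancel()

asyncio.run(demo())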
Example #6
async def main():
    cond = Condition()
    fs = [ensure_future(workers(cond, i)) for i in range(10)]
    fs.append(ensure_future(workers(cond, 11)))
    fs.append(ensure_future(workers(cond, 12)))

    await sleep(0.1)
    async with cond:
        for i in range(4):
            print('notify {} workers'.format(i))
            cond.notify(i)
            await sleep(0.1)

    async with cond:
        await sleep(0.5)
        print('notify all')
        cond.notify_all()

    await wait(fs)
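
The workers coroutine used by main() is not shown in the example; a plausible sketch (assumed, as are the bare-name imports):

from asyncio import Condition, ensure_future, sleep, wait

async def workers(cond, n):
    async with cond:
        print('worker {} waiting'.format(n))
        await cond.wait()
        print('worker {} done'.format(n))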
Example #7
async def worker(
    *,
    signaller: asyncio.Condition,
    processor: WatchStreamProcessor,
    settings: configuration.OperatorSettings,
    resource_indexed: Optional[
        aiotoggles.Toggle],  # None for tests & observation
    operator_indexed: Optional[
        aiotoggles.ToggleSet],  # None for tests & observation
    streams: Streams,
    key: ObjectRef,
) -> None:
    """
    The per-object workers consume the object's events and invoke the processors/handlers.

    The processor is expected to be an async coroutine, always the one from the framework.
    In fact, it is either a peering processor, which monitors the peer operators,
    or a generic resource processor, which internally calls the registered synchronous processors.

    The per-object worker is a time-limited task, which ends as soon as all the object's events
    have been handled. The watcher will spawn a new job when and if new events arrive.

    To prevent the queue/job deletion and re-creation from happening too often, the jobs wait a
    reasonable, but small enough, time (a few seconds) before actually finishing --
    in case new events are already there, but the API or the watcher task lags a bit.
    """
    backlog = streams[key].backlog
    pressure = streams[key].pressure
    shouldstop = False
    try:
        while not shouldstop:

            # Try ASAP, but give it a few seconds for the new events to arrive.
            # If the queue is empty for some time, then finish the object's worker.
            # If the queue is filled, use the latest event only (within a short time window).
            # If an EOS marker is received, handle the last real event, then finish the worker ASAP.
            try:
                raw_event = await asyncio.wait_for(
                    backlog.get(), timeout=settings.batching.idle_timeout)
            except asyncio.TimeoutError:
                # A tricky part! Under high-load or with synchronous blocks of asyncio event-loop,
                # it is possible that the timeout happens while the queue is filled: depending on
                # the order in which the coros/waiters are checked once control returns to asyncio.
                # As a work-around, we double-check the queue and exit only if it is truly empty;
                # if not, run as normally. IMPORTANT: There MUST be NO async/await-code between
                # "break" and "finally", so that the queue is not populated again.
                # TODO: LATER: Test the described scenario. I have found no ways to simulate
                #  a timeout while the queue is filled -- neither with pure Python nor with mocks.
                if backlog.empty():
                    break
                else:
                    continue
            else:
                try:
                    while True:
                        prev_event = raw_event
                        next_event = await asyncio.wait_for(
                            backlog.get(),
                            timeout=settings.batching.batch_window)
                        shouldstop = shouldstop or isinstance(next_event, EOS)
                        raw_event = prev_event if isinstance(
                            next_event, EOS) else next_event
                except asyncio.TimeoutError:
                    pass

            # Exit gracefully and immediately on the end-of-stream marker sent by the watcher.
            if isinstance(raw_event, EOS):
                break

            # Try the processor. In case of errors, show the error, but continue the processing.
            pressure.clear()
            await processor(
                raw_event=raw_event,
                stream_pressure=pressure,
                resource_indexed=resource_indexed,
                operator_indexed=operator_indexed,
            )

    except Exception:
        # Log the error for every worker: there can be several of them failing at the same time,
        # but only one will trigger the watcher's failure -- others could be lost if not logged.
        logger.exception(
            f"Event processing has failed with an unrecoverable error for {key}."
        )
        raise

    finally:
        # Whether an exception or a break or a success, notify the caller, and garbage-collect our queue.
        # The queue must not be left in the queue-cache without a corresponding job handling this queue.
        try:
            del streams[key]
        except KeyError:
            pass

        # Notify the depletion routine about the changes in the workers'/streams' overall state.
        # * This should happen STRICTLY AFTER the removal from the streams[], and
        # * This should happen A MOMENT BEFORE the job ends (within the scheduler's close_timeout).
        async with signaller:
            signaller.notify_all()
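
The batching ("debounce") core of the worker, isolated here as a sketch for clarity (EOS and the parameter names mirror the code above; this is not a separate function in the original):

import asyncio

async def latest_event(backlog, batch_window, first_event):
    # Keep draining the queue, allowing up to batch_window seconds per item,
    # and retain only the newest real event; remember whether an EOS was seen.
    raw_event, should_stop = first_event, False
    try:
        while True:
            next_event = await asyncio.wait_for(backlog.get(), timeout=batch_window)
            should_stop = should_stop or isinstance(next_event, EOS)
            raw_event = raw_event if isinstance(next_event, EOS) else next_event
    except asyncio.TimeoutError:
        pass
    return raw_event, should_stop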
Example #8
class CommunicationTokenCredential(object):
    """Credential type used for authenticating to an Azure Communication service.
    :param str token: The token used to authenticate to an Azure Communication service
    :keyword token_refresher: The token refresher, used to fetch a fresh token when needed
    :raises: TypeError
    """

    _ON_DEMAND_REFRESHING_INTERVAL_MINUTES = 2

    def __init__(self,
                 token,  # type: str
                 **kwargs
                 ):
        token_refresher = kwargs.pop('token_refresher', None)
        communication_token_refresh_options = CommunicationTokenRefreshOptions(token=token,
                                                                               token_refresher=token_refresher)
        self._token = communication_token_refresh_options.get_token()
        self._token_refresher = communication_token_refresh_options.get_token_refresher()
        self._lock = Condition(Lock())
        self._some_thread_refreshing = False

    def get_token(self):
        # type: () -> ~azure.core.credentials.AccessToken
        """The value of the configured token.
        :rtype: ~azure.core.credentials.AccessToken
        """

        if not self._token_refresher or not self._token_expiring():
            return self._token

        should_this_thread_refresh = False

        with self._lock:

            while self._token_expiring():
                if self._some_thread_refreshing:
                    if self._is_currenttoken_valid():
                        return self._token

                    self._wait_till_inprogress_thread_finish_refreshing()
                else:
                    should_this_thread_refresh = True
                    self._some_thread_refreshing = True
                    break


        if should_this_thread_refresh:
            try:
                newtoken = self._token_refresher()  # pylint:disable=not-callable

                with self._lock:
                    self._token = newtoken
                    self._some_thread_refreshing = False
                    self._lock.notify_all()
            except:
                with self._lock:
                    self._some_thread_refreshing = False
                    self._lock.notify_all()

                raise

        return self._token

    def _wait_till_inprogress_thread_finish_refreshing(self):
        self._lock.release()
        self._lock.acquire()

    def _token_expiring(self):
        return self._token.expires_on - self._get_utc_now_as_int() <\
               timedelta(minutes=self._ON_DEMAND_REFRESHING_INTERVAL_MINUTES).total_seconds()

    def _is_currenttoken_valid(self):
        return self._get_utc_now_as_int() < self._token.expires_on

    @classmethod
    def _get_utc_now_as_int(cls):
        current_utc_datetime = datetime.utcnow().replace(tzinfo=TZ_UTC)
        current_utc_datetime_as_int = _convert_datetime_to_utc_int(current_utc_datetime)
        return current_utc_datetime_as_int
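
A usage sketch (assumed; initial_token and fetch_token_from_auth_service are hypothetical placeholders): the credential refreshes on demand once the cached token is within the refresh interval of its expiry.

def refresher():
    return fetch_token_from_auth_service()  # hypothetical: returns an AccessToken

credential = CommunicationTokenCredential(initial_token, token_refresher=refresher)
token = credential.get_token()  # refreshed transparently when close to expiry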
Example #9
class CommunicationTokenCredential(object):
    """Credential type used for authenticating to an Azure Communication service.
    :param str token: The token used to authenticate to an Azure Communication service
    :raises: TypeError
    """

    ON_DEMAND_REFRESHING_INTERVAL_MINUTES = 2

    def __init__(
            self,
            token,  # type: str
            token_refresher=None):
        # type: (str) -> None
        if not isinstance(token, six.string_types):
            raise TypeError("token must be a string.")
        self._token = create_access_token(token)
        self._token_refresher = token_refresher
        self._lock = Condition(Lock())
        self._some_thread_refreshing = False

    def get_token(self):
        # type: () -> ~azure.core.credentials.AccessToken
        """The value of the configured token.
        :rtype: ~azure.core.credentials.AccessToken
        """

        if not self._token_refresher or not self._token_expiring():
            return self._token

        should_this_thread_refresh = False

        with self._lock:

            while self._token_expiring():
                if self._some_thread_refreshing:
                    if self._is_currenttoken_valid():
                        return self._token

                    self._wait_till_inprogress_thread_finish_refreshing()
                else:
                    should_this_thread_refresh = True
                    self._some_thread_refreshing = True
                    break

        if should_this_thread_refresh:
            try:
                newtoken = self._token_refresher()

                with self._lock:
                    self._token = newtoken
                    self._some_thread_refreshing = False
                    self._lock.notify_all()
            except:
                with self._lock:
                    self._some_thread_refreshing = False
                    self._lock.notify_all()

                raise

        return self._token

    def _wait_till_inprogress_thread_finish_refreshing(self):
        self._lock.release()
        self._lock.acquire()

    def _token_expiring(self):
        return self._token.expires_on - self._get_utc_now() <\
            timedelta(minutes=self.ON_DEMAND_REFRESHING_INTERVAL_MINUTES)

    def _is_currenttoken_valid(self):
        return self._get_utc_now() < self._token.expires_on

    @classmethod
    def _get_utc_now(cls):
        return datetime.utcnow().replace(tzinfo=TZ_UTC)
Example #10
class HangoutsPlug(immp.HTTPOpenable, immp.Plug):
    """
    Plug for `Google Hangouts <https://hangouts.google.com>`_.
    """

    schema = immp.Schema({"cookie": str, immp.Optional("read", True): bool})

    network_name = "Hangouts"

    @property
    def network_id(self):
        return "hangouts:{}".format(self._bot_user) if self._bot_user else None

    def __init__(self, name, config, host):
        super().__init__(name, config, host)
        self._client = self._looped = None
        self._starting = Condition()
        self._closing = False
        self._users = self._convs = self._bot_user = None

    async def _loop(self):
        while True:
            try:
                await self._client.connect()
            except CancelledError:
                log.debug("Cancel request for plug %r loop", self.name)
                return
            except Exception as e:
                log.debug("Unexpected client disconnect: %r", e)
            if self._closing:
                return
            log.debug("Reconnecting in 3 seconds")
            await sleep(3)

    async def _connect(self):
        log.debug("Retrieving users and conversations")
        self._users, self._convs = await hangups.build_user_conversation_list(
            self._client)
        self._convs.on_event.add_observer(self._event)
        resp = await self._client.get_self_info(
            hangouts_pb2.GetSelfInfoRequest(
                request_header=self._client.get_request_header()))
        self._bot_user = resp.self_entity.id.chat_id
        async with self._starting:
            self._starting.notify_all()

    async def _event(self, event):
        try:
            sent = await HangoutsMessage.from_event(self, event)
        except NotImplementedError:
            log.warn("Skipping unimplemented %r event type",
                     event.__class__.__name__)
        else:
            log.debug("Queueing new message event")
            self.queue(sent)
        if self.config["read"]:
            await self._convs.get(event.conversation_id
                                  ).update_read_timestamp()

    async def start(self):
        await super().start()
        self._closing = False
        self._client = hangups.Client(
            hangups.get_auth_stdin(self.config["cookie"]))
        self._client.on_connect.add_observer(self._connect)
        log.debug("Connecting client")
        self._looped = ensure_future(self._loop())
        async with self._starting:
            # Block until users and conversations are loaded.
            await self._starting.wait()
        log.debug("Listening for events")

    async def stop(self):
        await super().stop()
        self._closing = True
        if self._client:
            log.debug("Requesting client disconnect")
            await self._client.disconnect()
            self._client = None
        if self._looped:
            self._looped.cancel()
            self._looped = None
        self._bot_user = None

    async def user_from_id(self, id_):
        user = self._users.get_user(
            hangups.user.UserID(chat_id=id_, gaia_id=id_))
        if user:
            return HangoutsUser.from_user(self, user)
        request = hangouts_pb2.GetEntityByIdRequest(
            request_header=self._client.get_request_header(),
            batch_lookup_spec=[hangouts_pb2.EntityLookupSpec(gaia_id=id_)])
        response = await self._client.get_entity_by_id(request)
        if response.entity:
            return HangoutsUser.from_entity(self, response.entity)
        else:
            return None

    async def user_is_system(self, user):
        return user.id == self._bot_user

    def _filter_channels(self, type_):
        convs = self._convs.get_all(include_archived=True)
        return (immp.Channel(self, conv.id_) for conv in convs
                if conv._conversation.type == type_)

    async def public_channels(self):
        return list(self._filter_channels(
            hangouts_pb2.CONVERSATION_TYPE_GROUP))

    async def private_channels(self):
        return list(
            self._filter_channels(hangouts_pb2.CONVERSATION_TYPE_ONE_TO_ONE))

    async def channel_for_user(self, user):
        for channel in self._filter_channels(
                hangouts_pb2.CONVERSATION_TYPE_ONE_TO_ONE):
            if any(part.id == user.id for part in await channel.members()):
                return channel
        request = hangouts_pb2.CreateConversationRequest(
            request_header=self._client.get_request_header(),
            type=hangouts_pb2.CONVERSATION_TYPE_ONE_TO_ONE,
            client_generated_id=self._client.get_client_generated_id(),
            invitee_id=[hangouts_pb2.InviteeID(gaia_id=user.id)])
        response = await self._client.create_conversation(request)
        return immp.Channel(self, response.conversation.conversation_id.id)

    async def channel_is_private(self, channel):
        try:
            conv = self._convs.get(channel.source)
        except KeyError:
            return False
        else:
            return conv._conversation.type == hangouts_pb2.CONVERSATION_TYPE_ONE_TO_ONE

    async def channel_title(self, channel):
        try:
            return self._convs.get(channel.source).name
        except KeyError:
            return None

    async def channel_link(self, channel):
        return "https://hangouts.google.com/chat/{}".format(channel.source)

    async def channel_rename(self, channel, title):
        try:
            conv = self._convs.get(channel.source)
        except KeyError:
            return None
        else:
            if not conv.name == title:
                await conv.rename(title)

    async def channel_members(self, channel):
        try:
            conv = self._convs.get(channel.source)
        except KeyError:
            return None
        else:
            return [HangoutsUser.from_user(self, user) for user in conv.users]

    async def channel_invite(self, channel, user):
        try:
            conv = self._convs.get(channel.source)
        except KeyError:
            return
        request = hangouts_pb2.AddUserRequest(
            request_header=self._client.get_request_header(),
            event_request_header=conv._get_event_request_header(),
            invitee_id=[hangouts_pb2.InviteeID(gaia_id=user.id)])
        await self._client.add_user(request)

    async def channel_remove(self, channel, user):
        try:
            conv = self._convs.get(channel.source)
        except KeyError:
            return
        request = hangouts_pb2.RemoveUserRequest(
            request_header=self._client.get_request_header(),
            event_request_header=conv._get_event_request_header(),
            participant_id=hangouts_pb2.ParticipantId(gaia_id=user.id))
        await self._client.remove_user(request)

    async def _next_batch(self, conv, before_id):
        # Conversation.get_events() should, if the target is the oldest message in the current
        # batch, fetch the next whole batch and return that, or else return everything before the
        # target.  However, at the end of the message history, it sometimes returns an arbitrary
        # batch instead.  Return fetched messages from Conversation.events directly instead.
        ids = [event.id_ for event in conv.events]
        if before_id not in ids:
            return None
        if ids[0] == before_id:
            # Target is the oldest message cached, so there may be more -- try for another batch.
            await conv.get_events(before_id)
            ids = [event.id_ for event in conv.events]
        # Take all events older than the target.
        events = conv.events[:ids.index(before_id)]
        return [
            await HangoutsMessage.from_event(self, event) for event in events
        ]

    async def channel_history(self, channel, before=None):
        try:
            conv = self._convs.get(channel.source)
        except KeyError:
            return []
        if not conv.events:
            return []
        if not before:
            if len(conv.events) == 1:
                # Only the initial message cached, try to fetch a first batch.
                await conv.get_events(conv.events[0].id_)
            # Return all cached events.
            return [
                await HangoutsMessage.from_event(self, event)
                for event in conv.events
            ]
        ids = [event.id_ for event in conv.events]
        if before.id in ids:
            return await self._next_batch(conv, before.id)
        # Hangouts has no way to query for an event by ID, only by timestamp.  Instead, we'll try a
        # few times to retrieve it further down the message history.
        for i in range(10):
            log.debug("Fetching batch %i of events to find %r", i + 1,
                      before.id)
            events = await conv.get_events(conv.events[0].id_)
            ids = [event.id_ for event in events]
            if not ids:
                # No further messages, we've hit the end of the message history.
                return []
            elif before.id in ids:
                return await self._next_batch(conv, before.id)
        # Maxed out on attempts but didn't find the requested message.
        return []

    async def _get_event(self, receipt):
        try:
            conv = self._convs.get(receipt.channel.source)
        except KeyError:
            return None
        ids = [event.id_ for event in conv.events]
        try:
            return conv.get_event(receipt.id)
        except KeyError:
            pass
        # Hangouts has no way to query for an event by ID, only by timestamp.  Instead, we'll try a
        # few times to retrieve it further down the message history.
        for i in range(10):
            log.debug("Fetching batch %i of events to find %r", i + 1,
                      receipt.id)
            events = await conv.get_events(conv.events[0].id_)
            ids = [event.id_ for event in events]
            if not ids:
                # No further messages, we've hit the end of the message history.
                return []
            elif receipt.id in ids:
                return events[ids.index(receipt.id)]
        # Maxed out on attempts but didn't find the requested message.
        return None

    async def get_message(self, receipt):
        # We have the message reference but not the content.
        event = await self._get_event(receipt)
        if not event:
            return None
        sent = await HangoutsMessage.from_event(self, event)
        # As we only use this for rendering the message again, we shouldn't add a second
        # layer of authorship if we originally sent the message being retrieved.
        if sent.user.id == self._bot_user:
            sent.user = None
        return sent

    async def _upload(self, attach):
        async with (await attach.get_content(self.session)) as img_content:
            # Hangups expects a file-like object with a synchronous read() method.
            # NB. The whole file is read into memory by Hangups anyway.
            # Filename must be present, else Hangups will try (and fail) to read the path.
            photo = await self._client.upload_image(
                BytesIO(await img_content.read()),
                filename=attach.title or "image.png")
        return hangouts_pb2.ExistingMedia(photo=hangouts_pb2.Photo(
            photo_id=photo))

    @classmethod
    def _serialise(cls, segments):
        output = []
        for segment in segments:
            output += HangoutsSegment.to_segments(segment)
        return [segment.serialize() for segment in output]

    def _request(self, conv, segments=None, media=None, place=None):
        return hangouts_pb2.SendChatMessageRequest(
            request_header=self._client.get_request_header(),
            event_request_header=conv._get_event_request_header(),
            message_content=hangouts_pb2.MessageContent(
                segment=segments) if segments else None,
            existing_media=media,
            location=hangouts_pb2.Location(place=place) if place else None)

    async def _requests(self, conv, msg):
        uploads = []
        images = []
        places = []
        for attach in msg.attachments:
            if isinstance(attach,
                          immp.File) and attach.type in (immp.File.Type.image,
                                                         immp.File.Type.video):
                uploads.append(self._upload(attach))
            elif isinstance(attach, immp.Location):
                places.append(HangoutsLocation.to_place(attach))
        if uploads:
            images = await gather(*uploads)
        requests = []
        if msg.text or msg.reply_to:
            render = msg.render(link_name=False,
                                edit=msg.edited,
                                quote_reply=True)
            segments = self._serialise(render)
            media = None
            if len(images) == 1:
                # Attach the only image to the message text.
                media = images.pop()
            requests.append(self._request(conv, segments, media))
        if images:
            segments = []
            if msg.user:
                label = immp.Message(user=msg.user,
                                     text="sent an image",
                                     action=True)
                segments = self._serialise(label.render(link_name=False))
            # Send any additional media items in their own separate messages.
            for media in images:
                requests.append(self._request(conv, segments, media))
        if places:
            # Send each location separately.
            for place in places:
                requests.append(self._request(conv, place=place))
            # Include a label only if we haven't sent a text message earlier.
            if msg.user and not msg.text:
                label = immp.Message(user=msg.user,
                                     text="sent a location",
                                     action=True)
                segments = self._serialise(label.render(link_name=False))
                requests.append(self._request(conv, segments))
        return requests

    async def put(self, channel, msg):
        conv = self._convs.get(channel.source)
        # Attempt to find sources for referenced messages.
        clone = copy(msg)
        clone.reply_to = await self.resolve_message(clone.reply_to)
        requests = []
        for attach in clone.attachments:
            # Generate requests for attached messages first.
            if isinstance(attach, immp.Message):
                requests += await self._requests(
                    conv, await self.resolve_message(attach))
        own_requests = await self._requests(conv, clone)
        if requests and not own_requests:
            # Forwarding a message but no content to show who forwarded it.
            info = immp.Message(user=clone.user,
                                action=True,
                                text="forwarded a message")
            own_requests = await self._requests(conv, info)
        requests += own_requests
        events = []
        for request in requests:
            events.append(await self._client.send_chat_message(request))
        return [event.created_event.event_id for event in events]
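
The start()/_connect() handshake above, distilled into a standalone sketch (not original code): start() blocks on a condition until the on-connect callback calls notify_all().

import asyncio

class ReadySignal:
    def __init__(self):
        self._cond = asyncio.Condition()

    async def wait_ready(self):
        # Called by the starting side: block until the callback signals.
        async with self._cond:
            await self._cond.wait()

    async def set_ready(self):
        # Called by the on-connect callback once state is fully loaded.
        async with self._cond:
            self._cond.notify_all()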
Example #11
async def worker(
        *,
        signaller: asyncio.Condition,
        processor: WatchStreamProcessor,
        settings: configuration.OperatorSettings,
        streams: Streams,
        key: ObjectRef,
) -> None:
    """
    The per-object workers consume the object's events and invoke the processors/handlers.

    The processor is expected to be an async coroutine, always the one from the framework.
    In fact, it is either a peering processor, which monitors the peer operators,
    or a generic resource processor, which internally calls the registered synchronous processors.

    The per-object worker is a time-limited task, which ends as soon as all the object's events
    have been handled. The watcher will spawn a new job when and if new events arrive.

    To prevent the queue/job deletion and re-creation from happening too often, the jobs wait a
    reasonable, but small enough, time (a few seconds) before actually finishing --
    in case new events are already there, but the API or the watcher task lags a bit.
    """
    watchevents = streams[key].watchevents
    replenished = streams[key].replenished
    shouldstop = False
    try:
        while not shouldstop:

            # Try ASAP, but give it a few seconds for the new events to arrive.
            # If the queue is empty for some time, then finish the object's worker.
            # If the queue is filled, use the latest event only (within a short time window).
            # If an EOS marker is received, handle the last real event, then finish the worker ASAP.
            try:
                raw_event = await asyncio.wait_for(
                    watchevents.get(),
                    timeout=settings.batching.idle_timeout)
            except asyncio.TimeoutError:
                break
            else:
                try:
                    while True:
                        prev_event = raw_event
                        next_event = await asyncio.wait_for(
                            watchevents.get(),
                            timeout=settings.batching.batch_window)
                        shouldstop = shouldstop or isinstance(next_event, EOS)
                        raw_event = prev_event if isinstance(next_event, EOS) else next_event
                except asyncio.TimeoutError:
                    pass

            # Exit gracefully and immediately on the end-of-stream marker sent by the watcher.
            if isinstance(raw_event, EOS):
                break

            # Try the processor. In case of errors, show the error, but continue the processing.
            replenished.clear()
            await processor(raw_event=raw_event, replenished=replenished)

    except Exception:
        # Log the error for every worker: there can be several of them failing at the same time,
        # but only one will trigger the watcher's failure -- others could be lost if not logged.
        logger.exception(f"Event processing has failed with an unrecoverable error for {key}.")
        raise

    finally:
        # Whether an exception or a break or a success, notify the caller, and garbage-collect our queue.
        # The queue must not be left in the queue-cache without a corresponding job handling this queue.
        try:
            del streams[key]
        except KeyError:
            pass

        # Notify the depletion routine about the changes in the workers'/streams' overall state.
        # * This should happen STRICTLY AFTER the removal from the streams[], and
        # * This should happen A MOMENT BEFORE the job ends (within the scheduler's close_timeout).
        async with signaller:
            signaller.notify_all()
Example #12
class CommunicationTokenCredential(object):
    """Credential type used for authenticating to an Azure Communication service.
    :param str token: The token used to authenticate to an Azure Communication service.
    :keyword token_refresher: The async token refresher, used to fetch a fresh token when needed.
     The returned token must be valid (expiration date must be in the future).
    :paramtype token_refresher: Callable[[], Awaitable[AccessToken]]
    :keyword bool proactive_refresh: Whether to refresh the token proactively or not.
     If proactive refreshing is enabled ('proactive_refresh' is True), the credential will use
     a background timer to attempt to refresh the token within 10 minutes before the cached token expires;
     the proactive refresh requests a new token by calling the 'token_refresher' callback.
     When 'proactive_refresh' is enabled, the Credential object must either be run within a context manager
     or have its 'close' method called once the object is no longer needed.
    :raises: TypeError if parameter 'token' is not a string
    :raises: ValueError if 'proactive_refresh' is enabled without providing the 'token_refresher' function.
    """

    _ON_DEMAND_REFRESHING_INTERVAL_MINUTES = 2
    _DEFAULT_AUTOREFRESH_INTERVAL_MINUTES = 10

    def __init__(self, token: str, **kwargs: Any):
        if not isinstance(token, six.string_types):
            raise TypeError("Token must be a string.")
        self._token = create_access_token(token)
        self._token_refresher = kwargs.pop('token_refresher', None)
        self._proactive_refresh = kwargs.pop('proactive_refresh', False)
        if (self._proactive_refresh and self._token_refresher is None):
            raise ValueError(
                "When 'proactive_refresh' is True, 'token_refresher' must not be None."
            )
        self._timer = None
        self._async_mutex = Lock()
        if sys.version_info[:3] == (3, 10, 0):
            # Workaround for a Python 3.10.0 bug (https://bugs.python.org/issue45416):
            getattr(self._async_mutex, '_get_loop', lambda: None)()
        self._lock = Condition(self._async_mutex)
        self._some_thread_refreshing = False
        self._is_closed = Event()

    async def get_token(self, *scopes, **kwargs):  # pylint: disable=unused-argument
        # type: (*str, **Any) -> AccessToken
        """The value of the configured token.
        :rtype: ~azure.core.credentials.AccessToken
        """
        if self._proactive_refresh and self._is_closed.is_set():
            raise RuntimeError(
                "An instance of CommunicationTokenCredential cannot be reused once it has been closed."
            )

        if not self._token_refresher or not self._is_token_expiring_soon(
                self._token):
            return self._token
        await self._update_token_and_reschedule()
        return self._token

    async def _update_token_and_reschedule(self):
        should_this_thread_refresh = False
        async with self._lock:
            while self._is_token_expiring_soon(self._token):
                if self._some_thread_refreshing:
                    if self._is_token_valid(self._token):
                        return self._token
                    await self._wait_till_lock_owner_finishes_refreshing()
                else:
                    should_this_thread_refresh = True
                    self._some_thread_refreshing = True
                    break

        if should_this_thread_refresh:
            try:
                new_token = await self._token_refresher()
                if not self._is_token_valid(new_token):
                    raise ValueError(
                        "The token returned from the token_refresher is expired."
                    )
                async with self._lock:
                    self._token = new_token
                    self._some_thread_refreshing = False
                    self._lock.notify_all()
            except:
                async with self._lock:
                    self._some_thread_refreshing = False
                    self._lock.notify_all()
                raise
        if self._proactive_refresh:
            self._schedule_refresh()
        return self._token

    def _schedule_refresh(self):
        if self._is_closed.is_set():
            return
        if self._timer is not None:
            self._timer.cancel()

        token_ttl = self._token.expires_on - get_current_utc_as_int()

        if self._is_token_expiring_soon(self._token):
            # Schedule the next refresh for when it reaches a certain percentage of the remaining lifetime.
            timespan = token_ttl // 2
        else:
            # Schedule the next refresh for when it gets in to the soon-to-expire window.
            timespan = token_ttl - timedelta(
                minutes=self._DEFAULT_AUTOREFRESH_INTERVAL_MINUTES
            ).total_seconds()

        self._timer = AsyncTimer(timespan, self._update_token_and_reschedule)
        self._timer.start()

    async def _wait_till_lock_owner_finishes_refreshing(self):

        self._lock.release()
        await self._lock.acquire()

    def _is_token_expiring_soon(self, token):
        if self._proactive_refresh:
            interval = timedelta(
                minutes=self._DEFAULT_AUTOREFRESH_INTERVAL_MINUTES)
        else:
            interval = timedelta(
                minutes=self._ON_DEMAND_REFRESHING_INTERVAL_MINUTES)
        return ((token.expires_on - get_current_utc_as_int()) <
                interval.total_seconds())

    @classmethod
    def _is_token_valid(cls, token):
        return get_current_utc_as_int() < token.expires_on

    async def __aenter__(self):
        if self._proactive_refresh:
            if self._is_closed.is_set():
                raise RuntimeError(
                    "An instance of CommunicationTokenCredential cannot be reused once it has been closed."
                )
            self._schedule_refresh()
        return self

    async def __aexit__(self, *args):
        await self.close()

    async def close(self) -> None:
        if self._timer is not None:
            self._timer.cancel()
        self._timer = None
        self._is_closed.set()
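
A usage sketch (assumed; initial_token and fetch_token_async are hypothetical placeholders): with proactive_refresh enabled, the credential must be closed after use, so run it as an async context manager.

async def demo():
    async def refresher():
        return await fetch_token_async()  # hypothetical: returns a valid AccessToken

    async with CommunicationTokenCredential(
            initial_token, token_refresher=refresher, proactive_refresh=True) as cred:
        token = await cred.get_token()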
Example #13
class Supervisor:
    def __init__(
        self,
        pool: Executor,
        nvim: Nvim,
        vars_dir: Path,
        match: MatchOptions,
        comp: CompleteOptions,
        limits: Limits,
        reviewer: PReviewer,
    ) -> None:
        self.pool = pool
        self.vars_dir = vars_dir
        self.match, self.comp, self.limits = match, comp, limits
        self.nvim, self._reviewer = nvim, reviewer

        self.idling = Condition()
        self._workers: MutableMapping[Worker, BaseClient] = WeakKeyDictionary()

        self._lock = Lock()
        self._task: Optional[Task] = None
        self._tasks: Sequence[Task] = ()

    @property
    def clients(self) -> AbstractSet[BaseClient]:
        return {*self._workers.values()}

    def register(self, worker: Worker, assoc: BaseClient) -> None:
        self._reviewer.register(assoc)
        self._workers[worker] = assoc

    def notify_idle(self) -> None:
        async def cont() -> None:
            async with self.idling:
                self.idling.notify_all()

        go(self.nvim, aw=cont())

    async def interrupt(self) -> None:
        g = gather(*chain(((self._task, ) if self._task else ()), self._tasks))
        self._task, self._tasks = None, ()
        await cancel(g)

    def collect(self, context: Context) -> Awaitable[Sequence[Metric]]:
        loop: AbstractEventLoop = self.nvim.loop
        t1, done = monotonic(), False
        timeout = (self.limits.completion_manual_timeout
                   if context.manual else self.limits.completion_auto_timeout)

        acc: MutableSequence[Metric] = []

        async def supervise(worker: Worker, assoc: BaseClient) -> None:
            instance, items = uuid4(), 0

            with with_suppress(), timeit(f"WORKER -- {assoc.short_name}"):
                await self._reviewer.s_begin(assoc, instance=instance)
                try:
                    async for completion in worker.work(context):
                        if not done and completion:
                            metric = self._reviewer.trans(
                                instance, completion=completion)
                            acc.append(metric)
                            items += 1
                        else:
                            await sleep(0)
                finally:
                    elapsed = monotonic() - t1
                    await self._reviewer.s_end(
                        instance,
                        interrupted=done,
                        elapsed=elapsed,
                        items=items,
                    )

        async def cont() -> Sequence[Metric]:
            nonlocal done

            with with_suppress(), timeit("COLLECTED -- ALL"):
                if self._lock.locked():
                    log.warn("%s", "SHOULD NOT BE LOCKED <><> supervisor")
                async with self._lock:
                    await self._reviewer.begin(context)
                    self._tasks = tasks = tuple(
                        loop.create_task(supervise(worker, assoc=assoc))
                        for worker, assoc in self._workers.items())
                    try:
                        if not tasks:
                            return ()
                        else:
                            _, pending = await wait(tasks, timeout=timeout)
                            if not acc:
                                for fut in as_completed(pending):
                                    await fut
                                    if acc:
                                        break
                            return acc
                    finally:
                        done = True

        self._task = loop.create_task(cont())
        return self._task
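
The waiting side of notify_idle, as a sketch (an assumed caller, not part of the original): a coroutine that blocks until the supervisor signals idleness.

async def wait_until_idle(supervisor: Supervisor) -> None:
    async with supervisor.idling:
        await supervisor.idling.wait()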