async def __call__(self, client, message):
    """
    Calls the cooldown with the respective `client` and `message`, and then yields whether the command can be
    called, and if not, then with what extra parameters the handler should receive.
    
    This method is a coroutine generator.
    
    Parameters
    ----------
    client : ``Client``
        The client who received the respective message.
    message : ``Message``
        The received message.
    
    Yields
    ------
    passed : `bool`
        Whether the command can be called. If not, then yields additional parameters to call the cooldown's
        handler with.
    time_left : `float`
        How much time is left till the cooldown's expiration.
    """
    value = self.checker(self, message)
    if not value:
        yield True
        return
    
    yield False
    yield value - LOOP_TIME()
    return
async def __call__(self, command_context):
    """
    Calls the cooldown with the respective command context and raises if the command is on cooldown.
    
    This method is a coroutine.
    
    Parameters
    ----------
    command_context : ``CommandContext``
        The received command's context.
    
    Raises
    ------
    CommandCooldownError
        If the command is on cooldown, or if a guild-bound cooldown was called from outside of a guild.
    """
    expires_at = self.checker(self, command_context)
    if expires_at:
        if expires_at == -1:
            expires_after = -1
        else:
            expires_after = expires_at - LOOP_TIME()
        
        raise CommandCooldownError(self, expires_after)
def _check_channel(cooldown_handler, command_context):
    """
    Executes channel cooldown check.
    
    Might be set as the ``CooldownHandler``'s ``.checker`` attribute.
    
    Parameters
    ----------
    cooldown_handler : ``CooldownHandler``
        The parent cooldown handler.
    command_context : ``CommandContext``
        The received command's context.
    
    Returns
    -------
    expires_at : `float`
        When the cooldown for the given entity will expire.
    """
    channel_id = command_context.message.channel.id
    
    cache = cooldown_handler.cache
    try:
        unit = cache[channel_id]
    except KeyError:
        at_ = LOOP_TIME() + cooldown_handler.reset
        cache[channel_id] = CooldownUnit(at_, cooldown_handler.limit)
        KOKORO.call_at(at_, dict.__delitem__, cache, channel_id)
        return 0.
    
    left = unit.uses_left
    if left > 0:
        unit.uses_left = left - cooldown_handler.weight
        return 0.
    
    return unit.expires_at
def wake_up(self):
    """
    Wakes up the waiting futures of the ``GatewayRateLimiter``.
    """
    queue = self.queue
    remaining = GATEWAY_RATE_LIMIT_LIMIT
    
    if queue:
        while True:
            if not queue:
                wake_upper = None
                break
            
            if not remaining:
                self.resets_at = resets_at = LOOP_TIME() + GATEWAY_RATE_LIMIT_RESET
                wake_upper = KOKORO.call_at(
                    resets_at + GATEWAY_RATE_LIMIT_RESET, type(self).wake_up, self
                )
                break
            
            queue.popleft().set_result_if_pending(False)
            remaining -= 1
    else:
        wake_upper = None
    
    self.wake_upper = wake_upper
    self.remaining = remaining
def _check_channel(self, message):
    """
    Executes channel cooldown check.
    
    Might be set as the ``Cooldown``'s ``.checker`` attribute.
    
    Parameters
    ----------
    message : ``Message``
        The received message.
    
    Returns
    -------
    expires_at : `float`
        When the cooldown for the given entity will expire.
    """
    id_ = message.channel.id
    
    cache = self.cache
    try:
        unit = cache[id_]
    except KeyError:
        at_ = LOOP_TIME() + self.reset
        cache[id_] = _CDUnit(at_, self.limit)
        KOKORO.call_at(at_, dict.__delitem__, cache, id_)
        return 0.
    
    left = unit.uses_left
    if left > 0:
        unit.uses_left = left - self.weight
        return 0.
    
    return unit.expires_at
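# The two `_check_channel` variants above share one bookkeeping pattern: keep a per-entity
# "cooldown unit" in a dict, schedule its removal for when the window expires, and consume
# uses from it until it runs dry. The sketch below is a minimal, self-contained illustration
# of that pattern on top of plain asyncio; the `CooldownUnit`, `limit`, `reset` and `weight`
# names mirror the snippets above, but everything else here is an assumption, not hata's API.
import asyncio


class CooldownUnit:
    # When the unit expires (loop time) and how many uses remain in the current window.
    __slots__ = ('expires_at', 'uses_left')
    
    def __init__(self, expires_at, uses_left):
        self.expires_at = expires_at
        self.uses_left = uses_left


def check_cooldown(cache, entity_id, *, limit = 3, reset = 10.0, weight = 1):
    """Returns `0.0` if the call may proceed, else the loop time when the cooldown expires."""
    loop = asyncio.get_running_loop()
    now = loop.time()
    
    unit = cache.get(entity_id, None)
    if (unit is None) or (unit.expires_at <= now):
        expires_at = now + reset
        cache[entity_id] = CooldownUnit(expires_at, limit - weight)
        # Drop the unit once its window passed, so the cache cannot grow without bound.
        loop.call_at(expires_at, cache.pop, entity_id, None)
        return 0.0
    
    if unit.uses_left > 0:
        unit.uses_left -= weight
        return 0.0
    
    return unit.expires_at


async def main():
    cache = {}
    for _ in range(5):
        expires_at = check_cooldown(cache, 1234)
        print('allowed' if not expires_at else f'on cooldown until loop time {expires_at:.2f}')


asyncio.run(main())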
def __iter__(self):
    """
    Awaits the rate limit handler.
    
    This method is a generator. Should be used with `await` expression.
    
    Returns
    -------
    cancelled : `bool`
        Whether the respective gateway was closed.
    """
    now = LOOP_TIME()
    if now >= self.resets_at:
        self.resets_at = now + GATEWAY_RATE_LIMIT_RESET
        remaining = GATEWAY_RATE_LIMIT_LIMIT
    else:
        remaining = self.remaining
    
    if remaining:
        self.remaining = remaining - 1
        return False
    
    if self.wake_upper is None:
        self.wake_upper = KOKORO.call_at(self.resets_at, type(self).wake_up, self)
    
    future = Future(KOKORO)
    self.queue.append(future)
    return (yield from future)
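# The gateway rate limiter above is awaitable through a generator `__iter__` (presumably with
# `__await__` aliased to it): if a slot is free the await returns immediately, otherwise the
# caller is parked on a future that `wake_up` resolves when the window resets. The class below
# is a small, self-contained rebuild of that shape on plain asyncio; the constants and attribute
# names echo the snippet, but the implementation itself is only an illustrative assumption.
import asyncio

LIMIT = 2       # how many acquisitions fit into one window
RESET = 1.0     # window length in seconds


class TinyGatewayLimiter:
    __slots__ = ('queue', 'remaining', 'resets_at')
    
    def __init__(self):
        self.queue = []
        self.remaining = LIMIT
        self.resets_at = 0.0
    
    def __iter__(self):
        # Generator protocol: `await limiter` drives this generator.
        loop = asyncio.get_running_loop()
        now = loop.time()
        if now >= self.resets_at:
            self.resets_at = now + RESET
            self.remaining = LIMIT
        
        if self.remaining:
            self.remaining -= 1
            return False
        
        if not self.queue:
            # First waiter in this window; schedule the wake-up for when the window resets.
            loop.call_at(self.resets_at, self._wake_up)
        
        future = loop.create_future()
        self.queue.append(future)
        return (yield from future)
    
    __await__ = __iter__
    
    def _wake_up(self):
        # A new window opened: let up to LIMIT parked awaiters through.
        loop = asyncio.get_running_loop()
        self.resets_at = loop.time() + RESET
        released = 0
        while self.queue and (released < LIMIT):
            future = self.queue.pop(0)
            if not future.done():
                future.set_result(False)
                released += 1
        
        self.remaining = LIMIT - released
        if self.queue:
            # Still more waiters than the window allows; wake again on the next reset.
            loop.call_at(self.resets_at, self._wake_up)


async def main():
    limiter = TinyGatewayLimiter()
    for index in range(4):
        await limiter
        print(f'request {index} allowed at {asyncio.get_running_loop().time():.2f}')


asyncio.run(main())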
def __init__(self):
    """
    Creates a new mass user chunker.
    """
    self.waiter = Future(KOKORO)
    self.last = now = LOOP_TIME()
    self.timer = KOKORO.call_at(now + USER_CHUNK_TIMEOUT, type(self)._cancel, self)
def set_timeout(self, value):
    """
    Sets the timeout of the timeouter to the given value.
    
    Parameters
    ----------
    value : `float`
        The timeout to set, in seconds.
    """
    handle = self.handle
    if handle is None:
        # Cannot change timeout of an expired timeouter.
        return
    
    if value <= 0.0:
        self.timeout = 0.0
        handle._run()
        handle.cancel()
        return
    
    now = LOOP_TIME()
    next_step = self.handle.when
    
    planned_end = now + value
    if planned_end < next_step:
        handle.cancel()
        self.handle = KOKORO.call_at(planned_end, type(self)._step, self)
        return
    
    self.timeout = planned_end - next_step
def __call__(self, event):
    """
    Called when a chunk is received with its respective nonce.
    
    Updates the chunker's last received chunk's time to push out the current timeout.
    
    Parameters
    ----------
    event : ``GuildUserChunkEvent``
        The received guild user chunk's event.
    
    Returns
    -------
    is_last : `bool`
        Whether the last chunk was received.
    """
    self.last = LOOP_TIME()
    
    if event.index + 1 != event.count:
        return False
    
    self.waiter.set_result_if_pending(None)
    
    timer = self.timer
    if (timer is not None):
        self.timer = None
        timer.cancel()
    
    return True
def __repr__(self):
    """Returns the gateway rate limiter's representation."""
    repr_parts = [
        '<',
        self.__class__.__name__,
    ]
    
    resets_at = self.resets_at
    if resets_at <= LOOP_TIME():
        remaining = GATEWAY_RATE_LIMIT_LIMIT
    else:
        repr_parts.append(' resets_at=')
        repr_parts.append(repr(resets_at))
        repr_parts.append(' (monotonic),')
        
        remaining = self.remaining
    
    repr_parts.append(' remaining=')
    repr_parts.append(repr(remaining))
    repr_parts.append('>')
    
    return ''.join(repr_parts)
def get_expiration_delay(self):
    """
    Returns after how much time the timeouter will expire.
    
    If the timeouter already expired, returns `0.0`.
    
    Returns
    -------
    time_left : `float`
    """
    handle = self.handle
    if handle is None:
        return 0.0
    
    return handle.when - LOOP_TIME() + self.timeout
def _cancel(self):
    """
    The chunker's timer calls this method.
    
    If the chunker received any chunks since its ``.timer`` was started, pushes out the timeout.
    
    Cancels ``.waiter`` and ``.timer``. After this method was called, the waiting coroutine will remove its
    reference from the event handler.
    """
    now = LOOP_TIME()
    next_ = self.last + USER_CHUNK_TIMEOUT
    if next_ > now:
        self.timer = KOKORO.call_at(next_, type(self)._cancel, self)
    else:
        self.timer = None
        self.waiter.cancel()
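# The chunker above implements a "deferred timeout": instead of rescheduling the timer on every
# received chunk, it only records the time of the last chunk and, when the timer fires, either
# pushes the deadline out or finally cancels the waiter. Below is a small, self-contained asyncio
# rendition of that idea; the names (`USER_CHUNK_TIMEOUT`, `last`, `waiter`, `timer`) follow the
# snippet, the rest is an illustrative assumption.
import asyncio

USER_CHUNK_TIMEOUT = 1.5


class DeferredTimeout:
    __slots__ = ('last', 'timer', 'waiter')
    
    def __init__(self):
        loop = asyncio.get_running_loop()
        self.waiter = loop.create_future()
        self.last = now = loop.time()
        self.timer = loop.call_at(now + USER_CHUNK_TIMEOUT, self._cancel)
    
    def feed(self):
        # Called on every received chunk; cheap, no timer rescheduling here.
        self.last = asyncio.get_running_loop().time()
    
    def _cancel(self):
        loop = asyncio.get_running_loop()
        now = loop.time()
        next_ = self.last + USER_CHUNK_TIMEOUT
        if next_ > now:
            # Activity happened since the timer was armed; push the deadline out.
            self.timer = loop.call_at(next_, self._cancel)
        else:
            self.timer = None
            self.waiter.cancel()


async def main():
    timeout = DeferredTimeout()
    await asyncio.sleep(1.0)
    timeout.feed()  # activity extends the deadline past the original 1.5 seconds
    try:
        await timeout.waiter
    except asyncio.CancelledError:
        print('timed out after the pushed-out deadline')


asyncio.run(main())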
def _add_message_collection_delay(self, delay):
    """
    Adds message collection delay to the channel.
    
    Parameters
    ----------
    delay : `float`
        The time to delay the message collection with.
    """
    message_history_collector = self._message_history_collector
    if (message_history_collector is None):
        self._message_history_collector = MessageHistoryCollector(self, LOOP_TIME() + delay)
    else:
        message_history_collector.add_delay(delay)
def __repr__(self):
    """Returns the message history collector's representation."""
    repr_parts = ['<', self.__class__.__name__]
    
    handle = self.handle
    if handle is None:
        repr_parts.append(' cancelled')
    else:
        repr_parts.append(' scheduled: ')
        timestamp = datetime.utcfromtimestamp(current_time() + handle.when + self.delay - LOOP_TIME())
        repr_parts.append(timestamp.__format__(DATETIME_FORMAT_CODE))
    
    repr_parts.append('>')
    
    return ''.join(repr_parts)
def next_reset_after(self):
    """
    Similar to ``.next_reset_at``, but returns how much time is left until the next reset instead.
    
    Returns
    -------
    next_reset_after : `float`
    """
    handler = self.handler
    if handler is None:
        return 0.0
    
    drops = handler.drops
    if (drops is None) or (not drops):
        return 0.0
    
    return drops[0].drop - LOOP_TIME()
def format_loop_time(loop_time, style=None):
    """
    Formats monotonic event loop time to Discord's timestamp markdown format.
    
    For formatting details please check out ``TIMESTAMP_STYLES``, which contains the usable styles.
    
    Parameters
    ----------
    loop_time : `float`
        Monotonic loop time.
    style : `None`, `str` = `None`, Optional
        Format code to use. They are listed within ``TIMESTAMP_STYLES``.
    
    Returns
    -------
    formatted_string : `str`
    """
    return format_unix_time(loop_time - LOOP_TIME() + time_now(), style)
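# `format_loop_time` works by shifting a monotonic loop time onto the unix timeline:
# `loop_time - LOOP_TIME()` is "how far in the future (or past) the moment is", and adding
# `time_now()` (unix time) anchors it to wall-clock time. The sketch below reproduces that
# conversion with the standard library only; `discord_timestamp`, `loop_time_to_unix_time`
# and the 'R' (relative) style are assumptions used for illustration, not hata's API.
import asyncio
import time


def loop_time_to_unix_time(loop_time):
    # Convert a monotonic event loop timestamp into a unix timestamp.
    loop = asyncio.get_running_loop()
    return loop_time - loop.time() + time.time()


def discord_timestamp(unix_time, style = 'R'):
    # Discord's timestamp markdown: <t:unix_seconds:style>.
    return f'<t:{int(unix_time)}:{style}>'


async def main():
    loop = asyncio.get_running_loop()
    one_hour_from_now = loop.time() + 3600.0
    print(discord_timestamp(loop_time_to_unix_time(one_hour_from_now)))


asyncio.run(main())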
def _check_guild(cooldown_handler, command_context):
    """
    Executes guild based cooldown check.
    
    Might be set as the ``CooldownHandler``'s ``.checker`` attribute.
    
    Parameters
    ----------
    cooldown_handler : ``CooldownHandler``
        The parent cooldown handler.
    command_context : ``CommandContext``
        The received command's context.
    
    Returns
    -------
    expires_at : `float`
        When the cooldown for the given entity will expire.
        
        If the cooldown limitation is not applicable for the given entity, returns `-1.0`.
    """
    channel = command_context.message.channel
    if not isinstance(channel, ChannelGuildBase):
        return -1.0
    
    guild_id = channel.guild.id
    
    cache = cooldown_handler.cache
    try:
        unit = cache[guild_id]
    except KeyError:
        at_ = LOOP_TIME() + cooldown_handler.reset
        cache[guild_id] = CooldownUnit(at_, cooldown_handler.limit)
        KOKORO.call_at(at_, dict.__delitem__, cache, guild_id)
        return 0.
    
    left = unit.uses_left
    if left > 0:
        unit.uses_left = left - cooldown_handler.weight
        return 0.
    
    return unit.expires_at
def _check_guild(self, message):
    """
    Executes guild based cooldown check.
    
    Might be set as the ``Cooldown``'s ``.checker`` attribute.
    
    Parameters
    ----------
    message : ``Message``
        The received message.
    
    Returns
    -------
    expires_at : `float`
        When the cooldown for the given entity will expire.
        
        If the cooldown limitation is not applicable for the given entity, returns `-1.0`.
    """
    channel = message.channel
    if channel.type in (1, 3):
        # Channel types 1 and 3 are private and group channels; guild based cooldown is not applicable.
        return -1.
    else:
        id_ = channel.guild.id
    
    cache = self.cache
    try:
        unit = cache[id_]
    except KeyError:
        at_ = LOOP_TIME() + self.reset
        cache[id_] = _CDUnit(at_, self.limit)
        KOKORO.call_at(at_, dict.__delitem__, cache, id_)
        return 0.
    
    left = unit.uses_left
    if left > 0:
        unit.uses_left = left - self.weight
        return 0.
    
    return unit.expires_at
async def get_weekend_status(self, *, force_update=False):
    """
    Returns whether the weekend multiplier is on.
    
    This method is a coroutine.
    
    Parameters
    ----------
    force_update : `bool` = `False`, Optional (Keyword only)
        Whether the weekend status should be forcefully updated instead of using the cached one.
    
    Returns
    -------
    weekend_status : `bool`
    
    Raises
    ------
    ConnectionError
        No internet connection.
    TopGGGloballyRateLimited
        If the client got globally rate limited by top.gg and `raise_on_top_gg_global_rate_limit` was given as
        `True`.
    TopGGHttpException
        Any exception raised by top.gg api.
    """
    if force_update or (self._weekend_status_cache_time < LOOP_TIME()):
        task = self._weekend_status_request_task
        if task is None:
            task = Task(get_weekend_status_task(self), KOKORO)
            self._weekend_status_request_task = task
        
        weekend_status = await shield(task, KOKORO)
    else:
        weekend_status = self._weekend_status_cache_value
    
    return weekend_status
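# `get_weekend_status` collapses concurrent callers onto one in-flight request: the first
# caller creates the task, later callers shield-await the same task, and the result is cached
# with an expiry checked against loop time. The snippet below is a self-contained asyncio
# version of that "single-flight with TTL" idea; names such as `SingleFlightCache`,
# `fetch_value` and `CACHE_TIME` are assumptions for illustration only.
import asyncio

CACHE_TIME = 5.0


class SingleFlightCache:
    def __init__(self, fetch_coroutine_function):
        self._fetch = fetch_coroutine_function
        self._task = None
        self._value = None
        self._expires_at = 0.0
    
    async def get(self, *, force_update = False):
        loop = asyncio.get_running_loop()
        if force_update or (self._expires_at < loop.time()):
            task = self._task
            if task is None:
                # First caller after expiry: start one fetch task shared by everyone.
                task = loop.create_task(self._do_fetch())
                self._task = task
            
            # `shield` lets this particular caller be cancelled without cancelling the shared task.
            return await asyncio.shield(task)
        
        return self._value
    
    async def _do_fetch(self):
        try:
            value = await self._fetch()
            self._value = value
            self._expires_at = asyncio.get_running_loop().time() + CACHE_TIME
            return value
        finally:
            self._task = None


async def fetch_value():
    await asyncio.sleep(0.2)  # stands in for the actual network request
    return True


async def main():
    cache = SingleFlightCache(fetch_value)
    results = await asyncio.gather(cache.get(), cache.get(), cache.get())
    print(results)  # all three callers share a single fetch


asyncio.run(main())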
async def _request(self, method, url, rate_limit_handler, data=None, query_parameters=None):
    """
    Does a request towards top.gg API.
    
    This method is a coroutine.
    
    Parameters
    ----------
    method : `str`
        Http method.
    url : `str`
        Endpoint to do request towards.
    rate_limit_handler : ``RateLimitHandlerBase``
        Rate limit handle to handle rate limit as.
    data : `None`, `Any` = `None`, Optional
        Json serializable data.
    query_parameters : `None`, `Any` = `None`, Optional
        Query parameters.
    
    Raises
    ------
    ConnectionError
        No internet connection.
    TopGGGloballyRateLimited
        If the client got globally rate limited by top.gg and `raise_on_top_gg_global_rate_limit` was given as
        `True`.
    TopGGHttpException
        Any exception raised by top.gg api.
    """
    headers = self._headers.copy()
    
    if (data is not None):
        headers[CONTENT_TYPE] = 'application/json'
        data = to_json(data)
    
    try_again = 2
    while try_again > 0:
        global_rate_limit_expires_at = self._global_rate_limit_expires_at
        if global_rate_limit_expires_at > LOOP_TIME():
            if self._raise_on_top_gg_global_rate_limit:
                raise TopGGGloballyRateLimited(None)
            
            future = Future(KOKORO)
            KOKORO.call_at(global_rate_limit_expires_at, Future.set_result_if_pending, future, None)
            await future
        
        async with rate_limit_handler.ctx():
            try:
                async with RequestContextManager(
                    self.http._request(method, url, headers, data, query_parameters)
                ) as response:
                    response_data = await response.text(encoding='utf-8')
            except OSError as err:
                if not try_again:
                    raise ConnectionError(
                        'Invalid address or no connection with Top.gg.'
                    ) from err
                
                await sleep(0.5 / try_again, KOKORO)
                try_again -= 1
                continue
            
            response_headers = response.headers
            status = response.status
            
            content_type_headers = response_headers.get(CONTENT_TYPE, None)
            if (content_type_headers is not None) and content_type_headers.startswith('application/json'):
                response_data = from_json(response_data)
            
            if 199 < status < 305:
                return response_data
            
            # Are we rate limited?
            if status == 429:
                try:
                    retry_after = response_headers[RETRY_AFTER]
                except KeyError:
                    retry_after = RATE_LIMIT_GLOBAL_DEFAULT_DURATION
                else:
                    try:
                        retry_after = float(retry_after)
                    except ValueError:
                        retry_after = RATE_LIMIT_GLOBAL_DEFAULT_DURATION
                
                self._global_rate_limit_expires_at = LOOP_TIME() + retry_after
                
                if self._raise_on_top_gg_global_rate_limit:
                    raise TopGGGloballyRateLimited(None)
                
                await sleep(retry_after, KOKORO)
                continue
            
            # Python casts sets to frozensets
            if (status in {400, 401, 402, 404}):
                raise TopGGHttpException(response, response_data)
            
            if try_again and (status >= 500):
                await sleep(10.0 / try_again, KOKORO)
                try_again -= 1
                continue
            
            raise TopGGHttpException(response, response_data)
def __init__(self):
    """
    Creates a new user chunker.
    """
    self.waiter = Future(KOKORO)
    self.timer = KOKORO.call_at(LOOP_TIME() + USER_CHUNK_TIMEOUT, type(self)._cancel, self)
async def execute(self, client):
    """
    Executes the request and returns its result or raises.
    
    This method is a coroutine.
    
    Returns
    -------
    result : `Any`
    
    Raises
    ------
    ConnectionError
        If there is no internet connection, or there is no available cached result.
    DiscordException
        If any exception was received from the Discord API.
    """
    if (LOOP_TIME() - self.timeout) < self._last_update:
        if self._active_request:
            waiter = self._waiter
            if waiter is None:
                waiter = self._waiter = Future(KOKORO)
            
            result = await waiter
        else:
            result = self.cached
        
        return result
    
    self._active_request = True
    try:
        result = await self.func(client)
    except ConnectionError as err:
        result = self.cached
        if (result is ...):
            waiter = self._waiter
            if (waiter is not None):
                self._waiter = None
                waiter.set_exception(err)
            
            raise
    except BaseException as err:
        waiter = self._waiter
        if (waiter is not None):
            self._waiter = None
            waiter.set_exception(err)
        
        raise
    else:
        self._last_update = LOOP_TIME()
    finally:
        self._active_request = False
    
    waiter = self._waiter
    if (waiter is not None):
        self._waiter = None
        waiter.set_result(result)
    
    return result
def exit(self, headers):
    """
    Called by the rate limit handler's context manager (``RateLimitHandlerCTX``) when a respective request is
    done.
    
    Calculates the rate limits based on the given ``headers``. Handles first request, optimistic rate limit
    handling and changed rate limit sizes as well.
    
    Parameters
    ----------
    headers : `None`, `imultidict` of (`str`, `str`) items
        Response headers.
    """
    current_size = self.parent.size
    if current_size == UNLIMITED_SIZE_VALUE:
        return
    
    self.active -= 1
    
    optimistic = False
    while True:
        if (headers is not None):
            size = headers.get(RATE_LIMIT_LIMIT, None)
            if size is None:
                if current_size < 0:
                    optimistic = True
                    # A not so special case when the endpoint is not rate limited yet.
                    # If this happens, we increase the maximal size.
                    size = current_size
                    if size > MAXIMAL_UNLIMITED_PARARELLITY:
                        size -= 1
                    break
            else:
                size = int(size)
                break
        
        wake_upper = self.wake_upper
        if (wake_upper is not None):
            wake_upper.cancel()
            self.wake_upper = None
        
        self.wake_up()
        return
    
    allocates = 1
    
    if size != current_size:
        self.parent.size = size
        
        if optimistic:
            current_size = -current_size
            size = -size
        
        if size > current_size:
            if current_size == -1 or current_size == 0:
                current_size = 1
                # We might have cooldowns from before as well
                allocates = size - int(headers[RATE_LIMIT_REMAINING])
            
            can_free = size - current_size
            queue = self.queue
            queue_length = len(queue)
            
            if can_free > queue_length:
                can_free = queue_length
            
            while can_free > 0:
                future = queue.popleft()
                future.set_result(None)
                can_free -= 1
                continue
    
    if optimistic:
        delay = 1.0
    else:
        delay1 = (
            datetime.fromtimestamp(float(headers[RATE_LIMIT_RESET]), timezone.utc) -
            parse_date_header_to_datetime(headers[DATE])
        ).total_seconds()
        delay2 = float(headers[RATE_LIMIT_RESET_AFTER])
        if delay1 < delay2:
            delay = delay1
        else:
            delay = delay2
    
    drop = LOOP_TIME() + delay
    
    drops = self.drops
    if (drops is None):
        self.drops = RateLimitUnit(drop, allocates)
    else:
        drops.update_with(drop, allocates)
    
    wake_upper = self.wake_upper
    if wake_upper is None:
        wake_upper = KOKORO.call_at(drop, type(self).wake_up, self)
        self.wake_upper = wake_upper
        return
    
    if wake_upper.when <= drop:
        return
    
    wake_upper.cancel()
    wake_upper = KOKORO.call_at(drop, type(self).wake_up, self)
    self.wake_upper = wake_upper
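# In `exit` above the next reset is derived from the response headers twice: once from the
# absolute reset timestamp minus the server's `Date` header, and once from the relative
# reset-after value, keeping the smaller of the two so clock drift between the local machine
# and the server cannot lengthen the wait. A standalone illustration of that computation, with
# made-up header values, is below; the header names mirror Discord's documented rate limit
# headers, everything else here is an assumption.
from datetime import datetime, timezone
from email.utils import parsedate_to_datetime

response_headers = {
    'X-RateLimit-Reset': '1700000005.123',
    'X-RateLimit-Reset-After': '4.877',
    'Date': 'Tue, 14 Nov 2023 22:13:20 GMT',  # unix time 1700000000
}

delay_from_reset = (
    datetime.fromtimestamp(float(response_headers['X-RateLimit-Reset']), timezone.utc)
    - parsedate_to_datetime(response_headers['Date'])
).total_seconds()

delay_from_reset_after = float(response_headers['X-RateLimit-Reset-After'])

# Both values describe the same moment; the shorter one wins if the clocks disagree.
delay = min(delay_from_reset, delay_from_reset_after)
print(delay_from_reset, delay_from_reset_after, delay)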
async def execute(self, client, parameter):
    """
    Executes the request and returns its result or raises.
    
    This method is a coroutine.
    
    Parameters
    ----------
    client : ``Client``
        The client, whose `.discovery_validate_term` method was called.
    parameter : `str`
        The discovery term.
    
    Returns
    -------
    result : `Any`
    
    Raises
    ------
    ConnectionError
        If there is no internet connection, or there is no available cached result.
    TypeError
        The given `parameter` was not passed as `str`.
    DiscordException
        If any exception was received from the Discord API.
    """
    # First check parameter
    parameter_type = parameter.__class__
    if parameter_type is str:
        pass
    elif issubclass(parameter_type, str):
        parameter = str(parameter)
    else:
        raise TypeError(
            f'`parameter` can be `str`, got {parameter_type.__name__}; {parameter!r}.'
        )
    
    # First check cache
    try:
        unit = self.cached[parameter]
    except KeyError:
        unit = None
    else:
        now = LOOP_TIME()
        if self.timeout + unit.creation_time > now:
            unit.last_usage_time = now
            return unit.result
    
    # Second check actual request
    try:
        waiter = self._waiters[parameter]
    except KeyError:
        pass
    else:
        if waiter is None:
            self._waiters[parameter] = waiter = Future(KOKORO)
        
        return await waiter
    
    # No actual request is being done, so mark that we are doing a request.
    self._waiters[parameter] = None
    
    # Search client with free rate limits.
    free_count = RateLimitProxy(client, *self._rate_limit_proxy_parameters).free_count
    if not free_count:
        requester = client
        for client_ in CLIENTS.values():
            if client_ is client:
                continue
            
            free_count = RateLimitProxy(client_, *self._rate_limit_proxy_parameters).free_count
            if free_count:
                requester = client_
                break
            
            continue
        
        # If there is no client with free count do not care about the reset times, because probably only 1 client
        # forces requests anyways, so its rate limits will reset first as well.
        client = requester
    
    # Do the request
    try:
        result = await self.func(client, parameter)
    except ConnectionError as err:
        if (unit is None):
            waiter = self._waiters.pop(parameter)
            if (waiter is not None):
                waiter.set_exception(err)
            
            raise
        
        unit.last_usage_time = LOOP_TIME()
        result = unit.result
    except BaseException as err:
        waiter = self._waiters.pop(parameter, None)
        if (waiter is not None):
            waiter.set_exception(err)
        
        raise
    else:
        if unit is None:
            self.cached[parameter] = unit = TimedCacheUnit()
        
        now = LOOP_TIME()
        unit.last_usage_time = now
        unit.creation_time = now
        unit.result = result
    finally:
        # Do cleanup if needed
        now = LOOP_TIME()
        if self._last_cleanup + self._minimal_cleanup_interval < now:
            self._last_cleanup = now
            
            cleanup_till = now - self.timeout
            collected = []
            
            cached = self.cached
            for cached_parameter, cached_unit in cached.items():
                if cached_unit.last_usage_time < cleanup_till:
                    collected.append(cached_parameter)
            
            for cached_parameter in collected:
                del cached[cached_parameter]
    
    waiter = self._waiters.pop(parameter)
    if (waiter is not None):
        waiter.set_result(result)
    
    return result
async def _do_transfer(self):
    """
    Sends the data written to the stream to the respective channel.
    
    This method is a coroutine.
    """
    try:
        client = self._client
        while True:
            try:
                message = self._last_message
                if (message is None):
                    un_poll = None
                    should_edit = False
                else:
                    last_action = message.edited_at
                    if last_action is None:
                        last_action = message.created_at
                    
                    if (last_action + MESSAGE_EDIT_TIMEDELTA > datetime.utcnow()):
                        un_poll = self._last_chunk
                        should_edit = True
                    else:
                        un_poll = None
                        should_edit = False
                
                raw_data = self._poll(un_poll)
                if raw_data is None:
                    break
                
                if len(raw_data) < (self._chunk_size >> 1):
                    maybe_update_next = True
                else:
                    maybe_update_next = False
                
                if self._sanitize:
                    data = sanitize_content(raw_data, guild=self._channel.guild)
                else:
                    data = raw_data
                
                request_start = LOOP_TIME()
                
                if should_edit:
                    await client.message_edit(message, data)
                else:
                    message = await client.message_create(self._channel, data)
                
                if maybe_update_next:
                    self._last_message = message
                    self._last_chunk = raw_data
                else:
                    self._last_message = None
                    self._last_chunk = None
                
                sleep_time = request_start - LOOP_TIME() + REQUEST_RATE_LIMIT
                if sleep_time > 0.0:
                    await sleep(sleep_time, KOKORO)
            
            except BaseException as err:
                self._last_message = None
                self._last_chunk = None
                await client.events.error(client, f'{self!r}._do_transfer', err)
    finally:
        self._transfer_task = None
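# The transfer loop above paces itself against monotonic time: it notes `request_start` before
# the request, and afterwards sleeps `request_start - LOOP_TIME() + REQUEST_RATE_LIMIT`, so the
# time already spent inside the request counts against the per-request budget. A stripped-down
# asyncio rendition of that pacing follows; `REQUEST_RATE_LIMIT`, `do_request` and
# `paced_transfer` are assumptions, not hata's API.
import asyncio
import random

REQUEST_RATE_LIMIT = 1.0  # minimal spacing between request starts, in seconds


async def do_request(index):
    # Stand-in for an actual API call with variable duration.
    await asyncio.sleep(random.uniform(0.1, 0.6))
    print(f'request {index} finished at {asyncio.get_running_loop().time():.2f}')


async def paced_transfer():
    loop = asyncio.get_running_loop()
    for index in range(3):
        request_start = loop.time()
        await do_request(index)
        
        # Only sleep for whatever is left of the budget after the request itself.
        sleep_time = request_start - loop.time() + REQUEST_RATE_LIMIT
        if sleep_time > 0.0:
            await asyncio.sleep(sleep_time)


asyncio.run(paced_transfer())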