Exemple #1
0
class VoiceGate(Cog):
    """Voice channels verification management."""

    # RedisCache[t.Union[discord.User.id, discord.Member.id], t.Union[discord.Message.id, int]]
    # The cache's keys are the IDs of members who are verified or have joined a voice channel
    # The cache's values are either the message ID of the ping message or 0 (NO_MSG) if no message is present
    redis_cache = RedisCache()

    def __init__(self, bot: Bot) -> None:
        # Keep a reference to the bot so commands/listeners can reach shared state.
        self.bot = bot

    @property
    def mod_log(self) -> ModLog:
        """Get the currently loaded ModLog cog instance."""
        # Looked up on every access so a reloaded ModLog cog is always the live one.
        return self.bot.get_cog("ModLog")

    @redis_cache.atomic_transaction  # Fully process each call until starting the next
    async def _delete_ping(self, member_id: int) -> None:
        """
        If `redis_cache` holds a message ID for `member_id`, delete the message.

        If the message was deleted, the value under the `member_id` key is then set to `NO_MSG`.
        When `member_id` is not in the cache, or has a value of `NO_MSG` already, this function
        does nothing.
        """
        if message_id := await self.redis_cache.get(member_id):
            log.trace(
                f"Removing voice gate reminder message for user: {member_id}")
            with suppress(discord.NotFound):
                await self.bot.http.delete_message(Channels.voice_gate,
                                                   message_id)
            await self.redis_cache.set(member_id, NO_MSG)
        else:
Exemple #2
0
class DMRelay(Cog):
    """Relay direct messages to and from the bot."""

    # RedisCache[str, t.Union[discord.User.id, discord.Member.id]]
    # Holds a single "last_user" key: the ID of the most recent human to DM the bot.
    dm_cache = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot
        self.webhook_id = constants.Webhooks.dm_log
        # Populated asynchronously by fetch_webhook(); stays None if the fetch
        # fails, in which case on_message() ignores incoming DMs.
        self.webhook = None
        self.bot.loop.create_task(self.fetch_webhook())

    @commands.command(aliases=("reply", ))
    async def send_dm(self, ctx: commands.Context,
                      member: Optional[UserMentionOrID], *,
                      message: str) -> None:
        """
        Allows you to send a DM to a user from the bot.

        If `member` is not provided, it will send to the last user who DM'd the bot.

        This feature should be used extremely sparingly. Use ModMail if you need to have a serious
        conversation with a user. This is just for responding to extraordinary DMs, having a little
        fun with users, and telling people they are DMing the wrong bot.

        NOTE: This feature will be removed if it is overused.
        """
        if not member:
            # Fall back to the author of the last relayed DM, if one was cached.
            user_id = await self.dm_cache.get("last_user")
            member = ctx.guild.get_member(user_id) if user_id else None

        # If we still don't have a Member at this point, give up
        if not member:
            log.debug(
                "This bot has never gotten a DM, or the RedisCache has been cleared."
            )
            await ctx.message.add_reaction("❌")
            return

        try:
            await member.send(message)
        except discord.errors.Forbidden:
            # The target's privacy settings block DMs from the bot.
            log.debug("User has disabled DMs.")
            await ctx.message.add_reaction("❌")
        else:
            await ctx.message.add_reaction("✅")
            self.bot.stats.incr("dm_relay.dm_sent")

    async def fetch_webhook(self) -> None:
        """Fetches the webhook object, so we can post to it."""
        await self.bot.wait_until_guild_available()

        try:
            self.webhook = await self.bot.fetch_webhook(self.webhook_id)
        except discord.HTTPException:
            # self.webhook stays None; on_message() will then do nothing.
            log.exception(
                f"Failed to fetch webhook with id `{self.webhook_id}`")

    @Cog.listener()
    async def on_message(self, message: discord.Message) -> None:
        """Relays the message's content and attachments to the dm_log channel."""
        # Only relay DMs from humans
        if message.author.bot or message.guild or self.webhook is None:
            return

        if message.clean_content:
            await send_webhook(
                webhook=self.webhook,
                content=message.clean_content,
                username=f"{message.author.display_name} ({message.author.id})",
                avatar_url=message.author.avatar_url)
            # Remember the sender so `send_dm` can reply without an explicit member.
            await self.dm_cache.set("last_user", message.author.id)
            self.bot.stats.incr("dm_relay.dm_received")

        # Handle any attachments
        if message.attachments:
            try:
                await send_attachments(message, self.webhook)
            except (discord.errors.Forbidden, discord.errors.NotFound):
                # Attachment inaccessible or already deleted; relay a notice instead.
                e = discord.Embed(
                    description=
                    ":x: **This message contained an attachment, but it could not be retrieved**",
                    color=Color.red())
                await send_webhook(
                    webhook=self.webhook,
                    embed=e,
                    username=
                    f"{message.author.display_name} ({message.author.id})",
                    avatar_url=message.author.avatar_url)
            except discord.HTTPException:
                log.exception("Failed to send an attachment to the webhook")

    async def cog_check(self, ctx: commands.Context) -> bool:
        """Only allow moderators to invoke the commands in this cog."""
        # Both conditions must hold: the invoker has a moderation role AND the
        # command is used in the DM-log channel (fail_silently avoids noise).
        checks = [
            await
            commands.has_any_role(*constants.MODERATION_ROLES).predicate(ctx),
            in_whitelist_check(
                ctx,
                channels=[constants.Channels.dm_log],
                redirect=None,
                fail_silently=True,
            )
        ]
        return all(checks)
class SpookyNameRate(Cog):
    """
    A game that asks the user to spookify or halloweenify a name that is given everyday.

    It sends a random name everyday. The user needs to try and spookify it to his best ability and
    send that name back using the `spookynamerate add entry` command
    """

    # This cache stores the message id of each added word along with a dictionary which contains the name the author
    # added, the author's id, and the author's score (which is 0 by default)
    messages = RedisCache()

    # The data cache stores small information such as the current name that is going on and whether it is the first time
    # the bot is running
    data = RedisCache()
    # NOTE(review): getenv returns a *str* whenever the variable is set, so any
    # non-empty value (including "0" or "False") enables debug — confirm intended.
    debug = getenv(
        "SPOOKYNAMERATE_DEBUG", False
    )  # Enable if you do not want to limit the commands to October or if

    # you do not want to wait till 12 UTC. Note: if debug is enabled and you run `.cogs reload spookynamerate`, it
    # will automatically start the scoring and announcing the result (without waiting for 12, so do not expect it to.).
    # Also, it won't wait for the two hours (when the poll closes).

    def __init__(self, bot: Bot):
        self.bot = bot
        # The current name to spookify; loaded asynchronously in load_vars().
        self.name = None

        self.bot.loop.create_task(self.load_vars())

        self.first_time = None
        # While True, the poll is open and no new entries may be added/deleted.
        self.poll = False
        self.announce_name.start()
        self.checking_messages = asyncio.Lock()
        # Define an asyncio.Lock() to make sure the dictionary isn't changed
        # when checking the messages for duplicate emojis'

    async def load_vars(self) -> None:
        """Loads the variables that couldn't be loaded in __init__."""
        self.first_time = await self.data.get("first_time", True)
        self.name = await self.data.get("name")

    @group(name="spookynamerate", invoke_without_command=True)
    async def spooky_name_rate(self, ctx: Context) -> None:
        """Get help on the Spooky Name Rate game."""
        await ctx.send(embed=Embed.from_dict(HELP_MESSAGE_DICT))

    @spooky_name_rate.command(name="list", aliases=("all", "entries"))
    async def list_entries(self, ctx: Context) -> None:
        """Send all the entries up till now in a single embed."""
        await ctx.send(embed=await self.get_responses_list(final=False))

    @spooky_name_rate.command(name="name")
    async def tell_name(self, ctx: Context) -> None:
        """Tell the current random name."""
        if not self.poll:
            await ctx.send(f"The name is **{self.name}**")
            return

        await ctx.send(
            f"The name ~~is~~ was **{self.name}**. The poll has already started, so you cannot "
            "add an entry.")

    @spooky_name_rate.command(name="add", aliases=("register", ))
    async def add_name(self, ctx: Context, *, name: str) -> None:
        """Use this command to add/register your spookified name."""
        if self.poll:
            logger.info(
                f"{ctx.author} tried to add a name, but the poll had already started."
            )
            await ctx.send(
                "Sorry, the poll has started! You can try and participate in the next round though!"
            )
            return

        # Reject duplicate authors and duplicate names.
        for data in (json.loads(user_data)
                     for _, user_data in await self.messages.items()):
            if data["author"] == ctx.author.id:
                await ctx.send(
                    "But you have already added an entry! Type "
                    f"`{Client.prefix}spookynamerate "
                    "delete` to delete it, and then you can add it again")
                return

            elif data["name"] == name:
                await ctx.send("TOO LATE. Someone has already added this name."
                               )
                return

        msg = await (await self.get_channel()
                     ).send(f"{ctx.author.mention} added the name {name!r}!")

        # Key the cache by the announcement message's ID so reactions on that
        # message can be mapped back to the entry.
        await self.messages.set(
            msg.id,
            json.dumps({
                "name": name,
                "author": ctx.author.id,
                "score": 0,
            }),
        )

        # Seed the voting reactions so users just click to rate.
        for emoji in EMOJIS_VAL:
            await msg.add_reaction(emoji)

        logger.info(f"{ctx.author} added the name {name!r}")

    @spooky_name_rate.command(name="delete")
    async def delete_name(self, ctx: Context) -> None:
        """Delete the user's name."""
        if self.poll:
            await ctx.send(
                "You can't delete your name since the poll has already started!"
            )
            return
        for message_id, data in await self.messages.items():
            data = json.loads(data)

            if ctx.author.id == data["author"]:
                await self.messages.delete(message_id)
                await ctx.send(f"Name deleted successfully ({data['name']!r})!"
                               )
                return

        await ctx.send(
            f"But you don't have an entry... :eyes: Type `{Client.prefix}spookynamerate add your entry`"
        )

    @Cog.listener()
    async def on_reaction_add(self, reaction: Reaction, user: User) -> None:
        """Ensures that each user adds maximum one reaction."""
        if user.bot or not await self.messages.contains(reaction.message.id):
            return

        async with self.checking_messages:  # Acquire the lock so that the dictionary isn't reset while iterating.
            if reaction.emoji in EMOJIS_VAL:
                # Count every non-bot reaction per user on this message.
                reaction_counter = defaultdict(int)
                for msg_reaction in reaction.message.reactions:
                    async for reaction_user in msg_reaction.users():
                        if reaction_user == self.bot.user:
                            continue
                        reaction_counter[reaction_user] += 1

                if reaction_counter[user] > 1:
                    await user.send(
                        "Sorry, you have already added a reaction, "
                        "please remove your reaction and try again.")
                    await reaction.remove(user)
                    return

    @tasks.loop(hours=24.0)
    async def announce_name(self) -> None:
        """Announces the name needed to spookify every 24 hours and the winner of the previous game."""
        if not self.in_allowed_month():
            return

        channel = await self.get_channel()

        if self.first_time:
            # First ever run: explain the game instead of scoring a round.
            await channel.send(
                "Okkey... Welcome to the **Spooky Name Rate Game**! It's a relatively simple game.\n"
                f"Everyday, a random name will be sent in <#{Channels.community_bot_commands}> "
                "and you need to try and spookify it!\nRegister your name using "
                f"`{Client.prefix}spookynamerate add spookified name`")

            await self.data.set("first_time", False)
            self.first_time = False

        else:
            if await self.messages.items():
                await channel.send(embed=await self.get_responses_list(
                    final=True))
                self.poll = True
                if not SpookyNameRate.debug:
                    await asyncio.sleep(2 * 60 * 60)  # sleep for two hours

            logger.info("Calculating score")
            for message_id, data in await self.messages.items():
                data = json.loads(data)

                msg = await channel.fetch_message(message_id)
                score = 0
                for reaction in msg.reactions:
                    reaction_value = EMOJIS_VAL.get(
                        reaction.emoji, 0)  # get the value of the emoji else 0
                    score += reaction_value * (
                        reaction.count - 1)  # multiply by the num of reactions
                    # subtract one, since one reaction was done by the bot

                logger.debug(
                    f"{self.bot.get_user(data['author'])} got a score of {score}"
                )
                data["score"] = score
                await self.messages.set(message_id, json.dumps(data))

            # Sort entries by score, highest first.
            winner_messages = sorted(
                ((msg_id, json.loads(usr_data))
                 for msg_id, usr_data in await self.messages.items()),
                key=lambda x: x[1]["score"],
                reverse=True,
            )

            # Collect the top entry plus any entries tied with it; stop as soon
            # as the next entry scores lower. (A previous `elif` branch here was
            # unreachable: when the `if` is false, `i` is already the last index.)
            winners = []
            for i, winner in enumerate(winner_messages):
                winners.append(winner)
                if len(winner_messages) > i + 1:
                    if winner_messages[i +
                                       1][1]["score"] != winner[1]["score"]:
                        break

            # one iteration is complete
            await channel.send(
                "Today's Spooky Name Rate Game ends now, and the winner(s) is(are)..."
            )

            async with channel.typing():
                await asyncio.sleep(1)  # give the drum roll feel

                if not winners:  # There are no winners (no participants)
                    await channel.send(
                        "Hmm... Looks like no one participated! :cry:")
                    return

                score = winners[0][1]["score"]
                congratulations = "to all" if len(
                    winners) > 1 else PING.format(id=winners[0][1]["author"])
                names = ", ".join(
                    f'{win[1]["name"]} ({PING.format(id=win[1]["author"])})'
                    for win in winners)

                # display winners, their names and scores
                # Bug fix: the singular branch used to render "Your namewas" —
                # the leading space belongs inside the 'was' alternative.
                await channel.send(
                    f"Congratulations {congratulations}!\n"
                    f"You have a score of {score}!\n"
                    f"Your name{'s were' if len(winners) > 1 else ' was'}:\n{names}"
                )

                # Send random party emojis
                party = (random.choice([":partying_face:", ":tada:"])
                         for _ in range(random.randint(1, 10)))
                await channel.send(" ".join(party))

            async with self.checking_messages:  # Acquire the lock to delete the messages
                await self.messages.clear()  # reset the messages

        # send the next name
        self.name = f"{random.choice(FIRST_NAMES)} {random.choice(LAST_NAMES)}"
        await self.data.set("name", self.name)

        await channel.send(
            "Let's move on to the next name!\nAnd the next name is...\n"
            f"**{self.name}**!\nTry to spookify that... :smirk:")

        self.poll = False  # accepting responses

    @announce_name.before_loop
    async def wait_till_scheduled_time(self) -> None:
        """Waits till the next day's 12PM if crossed it, otherwise waits till the same day's 12PM."""
        if SpookyNameRate.debug:
            return

        now = datetime.utcnow()
        if now.hour < 12:
            twelve_pm = now.replace(hour=12, minute=0, second=0, microsecond=0)
            time_left = twelve_pm - now
            # total_seconds() rather than .seconds: .seconds drops the days
            # component of a timedelta and would truncate a full-day wait to 0.
            await asyncio.sleep(time_left.total_seconds())
            return

        tomorrow_12pm = now + timedelta(days=1)
        tomorrow_12pm = tomorrow_12pm.replace(hour=12,
                                              minute=0,
                                              second=0,
                                              microsecond=0)
        # When `now` is exactly 12:00:00, this delta is one full day; .seconds
        # would be 0 and the loop would fire immediately — use total_seconds().
        await asyncio.sleep((tomorrow_12pm - now).total_seconds())

    async def get_responses_list(self, final: bool = False) -> Embed:
        """Returns an embed containing the responses of the people."""
        channel = await self.get_channel()

        embed = Embed(color=Colour.red())

        if await self.messages.items():
            if final:
                embed.title = "Spooky Name Rate is about to end!"
                embed.description = (
                    "This Spooky Name Rate round is about to end in 2 hours! You can review "
                    "the entries below! Have you rated other's names?")
            else:
                embed.title = "All the spookified names!"
                embed.description = "See a list of all the entries entered by everyone!"
        else:
            embed.title = "No one has added an entry yet..."

        for message_id, data in await self.messages.items():
            data = json.loads(data)

            embed.add_field(
                name=(self.bot.get_user(data["author"])
                      or await self.bot.fetch_user(data["author"])).name,
                value=
                f"[{data['name']}](https://discord.com/channels/{Client.guild}/{channel.id}/{message_id})",
            )

        return embed

    async def get_channel(self) -> Optional[TextChannel]:
        """Gets the sir-lancebot-channel after waiting until ready."""
        await self.bot.wait_until_ready()
        channel = self.bot.get_channel(
            Channels.community_bot_commands) or await self.bot.fetch_channel(
                Channels.community_bot_commands)
        if not channel:
            logger.warning(
                "Bot is unable to get the #seasonalbot-commands channel. Please check the channel ID."
            )
        return channel

    @staticmethod
    def in_allowed_month() -> bool:
        """Returns whether running in the limited month."""
        if SpookyNameRate.debug:
            return True

        if not Client.month_override:
            return datetime.utcnow().month == Month.OCTOBER
        return Client.month_override == Month.OCTOBER

    def cog_check(self, ctx: Context) -> bool:
        """A command to check whether the command is being called in October."""
        if not self.in_allowed_month():
            raise InMonthCheckFailure(
                "You can only use these commands in October!")
        return True

    def cog_unload(self) -> None:
        """Stops the announce_name task."""
        self.announce_name.cancel()
Exemple #4
0
class ModPings(Cog):
    """Commands for a moderator to turn moderator pings on and off."""

    # RedisCache[discord.Member.id, 'Naïve ISO 8601 string']
    # The cache's keys are mods who have pings off.
    # The cache's values are the times when the role should be re-applied to them, stored in ISO format.
    pings_off_mods = RedisCache()

    # RedisCache[discord.Member.id, 'start timestamp|total worktime in seconds']
    # The cache's keys are mod's ID
    # The cache's values are their pings on schedule timestamp and the total seconds (work time) until pings off
    modpings_schedule = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot
        self._role_scheduler = Scheduler("ModPingsOnOff")
        self._modpings_scheduler = Scheduler("ModPingsSchedule")

        # Resolved in reschedule_roles() once the guild is available; None until then.
        self.guild = None
        self.moderators_role = None

        self.modpings_schedule_task = scheduling.create_task(
            self.reschedule_modpings_schedule(), event_loop=self.bot.loop)
        self.reschedule_task = scheduling.create_task(
            self.reschedule_roles(),
            name="mod-pings-reschedule",
            event_loop=self.bot.loop,
        )

    async def reschedule_roles(self) -> None:
        """Reschedule moderators role re-apply times."""
        await self.bot.wait_until_guild_available()
        self.guild = self.bot.get_guild(Guild.id)
        self.moderators_role = self.guild.get_role(Roles.moderators)

        mod_team = self.guild.get_role(Roles.mod_team)
        pings_on = self.moderators_role.members
        pings_off = await self.pings_off_mods.to_dict()

        log.trace(
            "Applying the moderators role to the mod team where necessary.")
        for mod in mod_team.members:
            if mod in pings_on:  # Make sure that on-duty mods aren't in the cache.
                if mod.id in pings_off:
                    await self.pings_off_mods.delete(mod.id)
                continue

            # Keep the role off only for those in the cache.
            if mod.id not in pings_off:
                await self.reapply_role(mod)
            else:
                expiry = isoparse(pings_off[mod.id])
                self._role_scheduler.schedule_at(expiry, mod.id,
                                                 self.reapply_role(mod))

    async def reschedule_modpings_schedule(self) -> None:
        """Reschedule moderators schedule ping."""
        await self.bot.wait_until_guild_available()
        schedule_cache = await self.modpings_schedule.to_dict()

        log.info(
            "Scheduling modpings schedule for applicable moderators found in cache."
        )
        for mod_id, schedule in schedule_cache.items():
            start_timestamp, work_time = schedule.split("|")
            start = datetime.datetime.fromtimestamp(float(start_timestamp))

            mod = await self.bot.fetch_user(mod_id)
            # Bug fix: split() yields work_time as a str; convert it back to a
            # number, otherwise add_role_schedule() ends up calling
            # asyncio.sleep() with a str and raises TypeError.
            self._modpings_scheduler.schedule_at(
                start, mod_id,
                self.add_role_schedule(mod, float(work_time), start))

    async def remove_role_schedule(self, mod: Member, work_time: float,
                                   schedule_start: datetime.datetime) -> None:
        """Removes the moderator's role to the given moderator."""
        log.trace(f"Removing moderator role from mod with ID {mod.id}")
        await mod.remove_roles(self.moderators_role,
                               reason="Moderator schedule time expired.")

        # Remove the task before scheduling it again
        self._modpings_scheduler.cancel(mod.id)

        # Add the task again, shifted to the same time tomorrow.
        log.trace(
            f"Adding mod pings schedule task again for mod with ID {mod.id}")
        schedule_start += datetime.timedelta(days=1)
        self._modpings_scheduler.schedule_at(
            schedule_start, mod.id,
            self.add_role_schedule(mod, work_time, schedule_start))

    async def add_role_schedule(self, mod: Member, work_time: float,
                                schedule_start: datetime.datetime) -> None:
        """Adds the moderator's role to the given moderator."""
        # If the moderator has pings off, then skip adding role
        if mod.id in await self.pings_off_mods.to_dict():
            log.trace(
                f"Skipping adding moderator role to mod with ID {mod.id} - found in pings off cache."
            )
        else:
            log.trace(f"Applying moderator role to mod with ID {mod.id}")
            await mod.add_roles(self.moderators_role,
                                reason="Moderator scheduled time started!")

        # Hold the role for the scheduled work period, then remove and re-queue.
        log.trace(
            f"Sleeping for {work_time} seconds, worktime for mod with ID {mod.id}"
        )
        await asyncio.sleep(work_time)
        await self.remove_role_schedule(mod, work_time, schedule_start)

    async def reapply_role(self, mod: Member) -> None:
        """Reapply the moderator's role to the given moderator."""
        log.trace(f"Re-applying role to mod with ID {mod.id}.")
        await mod.add_roles(self.moderators_role,
                            reason="Pings off period expired.")
        await self.pings_off_mods.delete(mod.id)

    @group(name='modpings', aliases=('modping', ), invoke_without_command=True)
    @has_any_role(*MODERATION_ROLES)
    async def modpings_group(self, ctx: Context) -> None:
        """Allow the removal and re-addition of the pingable moderators role."""
        await ctx.send_help(ctx.command)

    @modpings_group.command(name='off')
    @has_any_role(*MODERATION_ROLES)
    async def off_command(self, ctx: Context, duration: Expiry) -> None:
        """
        Temporarily removes the pingable moderators role for a set amount of time.

        A unit of time should be appended to the duration.
        Units (∗case-sensitive):
        \u2003`y` - years
        \u2003`m` - months∗
        \u2003`w` - weeks
        \u2003`d` - days
        \u2003`h` - hours
        \u2003`M` - minutes∗
        \u2003`s` - seconds

        Alternatively, an ISO 8601 timestamp can be provided for the duration.

        The duration cannot be longer than 30 days.
        """
        delta = duration - arrow.utcnow()
        if delta > datetime.timedelta(days=30):
            await ctx.send(
                ":x: Cannot remove the role for longer than 30 days.")
            return

        mod = ctx.author

        until_date = duration.replace(
            microsecond=0).isoformat()  # Looks noisy with microseconds.
        await mod.remove_roles(self.moderators_role,
                               reason=f"Turned pings off until {until_date}.")

        await self.pings_off_mods.set(mod.id, duration.isoformat())

        # Allow rescheduling the task without cancelling it separately via the `on` command.
        if mod.id in self._role_scheduler:
            self._role_scheduler.cancel(mod.id)
        self._role_scheduler.schedule_at(duration, mod.id,
                                         self.reapply_role(mod))

        embed = Embed(timestamp=duration, colour=Colours.bright_green)
        embed.set_footer(text="Moderators role has been removed until",
                         icon_url=Icons.green_checkmark)
        await ctx.send(embed=embed)

    @modpings_group.command(name='on')
    @has_any_role(*MODERATION_ROLES)
    async def on_command(self, ctx: Context) -> None:
        """Re-apply the pingable moderators role."""
        mod = ctx.author
        if mod in self.moderators_role.members:
            await ctx.send(":question: You already have the role.")
            return

        await mod.add_roles(self.moderators_role,
                            reason="Pings off period canceled.")

        await self.pings_off_mods.delete(mod.id)

        # We assume the task exists. Lack of it may indicate a bug.
        self._role_scheduler.cancel(mod.id)

        await ctx.send(
            f"{Emojis.check_mark} Moderators role has been re-applied.")

    @modpings_group.group(name='schedule',
                          aliases=('s', ),
                          invoke_without_command=True)
    @has_any_role(*MODERATION_ROLES)
    async def schedule_modpings(self, ctx: Context, start: str,
                                end: str) -> None:
        """Schedule modpings role to be added at <start> and removed at <end> everyday at UTC time!"""
        start, end = dateutil_parse(start), dateutil_parse(end)

        # An end time earlier than the start wraps around to the next day.
        if end < start:
            end += datetime.timedelta(days=1)

        if (end - start) > datetime.timedelta(hours=MAXIMUM_WORK_LIMIT):
            await ctx.send(
                f":x: {ctx.author.mention} You can't have the modpings role for"
                f" more than {MAXIMUM_WORK_LIMIT} hours!")
            return

        if start < datetime.datetime.utcnow():
            # The datetime has already gone for the day, so make it tomorrow
            # otherwise the scheduler would schedule it immediately
            start += datetime.timedelta(days=1)

        work_time = (end - start).total_seconds()

        await self.modpings_schedule.set(ctx.author.id,
                                         f"{start.timestamp()}|{work_time}")

        if ctx.author.id in self._modpings_scheduler:
            self._modpings_scheduler.cancel(ctx.author.id)

        self._modpings_scheduler.schedule_at(
            start, ctx.author.id,
            self.add_role_schedule(ctx.author, work_time, start))

        await ctx.send(
            f"{Emojis.ok_hand} {ctx.author.mention} Scheduled mod pings from "
            f"{discord_timestamp(start, TimestampFormats.TIME)} to "
            f"{discord_timestamp(end, TimestampFormats.TIME)}!")

    @schedule_modpings.command(name='delete', aliases=('del', 'd'))
    async def modpings_schedule_delete(self, ctx: Context) -> None:
        """Delete your modpings schedule."""
        # NOTE(review): unlike the sibling commands this has no has_any_role
        # decorator, and discord.py group checks do not cascade — confirm intended.
        self._modpings_scheduler.cancel(ctx.author.id)
        await self.modpings_schedule.delete(ctx.author.id)
        await ctx.send(
            f"{Emojis.ok_hand} {ctx.author.mention} Deleted your modpings schedule!"
        )

    def cog_unload(self) -> None:
        """Cancel role tasks when the cog unloads."""
        log.trace("Cog unload: canceling role tasks.")
        self.reschedule_task.cancel()
        self._role_scheduler.cancel_all()

        self.modpings_schedule_task.cancel()
        self._modpings_scheduler.cancel_all()
Exemple #5
0
class Defcon(Cog):
    """Time-sensitive server defense mechanisms."""

    # RedisCache[str, str]
    # The cache's keys are "threshold" and "expiry".
    # The caches' values are strings formatted as valid input to the DurationDelta converter, or empty when off.
    defcon_settings = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot
        # The #defcon channel; resolved asynchronously in _sync_settings.
        self.channel = None
        # Minimum account age required to join. A zero delta means DEFCON is off.
        self.threshold = relativedelta(days=0)
        # Datetime at which the threshold is automatically removed, or None.
        self.expiry = None

        self.scheduler = Scheduler(self.__class__.__name__)

        # Synchronize cached settings in the background once the cog loads.
        self.bot.loop.create_task(self._sync_settings())

    @property
    def mod_log(self) -> ModLog:
        """Get currently loaded ModLog cog instance."""
        return self.bot.get_cog("ModLog")

    @defcon_settings.atomic_transaction
    async def _sync_settings(self) -> None:
        """On cog load, try to synchronize DEFCON settings to the API."""
        log.trace("Waiting for the guild to become available before syncing.")
        await self.bot.wait_until_guild_available()
        self.channel = await self.bot.fetch_channel(Channels.defcon)

        log.trace("Syncing settings.")

        try:
            settings = await self.defcon_settings.to_dict()
            # Empty/missing cache values mean the corresponding setting is off.
            self.threshold = parse_duration_string(
                settings["threshold"]) if settings.get("threshold") else None
            self.expiry = datetime.fromisoformat(
                settings["expiry"]) if settings.get("expiry") else None
        except RedisError:
            log.exception("Unable to get DEFCON settings!")
            await self.channel.send(
                f"<@&{Roles.moderators}> <@&{Roles.devops}> **WARNING**: Unable to get DEFCON settings!"
                f"\n\n```{traceback.format_exc()}```")

        else:
            if self.expiry:
                # Task id 0 is the single pending removal task; the same id is
                # used by _update_threshold.
                self.scheduler.schedule_at(self.expiry, 0,
                                           self._remove_threshold())

            self._update_notifier()
            log.info(
                f"DEFCON synchronized: {humanize_delta(self.threshold) if self.threshold else '-'}"
            )

        self._update_channel_topic()

    @Cog.listener()
    async def on_member_join(self, member: Member) -> None:
        """Check newly joining users to see if they meet the account age threshold."""
        if self.threshold:
            now = datetime.utcnow()

            # NOTE(review): naive utcnow() is compared against member.created_at,
            # which appears to also be naive UTC here — confirm both stay naive.
            if now - member.created_at < relativedelta_to_timedelta(
                    self.threshold):
                log.info(f"Rejecting user {member}: Account is too new")

                # Track whether the DM succeeded so the mod-log can mention it.
                message_sent = False

                try:
                    await member.send(
                        REJECTION_MESSAGE.format(user=member.mention))

                    message_sent = True
                except Exception:
                    # The DM is best-effort (DMs may be disabled); the kick
                    # below proceeds regardless.
                    log.exception(
                        f"Unable to send rejection message to user: {member}")

                await member.kick(reason="DEFCON active, user is too new")
                self.bot.stats.incr("defcon.leaves")

                message = (
                    f"{format_user(member)} was denied entry because their account is too new."
                )

                if not message_sent:
                    message = f"{message}\n\nUnable to send rejection message via DM; they probably have DMs disabled."

                await self.mod_log.send_log_message(
                    Icons.defcon_denied, Colours.soft_red, "Entry denied",
                    message, member.avatar_url_as(static_format="png"))

    @group(name='defcon', aliases=('dc', ), invoke_without_command=True)
    @has_any_role(*MODERATION_ROLES)
    async def defcon_group(self, ctx: Context) -> None:
        """Check the DEFCON status or run a subcommand."""
        await ctx.send_help(ctx.command)

    @defcon_group.command(aliases=('s', ))
    @has_any_role(*MODERATION_ROLES)
    async def status(self, ctx: Context) -> None:
        """Check the current status of DEFCON mode."""
        embed = Embed(colour=Colour.blurple(),
                      title="DEFCON Status",
                      description=f"""
                **Threshold:** {humanize_delta(self.threshold) if self.threshold else "-"}
                **Expires in:** {humanize_delta(relativedelta(self.expiry, datetime.utcnow())) if self.expiry else "-"}
                **Verification level:** {ctx.guild.verification_level.name}
                """)

        await ctx.send(embed=embed)

    @defcon_group.command(aliases=('t', 'd'))
    @has_any_role(*MODERATION_ROLES)
    async def threshold(self,
                        ctx: Context,
                        threshold: Union[DurationDelta, int],
                        expiry: Optional[Expiry] = None) -> None:
        """
        Set how old an account must be to join the server.

        The threshold is the minimum required account age. Can accept either a duration string or a number of days.
        Set it to 0 to have no threshold.
        The expiry allows to automatically remove the threshold after a designated time. If no expiry is specified,
        the cog will remind to remove the threshold hourly.
        """
        # A bare integer is interpreted as a number of days.
        if isinstance(threshold, int):
            threshold = relativedelta(days=threshold)
        await self._update_threshold(ctx.author,
                                     threshold=threshold,
                                     expiry=expiry)

    @defcon_group.command()
    @has_any_role(Roles.admins)
    async def shutdown(self, ctx: Context) -> None:
        """Shut down the server by setting send permissions of everyone to False."""
        role = ctx.guild.default_role
        permissions = role.permissions

        permissions.update(send_messages=False, add_reactions=False)
        await role.edit(reason="DEFCON shutdown", permissions=permissions)
        await ctx.send(
            f"{Action.SERVER_SHUTDOWN.value.emoji} Server shut down.")

    @defcon_group.command()
    @has_any_role(Roles.admins)
    async def unshutdown(self, ctx: Context) -> None:
        """Open up the server again by setting send permissions of everyone to None."""
        role = ctx.guild.default_role
        permissions = role.permissions

        permissions.update(send_messages=True, add_reactions=True)
        await role.edit(reason="DEFCON unshutdown", permissions=permissions)
        await ctx.send(f"{Action.SERVER_OPEN.value.emoji} Server reopened.")

    def _update_channel_topic(self) -> None:
        """Update the #defcon channel topic with the current DEFCON status."""
        new_topic = f"{BASE_CHANNEL_TOPIC}\n(Threshold: {humanize_delta(self.threshold) if self.threshold else '-'})"

        # Ignore our own topic edit so it isn't logged as a channel update.
        self.mod_log.ignore(Event.guild_channel_update, Channels.defcon)
        asyncio.create_task(self.channel.edit(topic=new_topic))

    @defcon_settings.atomic_transaction
    async def _update_threshold(self,
                                author: User,
                                threshold: relativedelta,
                                expiry: Optional[Expiry] = None) -> None:
        """Update the new threshold in the cog, cache, defcon channel, and logs, and additionally schedule expiry."""
        self.threshold = threshold
        if threshold == relativedelta(
                days=0
        ):  # If the threshold is 0, we don't need to schedule anything
            expiry = None
        self.expiry = expiry

        # Either way, we cancel the old task.
        self.scheduler.cancel_all()
        if self.expiry is not None:
            self.scheduler.schedule_at(expiry, 0, self._remove_threshold())

        self._update_notifier()

        # Make sure to handle the critical part of the update before writing to Redis.
        error = ""
        try:
            # NOTE(review): "expiry" is written as the int 0 when unset, while
            # the class comment documents string values — confirm RedisCache
            # tolerates the mixed value types.
            await self.defcon_settings.update({
                'threshold':
                Defcon._stringify_relativedelta(self.threshold)
                if self.threshold else "",
                'expiry':
                expiry.isoformat() if expiry else 0
            })
        except RedisError:
            # The in-memory state is already updated; only the cache write failed.
            error = ", but failed to write to cache"

        action = Action.DURATION_UPDATE

        expiry_message = ""
        if expiry:
            expiry_message = f" for the next {humanize_delta(relativedelta(expiry, datetime.utcnow()), max_units=2)}"

        if self.threshold:
            channel_message = (
                f"updated; accounts must be {humanize_delta(self.threshold)} "
                f"old to join the server{expiry_message}")
        else:
            channel_message = "removed"

        await self.channel.send(
            f"{action.value.emoji} DEFCON threshold {channel_message}{error}.")
        await self._send_defcon_log(action, author)
        self._update_channel_topic()

        self._log_threshold_stat(threshold)

    async def _remove_threshold(self) -> None:
        """Resets the threshold back to 0."""
        await self._update_threshold(self.bot.user, relativedelta(days=0))

    @staticmethod
    def _stringify_relativedelta(delta: relativedelta) -> str:
        """Convert a relativedelta object to a duration string."""
        # NOTE(review): both "months" and "minutes" map to the symbol "m" —
        # verify this round-trips through the DurationDelta parser as intended.
        units = [("years", "y"), ("months", "m"), ("days", "d"),
                 ("hours", "h"), ("minutes", "m"), ("seconds", "s")]
        return "".join(f"{getattr(delta, unit)}{symbol}"
                       for unit, symbol in units
                       if getattr(delta, unit)) or "0s"

    def _log_threshold_stat(self, threshold: relativedelta) -> None:
        """Adds the threshold to the bot stats in days."""
        threshold_days = relativedelta_to_timedelta(
            threshold).total_seconds() / SECONDS_IN_DAY
        self.bot.stats.gauge("defcon.threshold", threshold_days)

    async def _send_defcon_log(self, action: Action, actor: User) -> None:
        """Send log message for DEFCON action."""
        info = action.value
        log_msg: str = (
            f"**Staffer:** {actor.mention} {actor} (`{actor.id}`)\n"
            f"{info.template.format(threshold=(humanize_delta(self.threshold) if self.threshold else '-'))}"
        )
        status_msg = f"DEFCON {action.name.lower()}"

        await self.mod_log.send_log_message(info.icon, info.color, status_msg,
                                            log_msg)

    def _update_notifier(self) -> None:
        """Start or stop the notifier according to the DEFCON status."""
        # The hourly reminder only runs while a threshold is active with no
        # scheduled expiry; with an expiry set, removal is already automatic.
        if self.threshold and self.expiry is None and not self.defcon_notifier.is_running(
        ):
            log.info("DEFCON notifier started.")
            self.defcon_notifier.start()

        elif (not self.threshold or self.expiry
              is not None) and self.defcon_notifier.is_running():
            log.info("DEFCON notifier stopped.")
            self.defcon_notifier.cancel()

    @tasks.loop(hours=1)
    async def defcon_notifier(self) -> None:
        """Routinely notify moderators that DEFCON is active."""
        await self.channel.send(
            f"Defcon is on and is set to {humanize_delta(self.threshold)}.")

    def cog_unload(self) -> None:
        """Cancel the notifer and threshold removal tasks when the cog unloads."""
        log.trace("Cog unload: canceling defcon notifier task.")
        self.defcon_notifier.cancel()
        self.scheduler.cancel_all()
class HacktoberStats(commands.Cog):
    """Hacktoberfest statistics Cog."""

    # RedisCache[discord user ID, GitHub username]
    # Stores mapping of user IDs and GitHub usernames
    linked_accounts = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot

    @in_month(Month.SEPTEMBER, Month.OCTOBER, Month.NOVEMBER)
    @commands.group(name="hacktoberstats",
                    aliases=("hackstats", ),
                    invoke_without_command=True)
    async def hacktoberstats_group(self,
                                   ctx: commands.Context,
                                   github_username: str = None) -> None:
        """
        Display an embed for a user's Hacktoberfest contributions.

        If invoked without a subcommand or github_username, get the invoking user's stats if they've
        linked their Discord name to GitHub using .stats link. If invoked with a github_username,
        get that user's contributions
        """
        if not github_username:
            # Fall back to the invoker's linked GitHub account, if any.
            author_id, author_mention = self._author_mention_from_context(ctx)

            if await self.linked_accounts.contains(author_id):
                github_username = await self.linked_accounts.get(author_id)
                log.info(
                    f"Getting stats for {author_id} linked GitHub account '{github_username}'"
                )
            else:
                msg = (
                    f"{author_mention}, you have not linked a GitHub account\n\n"
                    f"You can link your GitHub account using:\n```\n{ctx.prefix}hackstats link github_username\n```\n"
                    f"Or query GitHub stats directly using:\n```\n{ctx.prefix}hackstats github_username\n```"
                )
                await ctx.send(msg)
                return

        await self.get_stats(ctx, github_username)

    @in_month(Month.SEPTEMBER, Month.OCTOBER, Month.NOVEMBER)
    @hacktoberstats_group.command(name="link")
    async def link_user(self,
                        ctx: commands.Context,
                        github_username: str = None) -> None:
        """
        Link the invoking user's Github github_username to their Discord ID.

        Linked users are stored in Redis: User ID => GitHub Username.
        """
        author_id, author_mention = self._author_mention_from_context(ctx)
        if github_username:
            # Acknowledge differently depending on whether this replaces an
            # existing link, then store the (new) mapping either way.
            if await self.linked_accounts.contains(author_id):
                old_username = await self.linked_accounts.get(author_id)
                log.info(
                    f"{author_id} has changed their github link from '{old_username}' to '{github_username}'"
                )
                await ctx.send(
                    f"{author_mention}, your GitHub username has been updated to: '{github_username}'"
                )
            else:
                log.info(
                    f"{author_id} has added a github link to '{github_username}'"
                )
                await ctx.send(
                    f"{author_mention}, your GitHub username has been added")

            await self.linked_accounts.set(author_id, github_username)
        else:
            log.info(
                f"{author_id} tried to link a GitHub account but didn't provide a username"
            )
            await ctx.send(
                f"{author_mention}, a GitHub username is required to link your account"
            )

    @in_month(Month.SEPTEMBER, Month.OCTOBER, Month.NOVEMBER)
    @hacktoberstats_group.command(name="unlink")
    async def unlink_user(self, ctx: commands.Context) -> None:
        """Remove the invoking user's account link from the log."""
        author_id, author_mention = self._author_mention_from_context(ctx)

        stored_user = await self.linked_accounts.pop(author_id, None)
        if stored_user:
            await ctx.send(
                f"{author_mention}, your GitHub profile has been unlinked")
            log.info(f"{author_id} has unlinked their GitHub account")
        else:
            await ctx.send(
                f"{author_mention}, you do not currently have a linked GitHub account"
            )
            log.info(
                f"{author_id} tried to unlink their GitHub account but no account was linked"
            )

    async def get_stats(self, ctx: commands.Context,
                        github_username: str) -> None:
        """
        Query GitHub's API for PRs created by a GitHub user during the month of October.

        PRs with an 'invalid' or 'spam' label are ignored

        For PRs created after October 3rd, they have to be in a repository that has a
        'hacktoberfest' topic, unless the PR is labelled 'hacktoberfest-accepted' for it
        to count.

        If a valid github_username is provided, an embed is generated and posted to the channel

        Otherwise, post a helpful error message
        """
        async with ctx.typing():
            prs = await self.get_october_prs(github_username)

            if prs is None:  # Will be None if the user was not found
                await ctx.send(embed=discord.Embed(
                    title=random.choice(NEGATIVE_REPLIES),
                    description=f"GitHub user `{github_username}` was not found.",
                    colour=discord.Colour.red()))
                return

            if prs:
                stats_embed = await self.build_embed(github_username, prs)
                await ctx.send("Here are some stats!", embed=stats_embed)
            else:
                await ctx.send(
                    f"No valid Hacktoberfest PRs found for '{github_username}'"
                )

    async def build_embed(self, github_username: str,
                          prs: list[dict]) -> discord.Embed:
        """Return a stats embed built from github_username's PRs."""
        log.info(
            f"Building Hacktoberfest embed for GitHub user: '{github_username}'"
        )
        in_review, accepted = await self._categorize_prs(prs)

        n = len(accepted) + len(in_review)  # Total number of PRs
        if n >= PRS_FOR_SHIRT:
            shirtstr = f"**{github_username} is eligible for a T-shirt or a tree!**"
        elif n == PRS_FOR_SHIRT - 1:
            shirtstr = f"**{github_username} is 1 PR away from a T-shirt or a tree!**"
        else:
            shirtstr = f"**{github_username} is {PRS_FOR_SHIRT - n} PRs away from a T-shirt or a tree!**"

        stats_embed = discord.Embed(
            title=f"{github_username}'s Hacktoberfest",
            color=Colours.purple,
            description=(f"{github_username} has made {n} valid "
                         f"{self._contributionator(n)} in "
                         f"October\n\n"
                         f"{shirtstr}\n\n"))

        stats_embed.set_thumbnail(
            url=f"https://www.github.com/{github_username}.png")
        stats_embed.set_author(
            name="Hacktoberfest",
            url="https://hacktoberfest.digitalocean.com",
            icon_url="https://avatars1.githubusercontent.com/u/35706162?s=200&v=4")

        # This will handle when no PRs in_review or accepted
        review_str = self._build_prs_string(in_review, github_username) or "None"
        accepted_str = self._build_prs_string(accepted, github_username) or "None"
        stats_embed.add_field(name=":clock1: In Review", value=review_str)
        stats_embed.add_field(name=":tada: Accepted", value=accepted_str)

        log.info(
            f"Hacktoberfest PR built for GitHub user '{github_username}'")
        return stats_embed

    async def get_october_prs(self,
                              github_username: str) -> Optional[list[dict]]:
        """
        Query GitHub's API for PRs created during the month of October by github_username.

        PRs with an 'invalid' or 'spam' label are ignored unless it is merged or approved

        For PRs created after October 3rd, they have to be in a repository that has a
        'hacktoberfest' topic, unless the PR is labelled 'hacktoberfest-accepted' for it
        to count.

        If PRs are found, return a list of dicts with basic PR information

        For each PR:
        {
            "repo_url": str
            "repo_shortname": str (e.g. "python-discord/sir-lancebot")
            "created_at": datetime.datetime
            "number": int
        }

        Otherwise, return empty list.
        None will be returned when the GitHub user was not found.
        """
        log.info(
            f"Fetching Hacktoberfest Stats for GitHub user: '{github_username}'"
        )
        base_url = "https://api.github.com/search/issues"
        action_type = "pr"
        is_query = "public"
        not_query = "draft"
        date_range = f"{CURRENT_YEAR}-09-30T10:00Z..{CURRENT_YEAR}-11-01T12:00Z"
        # NOTE(review): GitHub caps per_page at 100; values above that are
        # clamped server-side, so results past 100 are silently dropped.
        per_page = "300"
        query_params = (f"+type:{action_type}"
                        f"+is:{is_query}"
                        f"+author:{quote_plus(github_username)}"
                        f"+-is:{not_query}"
                        f"+created:{date_range}"
                        f"&per_page={per_page}")

        log.debug(f"GitHub query parameters generated: {query_params}")

        jsonresp = await self._fetch_url(base_url, REQUEST_HEADERS,
                                         {"q": query_params})
        if "message" in jsonresp:
            # One of the parameters is invalid, short circuit for now
            api_message = jsonresp["errors"][0]["message"]

            # Ignore logging non-existent users or users we do not have permission to see
            if api_message == GITHUB_NONEXISTENT_USER_MESSAGE:
                log.debug(f"No GitHub user found named '{github_username}'")
                return
            else:
                log.error(
                    f"GitHub API request for '{github_username}' failed with message: {api_message}"
                )
            return []  # No October PRs were found due to error

        if jsonresp["total_count"] == 0:
            # Short circuit if there aren't any PRs
            log.info(
                f"No October PRs found for GitHub user: '{github_username}'")
            return []

        log.info(
            f"Found {len(jsonresp['items'])} Hacktoberfest PRs for GitHub user: '{github_username}'"
        )
        outlist = []  # list of PR information dicts that will get returned
        oct3 = datetime(int(CURRENT_YEAR), 10, 3, 23, 59, 59, tzinfo=None)
        # Cache whether each repo carries the 'hacktoberfest' topic so a repo
        # is queried at most once, even when the answer is negative.
        hackto_topics = {}
        for item in jsonresp["items"]:
            shortname = self._get_shortname(item["repository_url"])
            itemdict = {
                "repo_url": f"https://www.github.com/{shortname}",
                "repo_shortname": shortname,
                "created_at": datetime.strptime(item["created_at"],
                                                "%Y-%m-%dT%H:%M:%SZ"),
                "number": item["number"],
            }

            # If the PR has 'invalid' or 'spam' labels, the PR must be
            # either merged or approved for it to be included
            if self._has_label(item, ["invalid", "spam"]):
                if not await self._is_accepted(itemdict):
                    continue

            # PRs before oct 3 need no topic check.
            if itemdict["created_at"] < oct3:
                outlist.append(itemdict)
                continue

            # The 'hacktoberfest-accepted' label bypasses the topic requirement.
            if self._has_label(item, "hacktoberfest-accepted"):
                outlist.append(itemdict)
                continue

            # Reuse a previously fetched topic result (positive or negative).
            if shortname in hackto_topics:
                if hackto_topics[shortname]:
                    outlist.append(itemdict)
                continue

            # Fetch topics for the PR's repo
            topics_query_url = f"https://api.github.com/repos/{shortname}/topics"
            log.debug(
                f"Fetching repo topics for {shortname} with url: {topics_query_url}"
            )
            jsonresp2 = await self._fetch_url(topics_query_url,
                                              GITHUB_TOPICS_ACCEPT_HEADER)
            if jsonresp2.get("names") is None:
                log.error(
                    f"Error fetching topics for {shortname}: {jsonresp2['message']}"
                )
                continue  # Assume the repo doesn't have the `hacktoberfest` topic if API request errored

            # PRs after oct 3 that don't have the 'hacktoberfest-accepted'
            # label must be in a repo with the 'hacktoberfest' topic.
            hackto_topics[shortname] = "hacktoberfest" in jsonresp2["names"]
            if hackto_topics[shortname]:
                outlist.append(itemdict)
        return outlist

    async def _fetch_url(self,
                         url: str,
                         headers: dict,
                         params: Optional[dict] = None) -> dict:
        """
        Retrieve the JSON API response from `url`.

        `params` is optional: several callers (topic, PR and review lookups)
        provide none, which previously raised a TypeError because the
        parameter had no default.
        """
        async with self.bot.http_session.get(url,
                                             headers=headers,
                                             params=params) as resp:
            return await resp.json()

    @staticmethod
    def _has_label(pr: dict, labels: Union[list[str], str]) -> bool:
        """
        Check if a PR has label 'labels'.

        'labels' can be a string or a list of strings, if it's a list of strings
        it will return true if any of the labels match.
        """
        pr_labels = pr.get("labels")
        if not pr_labels:  # if PR has no labels
            return False
        if isinstance(labels, str):
            # Match the single label directly; previously a non-matching
            # string fell through and was iterated character by character.
            return any(label["name"].casefold() == labels
                       for label in pr_labels)
        return any(label["name"].casefold() == item
                   for item in labels
                   for label in pr_labels)

    async def _is_accepted(self, pr: dict) -> bool:
        """Check if a PR is merged, approved, or labelled hacktoberfest-accepted."""
        # checking for merge status
        query_url = f"https://api.github.com/repos/{pr['repo_shortname']}/pulls/{pr['number']}"
        jsonresp = await self._fetch_url(query_url, REQUEST_HEADERS)

        if message := jsonresp.get("message"):
            log.error(
                f"Error fetching PR stats for #{pr['number']} in repo {pr['repo_shortname']}:\n{message}"
            )
            return False

        if jsonresp.get("merged"):
            return True

        # checking for the label, using `jsonresp` which has the label information
        if self._has_label(jsonresp, "hacktoberfest-accepted"):
            return True

        # checking approval
        query_url += "/reviews"
        jsonresp2 = await self._fetch_url(query_url, REQUEST_HEADERS)
        if isinstance(jsonresp2, dict):
            # if API request is unsuccessful it will be a dict with the error in 'message'
            log.error(
                f"Error fetching PR reviews for #{pr['number']} in repo {pr['repo_shortname']}:\n"
                f"{jsonresp2['message']}")
            return False
        # if it is successful it will be a list instead of a dict
        if len(jsonresp2) == 0:  # if PR has no reviews
            return False

        # loop through reviews and check for approval
        for item in jsonresp2:
            if item.get("status") == "APPROVED":
                return True
        return False
# Exemple #7  (scraped-listing separator; commented out so the file parses)
# 0
class Filtering(Cog):
    """Filtering out invites, blacklisting domains, and warning us of certain regular expressions."""

    # Redis cache mapping a user ID to the last timestamp a bad nickname alert was sent
    name_alerts = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot
        self.scheduler = scheduling.Scheduler(self.__class__.__name__)
        # Serialises nickname checks so concurrent messages from the same user
        # can't trigger duplicate alerts.
        self.name_lock = asyncio.Lock()

        staff_mistake_str = "If you believe this was a mistake, please let staff know!"
        # Registry of all checks. Per entry:
        #   enabled           - config flag toggling this check
        #   function          - coroutine that detects a match
        #   type              - "filter" (offending message removed) or "watchlist" (alert only)
        #   content_only      - whether the check runs on the text content alone
        #   user_notification - (filters) whether the author is DM'd on trigger
        #   notification_msg  - (filters) the DM text sent to the author
        #   schedule_deletion - whether matched messages are deleted later by the scheduler
        self.filters = {
            "filter_zalgo": {
                "enabled":
                Filter.filter_zalgo,
                "function":
                self._has_zalgo,
                "type":
                "filter",
                "content_only":
                True,
                "user_notification":
                Filter.notify_user_zalgo,
                "notification_msg":
                ("Your post has been removed for abusing Unicode character rendering (aka Zalgo text). "
                 f"{staff_mistake_str}"),
                "schedule_deletion":
                False
            },
            "filter_invites": {
                "enabled":
                Filter.filter_invites,
                "function":
                self._has_invites,
                "type":
                "filter",
                "content_only":
                True,
                "user_notification":
                Filter.notify_user_invites,
                "notification_msg":
                (f"Per Rule 6, your invite link has been removed. {staff_mistake_str}\n\n"
                 r"Our server rules can be found here: <https://pythondiscord.com/pages/rules>"
                 ),
                "schedule_deletion":
                False
            },
            "filter_domains": {
                "enabled":
                Filter.filter_domains,
                "function":
                self._has_urls,
                "type":
                "filter",
                "content_only":
                True,
                "user_notification":
                Filter.notify_user_domains,
                "notification_msg":
                (f"Your URL has been removed because it matched a blacklisted domain. {staff_mistake_str}"
                 ),
                "schedule_deletion":
                False
            },
            "watch_regex": {
                "enabled": Filter.watch_regex,
                "function": self._has_watch_regex_match,
                "type": "watchlist",
                "content_only": True,
                "schedule_deletion": True
            },
            "watch_rich_embeds": {
                "enabled": Filter.watch_rich_embeds,
                "function": self._has_rich_embed,
                "type": "watchlist",
                "content_only": False,
                "schedule_deletion": False
            },
            "filter_everyone_ping": {
                "enabled":
                Filter.filter_everyone_ping,
                "function":
                self._has_everyone_ping,
                "type":
                "filter",
                "content_only":
                True,
                "user_notification":
                Filter.notify_user_everyone_ping,
                "notification_msg":
                ("Please don't try to ping `@everyone` or `@here`. "
                 f"Your message has been removed. {staff_mistake_str}"),
                "schedule_deletion":
                False,
                "ping_everyone":
                False
            },
        }

        # Re-arm deletion tasks for offensive messages persisted before a restart.
        scheduling.create_task(self.reschedule_offensive_msg_deletion(),
                               event_loop=self.bot.loop)

    def cog_unload(self) -> None:
        """Cancel scheduled tasks."""
        self.scheduler.cancel_all()

    def _get_filterlist_items(self, list_type: str, *, allowed: bool) -> list:
        """Fetch items from the filter_list_cache."""
        # Cache keys are of the form "TYPE.True" / "TYPE.False".
        return self.bot.filter_list_cache[
            f"{list_type.upper()}.{allowed}"].keys()

    def _get_filterlist_value(self, list_type: str, value: Any, *,
                              allowed: bool) -> dict:
        """Fetch one specific value from filter_list_cache."""
        return self.bot.filter_list_cache[f"{list_type.upper()}.{allowed}"][
            value]

    @staticmethod
    def _expand_spoilers(text: str) -> str:
        """Return a string containing all interpretations of a spoilered message."""
        # Even-indexed split segments are outside spoilers, odd-indexed are
        # inside; concatenating both groups plus every raw segment lets the
        # filters see the message with and without spoiler markup.
        split_text = SPOILER_RE.split(text)
        return ''.join(split_text[0::2] + split_text[1::2] + split_text)

    @property
    def mod_log(self) -> ModLog:
        """Get currently loaded ModLog cog instance."""
        return self.bot.get_cog("ModLog")

    @Cog.listener()
    async def on_message(self, msg: Message) -> None:
        """Invoke message filter for new messages."""
        await self._filter_message(msg)

        # Ignore webhook messages.
        if msg.webhook_id is None:
            await self.check_bad_words_in_name(msg.author)

    @Cog.listener()
    async def on_message_edit(self, before: Message, after: Message) -> None:
        """
        Invoke message filter for message edits.

        Also calculates the time delta from the previous edit or when message was sent if there's no prior edits.
        """
        # We only care about changes to the message contents/attachments and embed additions, not pin status etc.
        if all((
                before.content == after.content,  # content hasn't changed
                before.attachments ==
                after.attachments,  # attachments haven't changed
                len(before.embeds) >= len(
                    after.embeds)  # embeds haven't been added
        )):
            return

        # NOTE(review): relativedelta.microseconds is only the microsecond
        # *component* of the delta, not the total elapsed time — confirm
        # downstream consumers of `delta` expect that.
        if not before.edited_at:
            delta = relativedelta(after.edited_at,
                                  before.created_at).microseconds
        else:
            delta = relativedelta(after.edited_at,
                                  before.edited_at).microseconds
        await self._filter_message(after, delta)

    def get_name_match(self, name: str) -> Optional[re.Match]:
        """Check bad words from passed string (name). Return the first match found."""
        normalised_name = unicodedata.normalize("NFKC", name)
        # Strip combining marks so lookalike characters can't dodge the filter.
        cleaned_normalised_name = "".join(
            [c for c in normalised_name if not unicodedata.combining(c)])

        # Run filters against normalised, cleaned normalised and the original name,
        # in case we have filters for one but not the other.
        names_to_check = (name, normalised_name, cleaned_normalised_name)

        watchlist_patterns = self._get_filterlist_items('filter_token',
                                                        allowed=False)
        # NOTE(review): the inner loop variable shadows the `name` parameter;
        # harmless here since the parameter is already captured above.
        for pattern in watchlist_patterns:
            for name in names_to_check:
                if match := re.search(pattern, name, flags=re.IGNORECASE):
                    return match
        return None
# Exemple #8  (scraped-listing separator; commented out so the file parses)
# 0
class Metabase(Cog):
    """Commands for admins to interact with metabase."""

    # RedisCache["session_token": str, "session_expiry": UtcPosixTimestamp]
    # Persists session credentials across restarts, since logging in is heavily ratelimited.
    session_info = RedisCache()

    def __init__(self, bot: Bot) -> None:
        self.bot = bot
        self._session_scheduler = Scheduler(self.__class__.__name__)

        # In-memory mirrors of the cached session info (see `session_info` above).
        self.session_token: Optional[str] = None  # session_info["session_token"]: str
        self.session_expiry: Optional[float] = None  # session_info["session_expiry"]: UtcPosixTimestamp
        self.headers = BASE_HEADERS

        # Saves the output of each question, so internal eval can access it.
        self.exports: Dict[int, List[Dict]] = {}

        self.init_task = scheduling.create_task(self.init_cog(),
                                                event_loop=self.bot.loop)

    async def cog_command_error(self, ctx: Context, error: Exception) -> None:
        """Handle ClientResponseError errors locally to invalidate token if needed."""
        # Only invocation errors wrap the causing exception in `original`; other
        # command errors (e.g. failed checks or conversions) have no such attribute,
        # so guard with getattr to avoid an AttributeError here.
        original = getattr(error, "original", None)
        if not isinstance(original, ClientResponseError):
            return

        if original.status == 403:
            # User doesn't have access to the given question.
            log.warning(f"Failed to auth with Metabase for {original.url}.")
            await ctx.send(
                f":x: {ctx.author.mention} Failed to auth with Metabase for that question."
            )
        elif original.status == 404:
            await ctx.send(
                f":x: {ctx.author.mention} That question could not be found.")
        else:
            # User credentials are invalid, or the refresh failed.
            # Delete the expiry time, to force a refresh on next startup.
            await self.session_info.delete("session_expiry")
            log.exception("Session token is invalid or refresh failed.")
            await ctx.send(
                f":x: {ctx.author.mention} Session token is invalid or refresh failed."
            )
        error.handled = True

    async def init_cog(self) -> None:
        """Initialise the metabase session, reusing the cached token when still valid."""
        expiry_time = await self.session_info.get("session_expiry")
        if expiry_time:
            expiry_time = Arrow.utcfromtimestamp(expiry_time)

        if expiry_time is None or expiry_time < arrow.utcnow():
            # No cached session, or it has expired: force a refresh and end the task.
            await self.refresh_session()
            return

        # Cached token is in date, so get it and schedule a refresh for later.
        self.session_token = await self.session_info.get("session_token")
        self.headers["X-Metabase-Session"] = self.session_token

        self._session_scheduler.schedule_at(expiry_time, 0,
                                            self.refresh_session())

    async def refresh_session(self) -> None:
        """Refresh the metabase session token, cache it, and schedule the next refresh."""
        data = {
            "username": MetabaseConfig.username,
            "password": MetabaseConfig.password
        }
        async with self.bot.http_session.post(
                f"{MetabaseConfig.base_url}/api/session", json=data) as resp:
            # NOTE(review): a failed login leaves `session_token` as None — presumably
            # surfaced later via cog_command_error; confirm this is acceptable.
            json_data = await resp.json()
            self.session_token = json_data.get("id")

        self.headers["X-Metabase-Session"] = self.session_token
        log.info("Successfully updated metabase session.")

        # When the creds are going to expire.
        refresh_time = arrow.utcnow() + timedelta(
            minutes=MetabaseConfig.max_session_age)

        # Cache the session info, since logging in is heavily ratelimited.
        await self.session_info.set("session_token", self.session_token)
        await self.session_info.set("session_expiry", refresh_time.timestamp())

        self._session_scheduler.schedule_at(refresh_time, 0,
                                            self.refresh_session())

    @group(name="metabase", invoke_without_command=True)
    async def metabase_group(self, ctx: Context) -> None:
        """A group of commands for interacting with metabase."""
        await ctx.send_help(ctx.command)

    @metabase_group.command(name="extract", aliases=("export", ))
    async def metabase_extract(
            self,
            ctx: Context,
            question_id: int,
            extension: allowed_strings("csv", "json") = "csv") -> None:
        """
        Extract data from a metabase question.

        You can find the question_id at the end of the url on metabase.
        I.E. /question/{question_id}

        If, instead of an id, there is a long URL, make sure to save the question first.

        If you want to extract data from a question within a dashboard, click the
        question title at the top left of the chart to go directly to that page.

        Valid extensions are: csv and json.
        """
        await ctx.trigger_typing()

        # Make sure we have a session token before running anything.
        await self.init_task

        url = f"{MetabaseConfig.base_url}/api/card/{question_id}/query/{extension}"

        async with self.bot.http_session.post(url,
                                              headers=self.headers,
                                              raise_for_status=True) as resp:
            if extension == "csv":
                out = await resp.text(encoding="utf-8")
                # Save the output for use with internal eval.
                self.exports[question_id] = list(csv.DictReader(StringIO(out)))

            elif extension == "json":
                out = await resp.json(encoding="utf-8")
                # Save the output for use with internal eval.
                self.exports[question_id] = out

                # Format it nicely for human eyes.
                out = json.dumps(out, indent=4, sort_keys=True)

        paste_link = await send_to_paste_service(out, extension=extension)
        if paste_link:
            message = f":+1: {ctx.author.mention} Here's your link: {paste_link}"
        else:
            message = f":x: {ctx.author.mention} Link service is unavailable."
        await ctx.send(
            f"{message}\nYou can also access this data within internal eval by doing: "
            f"`bot.get_cog('Metabase').exports[{question_id}]`")

    @metabase_group.command(name="publish", aliases=("share", ))
    async def metabase_publish(self, ctx: Context, question_id: int) -> None:
        """Publically shares the given question and posts the link."""
        await ctx.trigger_typing()
        # Make sure we have a session token before running anything.
        await self.init_task

        url = f"{MetabaseConfig.base_url}/api/card/{question_id}/public_link"

        async with self.bot.http_session.post(url,
                                              headers=self.headers,
                                              raise_for_status=True) as resp:
            response_json = await resp.json(encoding="utf-8")
            sharing_url = f"{MetabaseConfig.public_url}/public/question/{response_json['uuid']}"
            await ctx.send(
                f":+1: {ctx.author.mention} Here's your sharing link: {sharing_url}"
            )

    # This cannot be static (must have a __func__ attribute).
    async def cog_check(self, ctx: Context) -> bool:
        """Only allow admins inside moderator channels to invoke the commands in this cog."""
        checks = [
            await has_any_role(Roles.admins).predicate(ctx),
            is_mod_channel(ctx.channel)
        ]
        return all(checks)

    def cog_unload(self) -> None:
        """
        Cancel the init task and scheduled tasks.

        It's important to wait for init_task to be cancelled before cancelling scheduled
        tasks. Otherwise, it's possible for _session_scheduler to schedule another task
        after cancel_all has finished, despite _init_task.cancel being called first.
        This is cause cancel() on its own doesn't block until the task is cancelled.
        """
        self.init_task.cancel()
        self.init_task.add_done_callback(
            lambda _: self._session_scheduler.cancel_all())
Exemple #9
0
from async_rediscache import RedisCache

# This cache maps a help channel to the time it was claimed.
# RedisCache[discord.TextChannel.id, UtcPosixTimestamp]
claim_times = RedisCache(namespace="HelpChannels.claim_times")

# This cache tracks which channels are claimed by which members.
# RedisCache[discord.TextChannel.id, t.Union[discord.User.id, discord.Member.id]]
claimants = RedisCache(namespace="HelpChannels.help_channel_claimants")

# This cache maps a help channel to the original question message in the same channel.
# RedisCache[discord.TextChannel.id, discord.Message.id]
question_messages = RedisCache(namespace="HelpChannels.question_messages")

# This cache maps a help channel to whether it has had any
# activity other than the original claimant. True being no other
# activity and False being other activity.
# RedisCache[discord.TextChannel.id, bool]
unanswered = RedisCache(namespace="HelpChannels.unanswered")
Exemple #10
0
class Branding(commands.Cog):
    """
    Guild branding management.

    Extension responsible for automatic synchronisation of the guild's branding with the branding repository.
    Event definitions and assets are automatically discovered and applied as appropriate.

    All state is stored in Redis. The cog should therefore seamlessly transition across restarts and maintain
    a consistent icon rotation schedule for events with multiple icon assets.

    By caching hashes of banner & icon assets, we discover changes in currently applied assets and always keep
    the latest version applied.

    The command interface allows moderators+ to control the daemon or request asset synchronisation, while
    regular users can see information about the current event and the overall event schedule.
    """

    # RedisCache[
    #     "daemon_active": bool            | If True, daemon starts on start-up. Controlled via commands.
    #     "event_path": str                | Current event's path in the branding repo.
    #     "event_description": str         | Current event's Markdown description.
    #     "event_duration": str            | Current event's human-readable date range.
    #     "banner_hash": str               | SHA of the currently applied banner.
    #     "icons_hash": str                | Compound SHA of all icons in current rotation.
    #     "last_rotation_timestamp": float | POSIX UTC timestamp.
    # ]
    cache_information = RedisCache()

    # Icons in current rotation. Keys (str) are download URLs, values (int) track the amount of times each
    # icon has been used in the current rotation.
    cache_icons = RedisCache()

    # All available event names & durations. Cached by the daemon nightly; read by the calendar command.
    cache_events = RedisCache()

    def __init__(self, bot: Bot) -> None:
        """Instantiate repository abstraction & allow daemon to start."""
        self.bot = bot
        self.repository = BrandingRepository(bot)

        self.bot.loop.create_task(
            self.maybe_start_daemon())  # Start depending on cache.

    # region: Internal logic & state management

    @mock_in_debug(return_value=True
                   )  # Mocked in development environment to prevent API spam.
    async def apply_asset(self, asset_type: AssetType,
                          download_url: str) -> bool:
        """
        Download asset from `download_url` and apply it to PyDis as `asset_type`.

        Return a boolean indicating whether the application was successful.
        """
        log.info(f"Applying '{asset_type.value}' asset to the guild.")

        try:
            file = await self.repository.fetch_file(download_url)
        except Exception:
            log.exception(f"Failed to fetch '{asset_type.value}' asset.")
            return False

        await self.bot.wait_until_guild_available()
        pydis: discord.Guild = self.bot.get_guild(Guild.id)

        timeout = 10  # Seconds.
        # NOTE(review): plain `with` on async_timeout.timeout works on async-timeout < 4;
        # newer releases require `async with` — confirm the pinned version.
        try:
            with async_timeout.timeout(
                    timeout):  # Raise after `timeout` seconds.
                await pydis.edit(**{asset_type.value: file})
        except discord.HTTPException:
            log.exception("Asset upload to Discord failed.")
            return False
        except asyncio.TimeoutError:
            log.error(
                f"Asset upload to Discord timed out after {timeout} seconds.")
            return False
        else:
            log.trace("Asset uploaded successfully.")
            return True

    async def apply_banner(self, banner: RemoteObject) -> bool:
        """
        Apply `banner` to the guild and cache its hash if successful.

        Banners should always be applied via this method to ensure that the last hash is cached.

        Return a boolean indicating whether the application was successful.
        """
        success = await self.apply_asset(AssetType.BANNER, banner.download_url)

        if success:
            await self.cache_information.set("banner_hash", banner.sha)

        return success

    async def rotate_icons(self) -> bool:
        """
        Choose and apply the next-up icon in rotation.

        We keep track of the amount of times each icon has been used. The values in `cache_icons` can be understood
        to be iteration IDs. When an icon is chosen & applied, we bump its count, pushing it into the next iteration.

        Once the current iteration (lowest count in the cache) depletes, we move onto the next iteration.

        In the case that there is only 1 icon in the rotation and has already been applied, do nothing.

        Return a boolean indicating whether a new icon was applied successfully.
        """
        log.debug("Rotating icons.")

        state = await self.cache_icons.to_dict()
        log.trace(f"Total icons in rotation: {len(state)}.")

        if not state:  # This would only happen if rotation not initiated, but we can handle gracefully.
            log.warning(
                "Attempted icon rotation with an empty icon cache. This indicates wrong logic."
            )
            return False

        if len(state) == 1 and 1 in state.values():
            log.debug(
                "Aborting icon rotation: only 1 icon is available and has already been applied."
            )
            return False

        current_iteration = min(
            state.values())  # Choose iteration to draw from.
        options = [
            download_url for download_url, times_used in state.items()
            if times_used == current_iteration
        ]

        log.trace(
            f"Choosing from {len(options)} icons in iteration {current_iteration}."
        )
        next_icon = random.choice(options)

        success = await self.apply_asset(AssetType.ICON, next_icon)

        if success:
            await self.cache_icons.increment(
                next_icon)  # Push the icon into the next iteration.

            # NOTE(review): naive utcnow().timestamp() interprets the naive value in local
            # time; `maybe_rotate_icons` reads it back with the matching fromtimestamp
            # convention, so the round-trip is internally consistent.
            timestamp = datetime.utcnow().timestamp()
            await self.cache_information.set("last_rotation_timestamp",
                                             timestamp)

        return success

    async def maybe_rotate_icons(self) -> None:
        """
        Call `rotate_icons` if the configured amount of time has passed since last rotation.

        We offset the calculated time difference into the future to avoid off-by-a-little-bit errors. Because there
        is work to be done before the timestamp is read and written, the next read will likely commence slightly
        under 24 hours after the last write.
        """
        log.debug("Checking whether it's time for icons to rotate.")

        last_rotation_timestamp = await self.cache_information.get(
            "last_rotation_timestamp")

        if last_rotation_timestamp is None:  # Maiden case ~ never rotated.
            await self.rotate_icons()
            return

        # Matches the naive-utcnow convention used when the timestamp was written in `rotate_icons`.
        last_rotation = datetime.fromtimestamp(last_rotation_timestamp)
        difference = (datetime.utcnow() - last_rotation) + timedelta(minutes=5)

        log.trace(
            f"Icons last rotated at {last_rotation} (difference: {difference})."
        )

        if difference.days >= BrandingConfig.cycle_frequency:
            await self.rotate_icons()

    async def initiate_icon_rotation(
            self, available_icons: t.List[RemoteObject]) -> None:
        """
        Set up a new icon rotation.

        This function should be called whenever available icons change. This is generally the case when we enter
        a new event, but potentially also when the assets of an on-going event change. In such cases, a reset
        of `cache_icons` is necessary, because it contains download URLs which may have gotten stale.

        This function does not upload a new icon!
        """
        log.debug("Initiating new icon rotation.")

        await self.cache_icons.clear()

        # Every icon starts in iteration 0 (i.e. never used in this rotation).
        new_state = {icon.download_url: 0 for icon in available_icons}
        await self.cache_icons.update(new_state)

        log.trace(f"Icon rotation initiated for {len(new_state)} icons.")

        await self.cache_information.set("icons_hash",
                                         compound_hash(available_icons))

    async def send_info_embed(self, channel_id: int, *,
                              is_notification: bool) -> None:
        """
        Send the currently cached event description to `channel_id`.

        When `is_notification` holds, a short contextual message for the #changelog channel is added.

        We read event information from `cache_information`. The caller is therefore responsible for making
        sure that the cache is up-to-date before calling this function.
        """
        log.debug(
            f"Sending event information event to channel: {channel_id} ({is_notification=})."
        )

        await self.bot.wait_until_guild_available()
        channel: t.Optional[discord.TextChannel] = self.bot.get_channel(
            channel_id)

        if channel is None:
            log.warning(
                f"Cannot send event information: channel {channel_id} not found!"
            )
            return

        log.trace(f"Destination channel: #{channel.name}.")

        description = await self.cache_information.get("event_description")
        duration = await self.cache_information.get("event_duration")

        if None in (description, duration):
            content = None
            embed = make_embed("No event in cache",
                               "Is the daemon enabled?",
                               success=False)

        else:
            content = "Python Discord is entering a new event!" if is_notification else None
            # Slicing respects Discord's embed description / footer length limits.
            embed = discord.Embed(description=description[:2048],
                                  colour=discord.Colour.blurple())
            embed.set_footer(text=duration[:2048])

        await channel.send(content=content, embed=embed)

    async def enter_event(self, event: Event) -> t.Tuple[bool, bool]:
        """
        Apply `event` assets and update information cache.

        We cache `event` information to ensure that we:
        * Remember which event we're currently in across restarts
        * Provide an on-demand informational embed without re-querying the branding repository

        An event change should always be handled via this function, as it ensures that the cache is populated.

        The #changelog notification is omitted when `event` is fallback, or already applied.

        Return a 2-tuple indicating whether the banner, and the icon, were applied successfully.
        """
        log.info(f"Entering event: '{event.path}'.")

        banner_success = await self.apply_banner(
            event.banner)  # Only one asset ~ apply directly.

        await self.initiate_icon_rotation(event.icons
                                          )  # Prepare a new rotation.
        icon_success = await self.rotate_icons(
        )  # Apply an icon from the new rotation.

        # This will only be False in the case of a manual same-event re-synchronisation.
        event_changed = event.path != await self.cache_information.get(
            "event_path")

        # Cache event identity to avoid re-entry in case of restart.
        await self.cache_information.set("event_path", event.path)

        # Cache information shown in the 'about' embed.
        await self.populate_cache_event_description(event)

        # Notify guild of new event ~ this reads the information that we cached above.
        if event_changed and not event.meta.is_fallback:
            await self.send_info_embed(Channels.change_log,
                                       is_notification=True)
        else:
            log.trace(
                "Omitting #changelog notification. Event has not changed, or new event is fallback."
            )

        return banner_success, icon_success

    async def synchronise(self) -> t.Tuple[bool, bool]:
        """
        Fetch the current event and delegate to `enter_event`.

        This is a convenience function to force synchronisation via a command. It should generally only be used
        in a recovery scenario. In the usual case, the daemon already has an `Event` instance and can pass it
        to `enter_event` directly.

        Return a 2-tuple indicating whether the banner, and the icon, were applied successfully.
        """
        log.debug("Synchronise: fetching current event.")

        current_event, available_events = await self.repository.get_current_event(
        )

        await self.populate_cache_events(available_events)

        if current_event is None:
            log.error("Failed to fetch event. Cannot synchronise!")
            return False, False

        return await self.enter_event(current_event)

    async def populate_cache_events(self, events: t.List[Event]) -> None:
        """
        Clear `cache_events` and re-populate with names and durations of `events`.

        For each event, we store its name and duration string. This is the information presented to users in the
        calendar command. If a format change is needed, it has to be done here.

        The cache does not store the fallback event, as it is not shown in the calendar.
        """
        log.debug("Populating events cache.")

        await self.cache_events.clear()

        no_fallback = [event for event in events if not event.meta.is_fallback]
        chronological_events = sorted(no_fallback,
                                      key=attrgetter("meta.start_date"))

        log.trace(
            f"Writing {len(chronological_events)} events (fallback omitted).")

        with contextlib.suppress(
                ValueError):  # Cache raises when updated with an empty dict.
            await self.cache_events.update({
                extract_event_name(event): extract_event_duration(event)
                for event in chronological_events
            })

    async def populate_cache_event_description(self, event: Event) -> None:
        """
        Cache `event` description & duration.

        This should be called when entering a new event, and can be called periodically to ensure that the cache
        holds fresh information in the case that the event remains the same, but its description changes.

        The duration is stored formatted for the frontend. It is not intended to be used programmatically.
        """
        log.debug("Caching event description & duration.")

        await self.cache_information.set("event_description",
                                         event.meta.description)
        await self.cache_information.set("event_duration",
                                         extract_event_duration(event))

    # endregion
    # region: Daemon

    async def maybe_start_daemon(self) -> None:
        """
        Start the daemon depending on cache state.

        The daemon will only start if it has been explicitly enabled via a command.
        """
        log.debug("Checking whether daemon should start.")

        should_begin: t.Optional[bool] = await self.cache_information.get(
            "daemon_active")  # None if never set!

        if should_begin:
            self.daemon_loop.start()

    def cog_unload(self) -> None:
        """
        Cancel the daemon in case of cog unload.

        This is **not** done automatically! The daemon otherwise remains active in the background.
        """
        log.debug("Cog unload: cancelling daemon.")

        self.daemon_loop.cancel()

    async def daemon_main(self) -> None:
        """
        Synchronise guild & caches with branding repository.

        Pull the currently active event from the branding repository and check whether it matches the currently
        active event in the cache. If not, apply the new event.

        However, it is also possible that an event's assets change as it's active. To account for such cases,
        we check the banner & icons hashes against the currently cached values. If there is a mismatch, each
        specific asset is re-applied.
        """
        log.info("Daemon main: checking current event.")

        new_event, available_events = await self.repository.get_current_event()

        await self.populate_cache_events(available_events)

        if new_event is None:
            log.warning(
                "Daemon main: failed to get current event from branding repository, will do nothing."
            )
            return

        if new_event.path != await self.cache_information.get("event_path"):
            log.debug("Daemon main: new event detected!")
            await self.enter_event(new_event)
            return

        await self.populate_cache_event_description(
            new_event)  # Cache fresh frontend info in case of change.

        log.trace(
            "Daemon main: event has not changed, checking for change in assets."
        )

        if new_event.banner.sha != await self.cache_information.get(
                "banner_hash"):
            log.debug("Daemon main: detected banner change.")
            await self.apply_banner(new_event.banner)

        if compound_hash(new_event.icons) != await self.cache_information.get(
                "icons_hash"):
            log.debug("Daemon main: detected icon change.")
            await self.initiate_icon_rotation(new_event.icons)
            await self.rotate_icons()
        else:
            await self.maybe_rotate_icons()

    @tasks.loop(hours=24)
    async def daemon_loop(self) -> None:
        """
        Call `daemon_main` every 24 hours.

        The scheduler maintains an exact 24-hour frequency even if this coroutine takes time to complete. If the
        coroutine is started at 00:01 and completes at 00:05, it will still be started at 00:01 the next day.
        """
        log.trace("Daemon loop: calling daemon main.")

        try:
            await self.daemon_main()
        except Exception:
            log.exception("Daemon loop: failed with an unhandled exception!")

    @daemon_loop.before_loop
    async def daemon_before(self) -> None:
        """
        Call `daemon_loop` immediately, then block the loop until the next-up UTC midnight.

        The first iteration is invoked directly such that synchronisation happens immediately after daemon start.
        We then calculate the time until the next-up midnight and sleep before letting `daemon_loop` begin.
        """
        log.trace("Daemon before: performing start-up iteration.")

        await self.daemon_loop()

        log.trace(
            "Daemon before: calculating time to sleep before loop begins.")
        now = datetime.utcnow()

        # The actual midnight moment is offset into the future to prevent issues with imprecise sleep.
        # `time(minute=1)` yields 00:01, i.e. one minute past tomorrow's midnight.
        tomorrow = now + timedelta(days=1)
        midnight = datetime.combine(tomorrow, time(minute=1))

        sleep_secs = (midnight - now).total_seconds()
        log.trace(
            f"Daemon before: sleeping {sleep_secs} seconds before next-up midnight: {midnight}."
        )

        await asyncio.sleep(sleep_secs)

    # endregion
    # region: Command interface (branding)

    @commands.group(name="branding")
    async def branding_group(self, ctx: commands.Context) -> None:
        """Control the branding cog."""
        if not ctx.invoked_subcommand:
            await ctx.send_help(ctx.command)

    @branding_group.command(name="about", aliases=("current", "event"))
    async def branding_about_cmd(self, ctx: commands.Context) -> None:
        """Show the current event's description and duration."""
        await self.send_info_embed(ctx.channel.id, is_notification=False)

    @commands.has_any_role(*MODERATION_ROLES)
    @branding_group.command(name="sync")
    async def branding_sync_cmd(self, ctx: commands.Context) -> None:
        """
        Force branding synchronisation.

        Show which assets have failed to synchronise, if any.
        """
        async with ctx.typing():
            banner_success, icon_success = await self.synchronise()

        failed_assets = ", ".join(
            name for name, status in [("banner",
                                       banner_success), ("icon", icon_success)]
            if status is False)

        if failed_assets:
            resp = make_embed("Synchronisation unsuccessful",
                              f"Failed to apply: {failed_assets}.",
                              success=False)
            resp.set_footer(text="Check log for details.")
        else:
            resp = make_embed("Synchronisation successful",
                              "Assets have been applied.",
                              success=True)

        await ctx.send(embed=resp)

    # endregion
    # region: Command interface (branding calendar)

    @branding_group.group(name="calendar", aliases=("schedule", "events"))
    async def branding_calendar_group(self, ctx: commands.Context) -> None:
        """
        Show the current event calendar.

        We draw event information from `cache_events` and use each key-value pair to create a field in the response
        embed. As such, we do not need to query the API to get event information. The cache is automatically
        re-populated by the daemon whenever it makes a request. A moderator+ can also explicitly request a cache
        refresh using the 'refresh' subcommand.

        Due to Discord limitations, we only show up to 25 events. This is entirely sufficient at the time of writing.
        In the case that we find ourselves with more than 25 events, a warning log will alert core devs.

        In the future, we may be interested in a field-paginating solution.
        """
        if ctx.invoked_subcommand:
            # If you're wondering why this works: when the 'refresh' subcommand eventually re-invokes
            # this group, the attribute will be automatically set to None by the framework.
            return

        available_events = await self.cache_events.to_dict()
        log.trace(
            f"Found {len(available_events)} cached events available for calendar view."
        )

        if not available_events:
            resp = make_embed(
                "No events found!",
                "Cache may be empty, try `branding calendar refresh`.",
                success=False)
            await ctx.send(embed=resp)
            return

        embed = discord.Embed(title="Current event calendar",
                              colour=discord.Colour.blurple())

        # Because Discord embeds can only contain up to 25 fields, we only show the first 25.
        first_25 = list(available_events.items())[:25]

        if len(first_25) != len(
                available_events
        ):  # Alert core devs that a paginating solution is now necessary.
            log.warning(
                f"There are {len(available_events)} events, but the calendar view can only display 25."
            )

        for name, duration in first_25:
            embed.add_field(name=name[:256], value=duration[:1024])

        embed.set_footer(text="Otherwise, the fallback season is used.")

        await ctx.send(embed=embed)

    @commands.has_any_role(*MODERATION_ROLES)
    @branding_calendar_group.command(name="refresh")
    async def branding_calendar_refresh_cmd(self,
                                            ctx: commands.Context) -> None:
        """
        Refresh event cache and show current event calendar.

        Supplementary subcommand allowing force-refreshing the event cache. Implemented as a subcommand because
        unlike the supergroup, it requires moderator privileges.
        """
        log.info("Performing command-requested event cache refresh.")

        async with ctx.typing():
            available_events = await self.repository.get_events()
            await self.populate_cache_events(available_events)

        await ctx.invoke(self.branding_calendar_group)

    # endregion
    # region: Command interface (branding daemon)

    @commands.has_any_role(*MODERATION_ROLES)
    @branding_group.group(name="daemon", aliases=("d", ))
    async def branding_daemon_group(self, ctx: commands.Context) -> None:
        """Control the branding cog's daemon."""
        if not ctx.invoked_subcommand:
            await ctx.send_help(ctx.command)

    @branding_daemon_group.command(name="enable", aliases=("start", "on"))
    async def branding_daemon_enable_cmd(self, ctx: commands.Context) -> None:
        """Enable the branding daemon."""
        await self.cache_information.set("daemon_active", True)

        if self.daemon_loop.is_running():
            resp = make_embed("Daemon is already enabled!", "", success=False)
        else:
            self.daemon_loop.start()
            resp = make_embed("Daemon enabled!",
                              "It will now automatically awaken on start-up.",
                              success=True)

        await ctx.send(embed=resp)

    @branding_daemon_group.command(name="disable", aliases=("stop", "off"))
    async def branding_daemon_disable_cmd(self, ctx: commands.Context) -> None:
        """Disable the branding daemon."""
        await self.cache_information.set("daemon_active", False)

        if self.daemon_loop.is_running():
            self.daemon_loop.cancel()
            resp = make_embed("Daemon disabled!",
                              "It will not awaken on start-up.",
                              success=True)
        else:
            resp = make_embed("Daemon is already disabled!", "", success=False)

        await ctx.send(embed=resp)

    @branding_daemon_group.command(name="status")
    async def branding_daemon_status_cmd(self, ctx: commands.Context) -> None:
        """Check whether the daemon is currently enabled."""
        if self.daemon_loop.is_running():
            resp = make_embed("Daemon is enabled",
                              "Use `branding daemon disable` to stop.",
                              success=True)
        else:
            resp = make_embed("Daemon is disabled",
                              "Use `branding daemon enable` to start.",
                              success=False)

        await ctx.send(embed=resp)
Exemple #11
0
class Silence(commands.Cog):
    """Commands for stopping channel messages for `verified` role in a channel."""

    # Maps muted channel IDs to their previous overwrites for send_message and add_reactions.
    # Overwrites are stored as JSON.
    previous_overwrites = RedisCache()

    # Maps muted channel IDs to POSIX timestamps of when they'll be unsilenced.
    # A timestamp equal to -1 means it's indefinite.
    unsilence_timestamps = RedisCache()

    def __init__(self, bot: Bot) -> None:
        self.bot = bot
        self.scheduler = Scheduler(self.__class__.__name__)

        # Guild-dependent setup is deferred; commands await this task before running.
        self._init_task = self.bot.loop.create_task(self._async_init())

    async def _async_init(self) -> None:
        """Set instance attributes once the guild is available and reschedule unsilences."""
        await self.bot.wait_until_guild_available()

        guild = self.bot.get_guild(Guild.id)
        self._verified_role = guild.get_role(Roles.verified)
        self._mod_alerts_channel = self.bot.get_channel(Channels.mod_alerts)
        self.notifier = SilenceNotifier(self.bot.get_channel(Channels.mod_log))
        await self._reschedule()

    @commands.command(aliases=("hush", ))
    @lock_arg(LOCK_NAMESPACE, "ctx", attrgetter("channel"), raise_error=True)
    async def silence(self,
                      ctx: Context,
                      duration: HushDurationConverter = 10) -> None:
        """
        Silence the current channel for `duration` minutes or `forever`.

        Duration is capped at 15 minutes, passing forever makes the silence indefinite.
        Indefinitely silenced channels get added to a notifier which posts notices every 15 minutes from the start.
        """
        # Ensure `_verified_role` and friends exist before touching overwrites.
        await self._init_task

        channel_info = f"#{ctx.channel} ({ctx.channel.id})"
        log.debug(f"{ctx.author} is silencing channel {channel_info}.")

        if not await self._set_silence_overwrites(ctx.channel):
            log.info(
                f"Tried to silence channel {channel_info} but the channel was already silenced."
            )
            await ctx.send(MSG_SILENCE_FAIL)
            return

        await self._schedule_unsilence(ctx, duration)

        # `duration` is None when the converter received "forever".
        if duration is None:
            self.notifier.add_channel(ctx.channel)
            log.info(f"Silenced {channel_info} indefinitely.")
            await ctx.send(MSG_SILENCE_PERMANENT)
        else:
            log.info(f"Silenced {channel_info} for {duration} minute(s).")
            await ctx.send(MSG_SILENCE_SUCCESS.format(duration=duration))

    @commands.command(aliases=("unhush", ))
    async def unsilence(self, ctx: Context) -> None:
        """
        Unsilence the current channel.

        If the channel was silenced indefinitely, notifications for the channel will stop.
        """
        await self._init_task
        log.debug(
            f"Unsilencing channel #{ctx.channel} from {ctx.author}'s command.")
        await self._unsilence_wrapper(ctx.channel)

    @lock_arg(LOCK_NAMESPACE, "channel", raise_error=True)
    async def _unsilence_wrapper(self, channel: TextChannel) -> None:
        """Unsilence `channel` and send a success/failure message."""
        if not await self._unsilence(channel):
            overwrite = channel.overwrites_for(self._verified_role)
            # Distinguish a manually-silenced channel from one that isn't silenced at all.
            if overwrite.send_messages is False or overwrite.add_reactions is False:
                await channel.send(MSG_UNSILENCE_MANUAL)
            else:
                await channel.send(MSG_UNSILENCE_FAIL)
        else:
            await channel.send(MSG_UNSILENCE_SUCCESS)

    async def _set_silence_overwrites(self, channel: TextChannel) -> bool:
        """Set silence permission overwrites for `channel` and return True if successful."""
        overwrite = channel.overwrites_for(self._verified_role)
        prev_overwrites = dict(send_messages=overwrite.send_messages,
                               add_reactions=overwrite.add_reactions)

        # Already silenced: either an unsilence task is scheduled or both perms are denied.
        if channel.id in self.scheduler or all(
                val is False for val in prev_overwrites.values()):
            return False

        overwrite.update(send_messages=False, add_reactions=False)
        await channel.set_permissions(self._verified_role, overwrite=overwrite)
        # Persist the previous overwrites so they can be restored on unsilence.
        await self.previous_overwrites.set(channel.id,
                                           json.dumps(prev_overwrites))

        return True

    async def _schedule_unsilence(self, ctx: Context,
                                  duration: Optional[int]) -> None:
        """Schedule `ctx.channel` to be unsilenced if `duration` is not None."""
        if duration is None:
            # -1 marks an indefinite silence in the cache.
            await self.unsilence_timestamps.set(ctx.channel.id, -1)
        else:
            self.scheduler.schedule_later(duration * 60, ctx.channel.id,
                                          ctx.invoke(self.unsilence))
            unsilence_time = datetime.now(tz=timezone.utc) + timedelta(
                minutes=duration)
            # Persist the expiry so the silence can be rescheduled after a restart.
            await self.unsilence_timestamps.set(ctx.channel.id,
                                                unsilence_time.timestamp())

    async def _unsilence(self, channel: TextChannel) -> bool:
        """
        Unsilence `channel`.

        If `channel` has a silence task scheduled or has its previous overwrites cached, unsilence
        it, cancel the task, and remove it from the notifier. Notify admins if it has a task but
        not cached overwrites.

        Return `True` if channel permissions were changed, `False` otherwise.
        """
        prev_overwrites = await self.previous_overwrites.get(channel.id)
        if channel.id not in self.scheduler and prev_overwrites is None:
            log.info(
                f"Tried to unsilence channel #{channel} ({channel.id}) but the channel was not silenced."
            )
            return False

        overwrite = channel.overwrites_for(self._verified_role)
        if prev_overwrites is None:
            log.info(
                f"Missing previous overwrites for #{channel} ({channel.id}); defaulting to None."
            )
            overwrite.update(send_messages=None, add_reactions=None)
        else:
            overwrite.update(**json.loads(prev_overwrites))

        await channel.set_permissions(self._verified_role, overwrite=overwrite)
        log.info(f"Unsilenced channel #{channel} ({channel.id}).")

        # Clean up all silence state for the channel.
        self.scheduler.cancel(channel.id)
        self.notifier.remove_channel(channel)
        await self.previous_overwrites.delete(channel.id)
        await self.unsilence_timestamps.delete(channel.id)

        if prev_overwrites is None:
            await self._mod_alerts_channel.send(
                f"<@&{Roles.admins}> Restored overwrites with default values after unsilencing "
                f"{channel.mention}. Please check that the `Send Messages` and `Add Reactions` "
                f"overwrites for {self._verified_role.mention} are at their desired values."
            )

        return True

    async def _reschedule(self) -> None:
        """Reschedule unsilencing of active silences and add permanent ones to the notifier."""
        for channel_id, timestamp in await self.unsilence_timestamps.items():
            channel = self.bot.get_channel(channel_id)
            if channel is None:
                log.info(
                    f"Can't reschedule silence for {channel_id}: channel not found."
                )
                continue

            if timestamp == -1:
                log.info(
                    f"Adding permanent silence for #{channel} ({channel.id}) to the notifier."
                )
                self.notifier.add_channel(channel)
                continue

            dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
            delta = (dt - datetime.now(tz=timezone.utc)).total_seconds()
            if delta <= 0:
                # Expired while the bot was down: unsilence immediately.
                # Suppress the error since it's not being invoked by a user via the command.
                with suppress(LockedResourceError):
                    await self._unsilence_wrapper(channel)
            else:
                log.info(
                    f"Rescheduling silence for #{channel} ({channel.id}).")
                self.scheduler.schedule_later(delta, channel_id,
                                              self._unsilence_wrapper(channel))

    def cog_unload(self) -> None:
        """Cancel the init task and scheduled tasks."""
        # It's important to wait for _init_task (specifically for _reschedule) to be cancelled
        # before cancelling scheduled tasks. Otherwise, it's possible for _reschedule to schedule
        # more tasks after cancel_all has finished, despite _init_task.cancel being called first.
        # This is because cancel() on its own doesn't block until the task is cancelled.
        self._init_task.cancel()
        self._init_task.add_done_callback(
            lambda _: self.scheduler.cancel_all())

    # This cannot be static (must have a __func__ attribute).
    async def cog_check(self, ctx: Context) -> bool:
        """Only allow moderators to invoke the commands in this cog."""
        return await commands.has_any_role(*MODERATION_ROLES).predicate(ctx)
Exemple #12
0
class TalentPool(Cog, name="Talentpool"):
    """Used to nominate potential helper candidates."""

    # RedisCache[str, bool]
    # Can contain a single key, "autoreview_enabled", with the value a bool indicating if autoreview is enabled.
    talentpool_settings = RedisCache()

    def __init__(self, bot: Bot) -> None:
        self.bot = bot
        self.reviewer = Reviewer(self.__class__.__name__, bot, self)
        # Maps nominated user IDs to their nomination entries; None until first refreshed.
        self.cache: Optional[defaultdict[int, dict]] = None
        self.api_default_params = {
            'active': 'true',
            'ordering': '-inserted_at'
        }

        self.initial_refresh_task = scheduling.create_task(
            self.refresh_cache(), event_loop=self.bot.loop)
        scheduling.create_task(self.schedule_autoreviews(),
                               event_loop=self.bot.loop)

    async def schedule_autoreviews(self) -> None:
        """Reschedule reviews for active nominations if autoreview is enabled."""
        if await self.autoreview_enabled():
            # Wait for a populated cache first
            await self.initial_refresh_task
            await self.reviewer.reschedule_reviews()
        else:
            log.trace("Not scheduling reviews as autoreview is disabled.")

    async def autoreview_enabled(self) -> bool:
        """Return whether automatic posting of nomination reviews is enabled."""
        # Defaults to True if the setting has never been stored.
        return await self.talentpool_settings.get(AUTOREVIEW_ENABLED_KEY, True)

    async def refresh_cache(self) -> bool:
        """Update the TalentPool users cache; return True on success, False otherwise."""
        # Wait until logged in to ensure bot api client exists
        await self.bot.wait_until_guild_available()
        try:
            data = await self.bot.api_client.get(
                'bot/nominations', params=self.api_default_params)
        except ResponseCodeError as err:
            log.exception(
                "Failed to fetch the currently nominated users from the API",
                exc_info=err)
            return False

        self.cache = defaultdict(dict)

        for entry in data:
            user_id = entry.pop('user')
            self.cache[user_id] = entry

        return True

    @group(name='talentpool',
           aliases=('tp', 'talent', 'nomination', 'n'),
           invoke_without_command=True)
    @has_any_role(*MODERATION_ROLES)
    async def nomination_group(self, ctx: Context) -> None:
        """Highlights the activity of helper nominees by relaying their messages to the talent pool channel."""
        await ctx.send_help(ctx.command)

    @nomination_group.group(name="autoreview",
                            aliases=("ar", ),
                            invoke_without_command=True)
    @has_any_role(*MODERATION_ROLES)
    async def nomination_autoreview_group(self, ctx: Context) -> None:
        """Commands for enabling or disabling autoreview."""
        await ctx.send_help(ctx.command)

    @nomination_autoreview_group.command(name="enable", aliases=("on", ))
    @has_any_role(Roles.admins)
    async def autoreview_enable(self, ctx: Context) -> None:
        """
        Enable automatic posting of reviews.

        This will post reviews up to one day overdue. Older nominations can be
        manually reviewed with the `tp post_review <user_id>` command.
        """
        if await self.autoreview_enabled():
            await ctx.send(":x: Autoreview is already enabled")
            return

        await self.talentpool_settings.set(AUTOREVIEW_ENABLED_KEY, True)
        await self.reviewer.reschedule_reviews()
        await ctx.send(":white_check_mark: Autoreview enabled")

    @nomination_autoreview_group.command(name="disable", aliases=("off", ))
    @has_any_role(Roles.admins)
    async def autoreview_disable(self, ctx: Context) -> None:
        """Disable automatic posting of reviews."""
        if not await self.autoreview_enabled():
            await ctx.send(":x: Autoreview is already disabled")
            return

        await self.talentpool_settings.set(AUTOREVIEW_ENABLED_KEY, False)
        # Also drop reviews that were already scheduled while autoreview was on.
        self.reviewer.cancel_all()
        await ctx.send(":white_check_mark: Autoreview disabled")

    @nomination_autoreview_group.command(name="status")
    @has_any_role(*MODERATION_ROLES)
    async def autoreview_status(self, ctx: Context) -> None:
        """Show whether automatic posting of reviews is enabled or disabled."""
        if await self.autoreview_enabled():
            await ctx.send("Autoreview is currently enabled")
        else:
            await ctx.send("Autoreview is currently disabled")

    @nomination_group.command(name="nominees",
                              aliases=("nominated", "all", "list", "watched"),
                              root_aliases=("nominees", ))
    @has_any_role(*MODERATION_ROLES)
    async def list_command(self,
                           ctx: Context,
                           oldest_first: bool = False,
                           update_cache: bool = True) -> None:
        """
        Shows the users that are currently in the talent pool.

        The optional kwarg `oldest_first` can be used to order the list by oldest nomination.

        The optional kwarg `update_cache` can be used to update the user
        cache using the API before listing the users.
        """
        await self.list_nominated_users(ctx,
                                        oldest_first=oldest_first,
                                        update_cache=update_cache)

    async def list_nominated_users(self,
                                   ctx: Context,
                                   oldest_first: bool = False,
                                   update_cache: bool = True) -> None:
        """
        Gives an overview of the nominated users list.

        It specifies the users' mention, name, how long ago they were nominated, and whether their
        review was scheduled or already posted.

        The optional kwarg `oldest_first` orders the list by oldest entry.

        The optional kwarg `update_cache` specifies whether the cache should
        be refreshed by polling the API.
        """
        successful_update = False
        if update_cache:
            if not (successful_update := await self.refresh_cache()):
                await ctx.send(
                    ":warning: Unable to update cache. Data may be inaccurate."
                )

        # NOTE(review): assumes the cache was populated at least once;
        # `self.cache` is None until the initial refresh completes — confirm callers.
        nominations = self.cache.items()
        if oldest_first:
            # The API returns entries ordered by '-inserted_at', i.e. newest first.
            nominations = reversed(nominations)

        lines = []

        for user_id, user_data in nominations:
            member = ctx.guild.get_member(user_id)
            line = f"• `{user_id}`"
            if member:
                line += f" ({member.name}#{member.discriminator})"
            inserted_at = user_data['inserted_at']
            line += f", added {get_time_delta(inserted_at)}"
            if not member:  # Cross off users who left the server.
                line = f"~~{line}~~"
            if user_data['reviewed']:
                line += " *(reviewed)*"
            elif user_id in self.reviewer:
                line += " *(scheduled)*"
            lines.append(line)

        if not lines:
            lines = ("There's nothing here yet.", )

        embed = Embed(
            title=
            f"Talent Pool active nominations ({'updated' if update_cache and successful_update else 'cached'})",
            color=Color.blue())
        await LinePaginator.paginate(lines, ctx, embed, empty=False)
Exemple #13
0
class Stream(commands.Cog):
    """Grant and revoke streaming permissions from users."""

    # Stores tasks to remove streaming permission
    # User id : timestamp relation
    task_cache = RedisCache()

    def __init__(self, bot: Bot) -> None:
        self.bot = bot
        self.scheduler = Scheduler(self.__class__.__name__)
        # Restore scheduled revocations persisted in Redis across restarts.
        self.reload_task = self.bot.loop.create_task(
            self._reload_tasks_from_redis())

    async def _remove_streaming_permission(
            self, schedule_user: discord.Member) -> None:
        """Remove streaming permission from Member."""
        await self._delete_from_redis(schedule_user.id)
        await schedule_user.remove_roles(discord.Object(Roles.video),
                                         reason="Streaming access revoked")

    async def _add_to_redis_cache(self, user_id: int,
                                  timestamp: float) -> None:
        """Adds 'task' to redis cache."""
        await self.task_cache.set(user_id, timestamp)

    async def _reload_tasks_from_redis(self) -> None:
        """Reschedule revocation tasks for every user/timestamp pair stored in the cache."""
        await self.bot.wait_until_guild_available()
        items = await self.task_cache.items()
        for key, value in items:
            member = await self.bot.get_guild(Guild.id).fetch_member(key)
            # NOTE(review): timestamps originate from naive utcnow()-based datetimes
            # in `stream` below — confirm the naive round-trip is intended.
            self.scheduler.schedule_at(
                datetime.datetime.utcfromtimestamp(value), key,
                self._remove_streaming_permission(member))

    async def _delete_from_redis(self, key: int) -> None:
        """Remove the entry for `key` (a user ID) from the redis cache."""
        await self.task_cache.delete(key)

    @commands.command(aliases=("streaming", ))
    @commands.has_any_role(*STAFF_ROLES)
    async def stream(self,
                     ctx: commands.Context,
                     user: discord.Member,
                     duration: Expiry = None,
                     *_) -> None:
        """
        Temporarily grant streaming permissions to a user for a given duration.

        A unit of time should be appended to the duration.
        Units (∗case-sensitive):
        \u2003`y` - years
        \u2003`m` - months∗
        \u2003`w` - weeks
        \u2003`d` - days
        \u2003`h` - hours
        \u2003`M` - minutes∗
        \u2003`s` - seconds

        Alternatively, an ISO 8601 timestamp can be provided for the duration.
        """
        # if duration is none then calculate default duration
        if duration is None:
            now = datetime.datetime.utcnow()
            duration = now + datetime.timedelta(
                minutes=VideoPermission.default_permission_duration)

        # Check if user already has streaming permission
        already_allowed = any(Roles.video == role.id for role in user.roles)
        if already_allowed:
            await ctx.send(f"{Emojis.cross_mark} This user can already stream."
                           )
            return

        # Schedule task to remove streaming permission from Member and add it to task cache
        self.scheduler.schedule_at(duration, user.id,
                                   self._remove_streaming_permission(user))
        await self._add_to_redis_cache(user.id, duration.timestamp())
        await user.add_roles(discord.Object(Roles.video),
                             reason="Temporary streaming access granted")
        duration = format_infraction_with_duration(str(duration))
        await ctx.send(
            f"{Emojis.check_mark} {user.mention} can now stream until {duration}."
        )

    @commands.command(aliases=("pstream", ))
    @commands.has_any_role(*STAFF_ROLES)
    async def permanentstream(self, ctx: commands.Context,
                              user: discord.Member, *_) -> None:
        """Permanently give user a streaming permission."""
        # Check if user already has streaming permission
        already_allowed = any(Roles.video == role.id for role in user.roles)
        if already_allowed:
            # A scheduled revocation means the permission was temporary; make it permanent.
            if user.id in self.scheduler:
                self.scheduler.cancel(user.id)
                await self._delete_from_redis(user.id)
                await ctx.send(
                    f"{Emojis.check_mark} Moved temporary permission to permanent"
                )
                return
            await ctx.send(f"{Emojis.cross_mark} This user can already stream."
                           )
            return

        await user.add_roles(discord.Object(Roles.video),
                             reason="Permanent streaming access granted")
        await ctx.send(
            f"{Emojis.check_mark} {user.mention} can now stream forever")

    @commands.command(aliases=("unstream", ))
    @commands.has_any_role(*STAFF_ROLES)
    async def revokestream(self, ctx: commands.Context,
                           user: discord.Member) -> None:
        """Take away streaming permission from a user."""
        # Check if user has the streaming permission to begin with
        allowed = any(Roles.video == role.id for role in user.roles)
        if allowed:
            # Cancel scheduled task to take away streaming permission to avoid errors
            if user.id in self.scheduler:
                self.scheduler.cancel(user.id)
            await self._remove_streaming_permission(user)
            await ctx.send(
                f"{Emojis.check_mark} Streaming permission taken from {user.display_name}."
            )
        else:
            await ctx.send(
                f"{Emojis.cross_mark} This user already can't stream.")

    def cog_unload(self) -> None:
        """Cancel all scheduled tasks."""
        # Cancel the reload task first so it can't schedule more work after cancel_all.
        self.reload_task.cancel()
        self.reload_task.add_done_callback(
            lambda _: self.scheduler.cancel_all())
Exemple #14
0
class BrandingManager(commands.Cog):
    """
    Manages the guild's branding.

    The purpose of this cog is to help automate the synchronization of the branding
    repository with the guild. It is capable of discovering assets in the repository
    via GitHub's API, resolving download urls for them, and delegating
    to the `bot` instance to upload them to the guild.

    BrandingManager is designed to be entirely autonomous. Its `daemon` background task awakens
    once a day (see `time_until_midnight`) to detect new seasons, or to cycle icons within a single
    season. The daemon can be turned on and off via the `daemon` cmd group. The value set via
    its `start` and `stop` commands is persisted across sessions. If turned on, the daemon will
    automatically start on the next bot start-up. Otherwise, it will wait to be started manually.

    All supported operations, e.g. setting seasons, applying the branding, or cycling icons, can
    also be invoked manually, via the following API:

        branding list
            - Show all available seasons

        branding set <season_name>
            - Set the cog's internal state to represent `season_name`, if it exists
            - If no `season_name` is given, set chronologically current season
            - This will not automatically apply the season's branding to the guild,
              the cog's state can be detached from the guild
            - Seasons can therefore be 'previewed' using this command

        branding info
            - View detailed information about resolved assets for current season

        branding refresh
            - Refresh internal state, i.e. synchronize with branding repository

        branding apply
            - Apply the current internal state to the guild, i.e. upload the assets

        branding cycle
            - If there are multiple available icons for current season, randomly pick
              and apply the next one

    The daemon calls these methods autonomously as appropriate. The use of this cog
    is locked to moderation roles. As it performs media asset uploads, it is prone to
    rate-limits - the `apply` command should be used with caution. The `set` command can,
    however, be used freely to 'preview' seasonal branding and check whether paths have been
    resolved as appropriate.

    While the bot is in debug mode, it will 'mock' asset uploads by logging the passed
    download urls and pretending that the upload was successful. Make use of this
    to test this cog's behaviour.
    """

    current_season: t.Type[_seasons.SeasonBase]

    banner: t.Optional[GitHubFile]

    available_icons: t.List[GitHubFile]
    remaining_icons: t.List[GitHubFile]

    days_since_cycle: t.Iterator

    daemon: t.Optional[asyncio.Task]

    # Branding configuration
    branding_configuration = RedisCache()

    def __init__(self, bot: Bot) -> None:
        """
        Assign safe default values on init.

        At this point, no branding information is available yet. Most of these
        attributes are overwritten once the daemon connects, or once the
        `refresh` command is used.
        """
        self.bot = bot
        self.current_season = _seasons.get_current_season()

        # No assets resolved yet; a refresh populates these.
        self.banner = None
        self.available_icons = []
        self.remaining_icons = []

        # Yields None until `_reset_days_since_cycle` configures a real sequence.
        self.days_since_cycle = itertools.cycle([None])

        self.daemon = None
        self._startup_task = self.bot.loop.create_task(self._initial_start_daemon())

    async def _initial_start_daemon(self) -> None:
        """Start the daemon at cog load if it was previously marked active."""
        is_active = await self.branding_configuration.get("daemon_active")
        if is_active:
            self.daemon = self.bot.loop.create_task(self._daemon_func())

    @property
    def _daemon_running(self) -> bool:
        """True if the daemon is currently active, False otherwise."""
        if self.daemon is None:
            return False
        return not self.daemon.done()

    async def _daemon_func(self) -> None:
        """
        Manage all automated behaviour of the BrandingManager cog.

        Once a day, the daemon performs the following tasks:
            - Update `current_season`
            - Poll GitHub API to see if the available branding for `current_season` has changed
            - Update assets if changes are detected (banner, guild icon, bot avatar, bot nickname)
            - Check whether it's time to cycle guild icons

        The internal loop runs once when activated, then periodically at the time
        given by `time_until_midnight`.

        All method calls in the internal loop are considered safe, i.e. no errors propagate
        to the daemon's loop. The daemon itself does not perform any error handling on its own.
        """
        await self.bot.wait_until_guild_available()

        while True:
            self.current_season = _seasons.get_current_season()

            if await self.refresh():
                # Repository content changed; push the new assets to the guild.
                await self.apply()
            elif next(self.days_since_cycle) == Branding.cycle_frequency:
                # No new branding, but the icon rotation is due.
                await self.cycle()

            # Sleep until the next midnight before repeating.
            await asyncio.sleep(time_until_midnight().total_seconds())

    async def _info_embed(self) -> discord.Embed:
        """Build an informative embed describing the current season."""
        season = self.current_season
        embed = discord.Embed(description=season.description,
                              colour=season.colour)

        # Non-evergreen seasons also show their active months in the title.
        if season is _seasons.SeasonBase:
            title = season.season_name
        else:
            months = ', '.join(str(m) for m in season.months)
            title = f"{season.season_name} ({months})"

        # Use the author field to show the season's name and avatar if available
        embed.set_author(name=title)

        embed.add_field(
            name="Banner",
            value=self.banner.path if self.banner is not None else "Unavailable",
            inline=False,
        )
        embed.add_field(
            name="Available icons",
            value=pretty_files(self.available_icons) or "Unavailable",
            inline=False,
        )

        # Only display cycle frequency if we're actually cycling
        if len(self.available_icons) > 1 and Branding.cycle_frequency:
            embed.set_footer(
                text=f"Icon cycle frequency: {Branding.cycle_frequency}")

        return embed

    async def _reset_remaining_icons(self) -> None:
        """Set `remaining_icons` to a shuffled copy of `available_icons`."""
        icon_count = len(self.available_icons)
        # `random.sample` with the full length returns a shuffled copy.
        self.remaining_icons = random.sample(self.available_icons, k=icon_count)

    async def _reset_days_since_cycle(self) -> None:
        """
        Reset the `days_since_cycle` iterator based on configured frequency.

        If the current season only has 1 icon, or if `Branding.cycle_frequency` is falsey,
        the iterator will always yield None. This signals that the icon shouldn't be cycled.

        Otherwise, it will yield ints in range [1, `Branding.cycle_frequency`] indefinitely.
        When the iterator yields a value equal to `Branding.cycle_frequency`, it is time to cycle.
        """
        should_cycle = len(self.available_icons) > 1 and Branding.cycle_frequency
        sequence = range(1, Branding.cycle_frequency + 1) if should_cycle else [None]
        self.days_since_cycle = itertools.cycle(sequence)

    async def _get_files(
            self,
            path: str,
            include_dirs: bool = False) -> t.Dict[str, GitHubFile]:
        """
        Get files at `path` in the branding repository.

        If `include_dirs` is False (default), only returns files at `path`.
        Otherwise, will return both files and directories. Never returns symlinks.

        Return dict mapping from filename to corresponding `GitHubFile` instance.
        This may return an empty dict if the response status is non-200,
        or if the target directory is empty.
        """
        url = f"{_constants.BRANDING_URL}/{path}"
        async with self.bot.http_session.get(url,
                                             headers=_constants.HEADERS,
                                             params=_constants.PARAMS) as resp:
            if resp.status != _constants.STATUS_OK:
                # Short-circuit on any non-200 response.
                log.error(f"GitHub API returned non-200 response: {resp}")
                return {}
            directory = await resp.json()  # Directory listing at `path`

        allowed_types = {"file"}
        if include_dirs:
            allowed_types.add("dir")

        files = {}
        for entry in directory:
            if entry["type"] in allowed_types:
                files[entry["name"]] = GitHubFile(entry["download_url"],
                                                  entry["path"], entry["sha"])
        return files

    async def refresh(self) -> bool:
        """
        Synchronize available assets with the branding repository.

        When the current season is not the evergreen one and is missing at least
        one asset, the evergreen (SeasonBase) directory serves as a fallback for
        the missing assets. Assets found in neither directory are left unset.

        Return True if the branding has changed, i.e. we entered a new season or
        the current season's directory changed in the branding repository.
        """
        previous = (self.banner, self.available_icons)

        season_path = self.current_season.branding_path
        seasonal_dir = await self._get_files(season_path, include_dirs=True)

        # Only fetch the fallback directory when it can actually fill a gap.
        wanted = (_constants.FILE_BANNER, _constants.FILE_AVATAR,
                  _constants.SERVER_ICONS)
        missing_assets = not all(asset in seasonal_dir for asset in wanted)

        fallback_dir = {}
        if missing_assets and self.current_season is not _seasons.SeasonBase:
            fallback_dir = await self._get_files(
                _seasons.SeasonBase.branding_path, include_dirs=True)

        # None is a safe value when the banner exists in neither directory.
        banner_file = seasonal_dir.get(_constants.FILE_BANNER)
        if banner_file is None:
            banner_file = fallback_dir.get(_constants.FILE_BANNER)
        self.banner = banner_file

        # Server icons live in a sub-directory, requiring another API call.
        if _constants.SERVER_ICONS in seasonal_dir:
            icons_path = f"{season_path}/{_constants.SERVER_ICONS}"
        elif _constants.SERVER_ICONS in fallback_dir:
            icons_path = f"{_seasons.SeasonBase.branding_path}/{_constants.SERVER_ICONS}"
        else:
            # This should never be the case, but an empty list is a safe value.
            icons_path = None

        if icons_path is None:
            self.available_icons = []
        else:
            icons_dir = await self._get_files(icons_path)
            self.available_icons = list(icons_dir.values())

        # `GitHubFile` carries a `sha` attr, so content changes are detected too.
        changed = previous != (self.banner, self.available_icons)

        if changed:
            log.info(
                f"New branding detected (season: {self.current_season.season_name})"
            )
            await self._reset_remaining_icons()
            await self._reset_days_since_cycle()

        return changed

    async def cycle(self) -> bool:
        """
        Apply the next-up server icon.

        Return True if an icon is available and is applied successfully,
        False otherwise.
        """
        if not self.available_icons:
            log.info("Cannot cycle: no icons for this season")
            return False

        # Refill and reshuffle the queue once it runs dry.
        if not self.remaining_icons:
            log.info("Reset & shuffle remaining icons")
            await self._reset_remaining_icons()

        next_icon = self.remaining_icons.pop(0)
        return await self.set_icon(next_icon.download_url)

    async def apply(self) -> t.List[str]:
        """
        Apply current branding to the guild and bot.

        This delegates to the bot instance to do all the work; we only provide
        download urls for available assets. Assets unavailable in the branding
        repo will be ignored.

        Return a list of names of all failed assets. An asset is considered
        failed if it isn't found in the branding repo, or if something goes
        wrong while the bot is trying to apply it. An empty list means that
        all assets were applied successfully.
        """
        banner_ok = False
        if self.banner is not None:
            banner_ok = await self.set_banner(self.banner.download_url)

        icon_ok = await self.cycle()

        outcomes = {"banner": banner_ok, "icon": icon_ok}
        return [asset for asset, ok in outcomes.items() if not ok]

    @commands.has_any_role(*MODERATION_ROLES)
    @commands.group(name="branding")
    async def branding_cmds(self, ctx: commands.Context) -> None:
        """Manual branding control."""
        # With no subcommand given, show the group's help instead.
        if ctx.invoked_subcommand is None:
            await ctx.send_help(ctx.command)

    @branding_cmds.command(name="list", aliases=["ls"])
    async def branding_list(self, ctx: commands.Context) -> None:
        """List all available seasons and branding sources."""
        embed = discord.Embed(title="Available seasons",
                              colour=Colours.soft_green)

        for season in _seasons.get_all_seasons():
            # The evergreen base season has no month restriction.
            if season is _seasons.SeasonBase:
                active_when = "always"
            else:
                months = ', '.join(str(m) for m in season.months)
                active_when = f"in {months}"

            embed.add_field(
                name=season.season_name,
                value=f"Active {active_when}\nBranding: {season.branding_path}",
                inline=False,
            )

        await ctx.send(embed=embed)

    @branding_cmds.command(name="set")
    async def branding_set(self,
                           ctx: commands.Context,
                           *,
                           season_name: t.Optional[str] = None) -> None:
        """
        Manually set season, or reset to current if none given.

        Season search is a case-less comparison against both the seasonal class
        name and its `season_name` attr.

        This only pre-loads the cog's internal state to the chosen season; it
        does not automatically apply the branding. As that is an expensive
        operation, the `apply` command must be called explicitly afterwards.

        This means the command can be used to 'preview' a season, gathering
        info about its available assets without applying them to the guild.

        If the daemon is running, it will automatically reset the season to
        current when it wakes up. The season set via this command can therefore
        remain 'detached' from what it should be - the daemon will make sure
        that it's set back properly.
        """
        if season_name is not None:
            new_season = _seasons.get_season(season_name)
            if new_season is None:
                raise _errors.BrandingError("No such season exists")
        else:
            new_season = _seasons.get_current_season()

        if new_season is self.current_season:
            raise _errors.BrandingError(
                f"Season {self.current_season.season_name} already active")

        self.current_season = new_season
        # Immediately sync assets for the newly selected season.
        await self.branding_refresh(ctx)

    @branding_cmds.command(name="info", aliases=["status"])
    async def branding_info(self, ctx: commands.Context) -> None:
        """
        Show available assets for the current season.

        Use this to confirm that assets have been resolved properly;
        `apply` attempts to upload exactly the assets listed here.
        """
        embed = await self._info_embed()
        await ctx.send(embed=embed)

    @branding_cmds.command(name="refresh")
    async def branding_refresh(self, ctx: commands.Context) -> None:
        """Sync currently available assets with the branding repository."""
        async with ctx.typing():
            # Refresh internal state first, then show the result via `info`.
            await self.refresh()
            await self.branding_info(ctx)

    @branding_cmds.command(name="apply")
    async def branding_apply(self, ctx: commands.Context) -> None:
        """
        Apply the current season's branding to the guild.

        Use `info` to check which assets will be applied. Reports which
        assets, if any, failed to apply.
        """
        async with ctx.typing():
            failed = await self.apply()
            if failed:
                raise _errors.BrandingError(
                    f"Failed to apply following assets: {', '.join(failed)}")

            embed = discord.Embed(
                description=f"All assets applied {Emojis.ok_hand}",
                colour=Colours.soft_green)
            await ctx.send(embed=embed)

    @branding_cmds.command(name="cycle")
    async def branding_cycle(self, ctx: commands.Context) -> None:
        """
        Apply the next-up guild icon, if multiple are available.

        The order is random.
        """
        async with ctx.typing():
            if not await self.cycle():
                raise _errors.BrandingError("Failed to cycle icon")

            embed = discord.Embed(description=f"Success {Emojis.ok_hand}",
                                  colour=Colours.soft_green)
            await ctx.send(embed=embed)

    @branding_cmds.group(name="daemon", aliases=["d", "task"])
    async def daemon_group(self, ctx: commands.Context) -> None:
        """Control the background daemon."""
        # With no subcommand given, show the group's help instead.
        if ctx.invoked_subcommand is None:
            await ctx.send_help(ctx.command)

    @daemon_group.command(name="status")
    async def daemon_status(self, ctx: commands.Context) -> None:
        """Check whether the daemon is currently active."""
        if not self._daemon_running:
            embed = discord.Embed(description="Daemon not running",
                                  colour=Colours.soft_red)
        else:
            embed = discord.Embed(
                description=f"Daemon running {Emojis.ok_hand}",
                colour=Colours.soft_green)
            # Footer shows when the daemon will next wake up.
            next_refresh = (arrow.utcnow() + time_until_midnight()).humanize()
            embed.set_footer(text=f"Next refresh {next_refresh}")

        await ctx.send(embed=embed)

    @daemon_group.command(name="start")
    async def daemon_start(self, ctx: commands.Context) -> None:
        """If the daemon isn't running, start it."""
        if self._daemon_running:
            raise _errors.BrandingError("Daemon already running!")

        self.daemon = self.bot.loop.create_task(self._daemon_func())
        # Record the activated state in persistent configuration.
        await self.branding_configuration.set("daemon_active", True)

        embed = discord.Embed(description=f"Daemon started {Emojis.ok_hand}",
                              colour=Colours.soft_green)
        await ctx.send(embed=embed)

    @daemon_group.command(name="stop")
    async def daemon_stop(self, ctx: commands.Context) -> None:
        """If the daemon is running, stop it."""
        if not self._daemon_running:
            raise _errors.BrandingError("Daemon not running!")

        self.daemon.cancel()
        # Record the deactivated state in persistent configuration.
        await self.branding_configuration.set("daemon_active", False)

        embed = discord.Embed(description=f"Daemon stopped {Emojis.ok_hand}",
                              colour=Colours.soft_green)
        await ctx.send(embed=embed)

    async def _fetch_image(self, url: str) -> bytes:
        """Download the image at `url` and return its raw bytes."""
        log.debug(f"Getting image from: {url}")
        async with self.bot.http_session.get(url) as resp:
            body = await resp.read()
        return body

    async def _apply_asset(self, target: discord.Guild,
                           asset: _constants.AssetType, url: str,
                           *, upload_timeout: float = 5) -> bool:
        """
        Internal method for applying media assets to the guild.

        This shouldn't be called directly. The purpose of this method is mainly
        generic error handling to reduce needless code repetition.

        Args:
            target: Guild whose asset will be edited.
            asset: Asset type to set; `asset.value` names the `Guild.edit` kwarg.
            url: Download URL of the new asset image.
            upload_timeout: Seconds to wait for the upload before giving up.
                Defaults to 5, the previously hard-coded value.

        Return True if upload was successful, False otherwise.
        """
        log.info(f"Attempting to set {asset.name}: {url}")

        # Fetch outside the try: a download failure is a distinct error from
        # an upload failure and should propagate to the caller.
        kwargs = {asset.value: await self._fetch_image(url)}
        try:
            async with async_timeout.timeout(upload_timeout):
                await target.edit(**kwargs)

        except asyncio.TimeoutError:
            log.info("Asset upload timed out")
            return False

        except discord.HTTPException as discord_error:
            # Keep the traceback in the log but report failure to the caller.
            log.exception("Asset upload failed", exc_info=discord_error)
            return False

        else:
            log.info("Asset successfully applied")
            return True

    @_decorators.mock_in_debug(return_value=True)
    async def set_banner(self, url: str) -> bool:
        """Set the guild's banner to the image at `url`."""
        target_guild = self.bot.get_guild(Guild.id)
        if target_guild is None:
            log.info("Failed to get guild instance, aborting asset upload")
            return False

        return await self._apply_asset(target_guild,
                                       _constants.AssetType.BANNER, url)

    @_decorators.mock_in_debug(return_value=True)
    async def set_icon(self, url: str) -> bool:
        """Set the guild's icon to the image at `url`."""
        target_guild = self.bot.get_guild(Guild.id)
        if target_guild is None:
            log.info("Failed to get guild instance, aborting asset upload")
            return False

        return await self._apply_asset(target_guild,
                                       _constants.AssetType.SERVER_ICON, url)

    def cog_unload(self) -> None:
        """Cancel the startup and daemon tasks."""
        self._startup_task.cancel()
        # The daemon may never have been started.
        if self.daemon is not None:
            self.daemon.cancel()
Exemple #15
0
class Filtering(Cog):
    """Filtering out invites, blacklisting domains, and warning us of certain regular expressions."""

    # Redis cache mapping a user ID to the last timestamp a bad nickname alert was sent
    name_alerts = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot
        self.scheduler = Scheduler(self.__class__.__name__)
        # Lock used to serialise nickname checks (see `name_lock` usage elsewhere in the cog).
        self.name_lock = asyncio.Lock()

        staff_mistake_str = "If you believe this was a mistake, please let staff know!"
        # Static configuration for every filter/watchlist this cog runs.
        # Keys per entry (as consumed elsewhere in the cog): enabled, function,
        # type, content_only, schedule_deletion, and optionally
        # user_notification, notification_msg and ping_everyone.
        self.filters = {
            "filter_zalgo": {
                "enabled":
                Filter.filter_zalgo,
                "function":
                self._has_zalgo,
                "type":
                "filter",
                "content_only":
                True,
                "user_notification":
                Filter.notify_user_zalgo,
                "notification_msg":
                ("Your post has been removed for abusing Unicode character rendering (aka Zalgo text). "
                 f"{staff_mistake_str}"),
                "schedule_deletion":
                False
            },
            "filter_invites": {
                "enabled":
                Filter.filter_invites,
                "function":
                self._has_invites,
                "type":
                "filter",
                "content_only":
                True,
                "user_notification":
                Filter.notify_user_invites,
                "notification_msg":
                (f"Per Rule 6, your invite link has been removed. {staff_mistake_str}\n\n"
                 r"Our server rules can be found here: <https://pythondiscord.com/pages/rules>"
                 ),
                "schedule_deletion":
                False
            },
            "filter_domains": {
                "enabled":
                Filter.filter_domains,
                "function":
                self._has_urls,
                "type":
                "filter",
                "content_only":
                True,
                "user_notification":
                Filter.notify_user_domains,
                "notification_msg":
                (f"Your URL has been removed because it matched a blacklisted domain. {staff_mistake_str}"
                 ),
                "schedule_deletion":
                False
            },
            "filter_everyone_ping": {
                "enabled":
                Filter.filter_everyone_ping,
                "function":
                self._has_everyone_ping,
                "type":
                "filter",
                "content_only":
                True,
                "user_notification":
                Filter.notify_user_everyone_ping,
                "notification_msg":
                ("Please don't try to ping `@everyone` or `@here`. "
                 f"Your message has been removed. {staff_mistake_str}"),
                "schedule_deletion":
                False,
                "ping_everyone":
                False
            },
            "watch_regex": {
                "enabled": Filter.watch_regex,
                "function": self._has_watch_regex_match,
                "type": "watchlist",
                "content_only": True,
                "schedule_deletion": True
            },
            "watch_rich_embeds": {
                "enabled": Filter.watch_rich_embeds,
                "function": self._has_rich_embed,
                "type": "watchlist",
                "content_only": False,
                "schedule_deletion": False
            }
        }

        # Restore scheduled deletions of offensive messages on cog load
        # (method defined elsewhere in the cog).
        self.bot.loop.create_task(self.reschedule_offensive_msg_deletion())

    def cog_unload(self) -> None:
        """Cancel scheduled tasks."""
        self.scheduler.cancel_all()

    def _get_filterlist_items(self, list_type: str, *, allowed: bool) -> list:
        """Fetch items from the filter_list_cache."""
        # Cache entries are keyed by e.g. "DOMAIN.True" / "DOMAIN.False".
        return self.bot.filter_list_cache[
            f"{list_type.upper()}.{allowed}"].keys()

    @staticmethod
    def _expand_spoilers(text: str) -> str:
        """Return a string containing all interpretations of a spoilered message."""
        # Assuming SPOILER_RE has a capturing group, even indices of the split
        # are text outside spoiler markers and odd indices are text inside them;
        # concatenating all three views lets filters scan every interpretation.
        split_text = SPOILER_RE.split(text)
        return ''.join(split_text[0::2] + split_text[1::2] + split_text)

    @property
    def mod_log(self) -> ModLog:
        """Get currently loaded ModLog cog instance."""
        return self.bot.get_cog("ModLog")

    @Cog.listener()
    async def on_message(self, msg: Message) -> None:
        """Invoke message filter for new messages."""
        await self._filter_message(msg)

        # Ignore webhook messages.
        if msg.webhook_id is None:
            await self.check_bad_words_in_name(msg.author)

    @Cog.listener()
    async def on_message_edit(self, before: Message, after: Message) -> None:
        """
        Invoke message filter for message edits.

        If there have been multiple edits, calculate the time delta from the previous edit.
        """
        # NOTE(review): `relativedelta(...).microseconds` is only the microsecond
        # component of the delta, not the total elapsed time — confirm this is
        # the value `_filter_message` expects.
        if not before.edited_at:
            delta = relativedelta(after.edited_at,
                                  before.created_at).microseconds
        else:
            delta = relativedelta(after.edited_at,
                                  before.edited_at).microseconds
        await self._filter_message(after, delta)

    def get_name_matches(self, name: str) -> List[re.Match]:
        """Check bad words from passed string (name). Return list of matches."""
        matches = []
        watchlist_patterns = self._get_filterlist_items('filter_token',
                                                        allowed=False)
        for pattern in watchlist_patterns:
            if match := re.search(pattern, name, flags=re.IGNORECASE):
                matches.append(match)
        return matches
Exemple #16
0
class ModPings(Cog):
    """Commands for a moderator to turn moderator pings on and off."""

    # RedisCache[discord.Member.id, 'Naïve ISO 8601 string']
    # The cache's keys are mods who have pings off.
    # The cache's values are the times when the role should be re-applied to them, stored in ISO format.
    pings_off_mods = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot
        self._role_scheduler = Scheduler(self.__class__.__name__)

        # Populated in `reschedule_roles` once the guild is available.
        self.guild = None
        self.moderators_role = None

        self.reschedule_task = self.bot.loop.create_task(self.reschedule_roles(), name="mod-pings-reschedule")

    async def reschedule_roles(self) -> None:
        """Reschedule moderators role re-apply times."""
        await self.bot.wait_until_guild_available()
        self.guild = self.bot.get_guild(Guild.id)
        self.moderators_role = self.guild.get_role(Roles.moderators)

        mod_team = self.guild.get_role(Roles.mod_team)
        pings_on = self.moderators_role.members
        pings_off = await self.pings_off_mods.to_dict()

        log.trace("Applying the moderators role to the mod team where necessary.")
        for mod in mod_team.members:
            if mod in pings_on:  # Make sure that on-duty mods aren't in the cache.
                # Fix: `pings_off` is keyed by member ID (int) — see the `set`
                # call in `off_command` — so membership must be tested with
                # `mod.id`. Testing the Member object was always False, which
                # left stale cache entries behind.
                if mod.id in pings_off:
                    await self.pings_off_mods.delete(mod.id)
                continue

            # Keep the role off only for those in the cache.
            if mod.id not in pings_off:
                await self.reapply_role(mod)
            else:
                # Stored values are naive ISO 8601 strings; strip tzinfo defensively.
                expiry = isoparse(pings_off[mod.id]).replace(tzinfo=None)
                self._role_scheduler.schedule_at(expiry, mod.id, self.reapply_role(mod))

    async def reapply_role(self, mod: Member) -> None:
        """Reapply the moderator's role to the given moderator."""
        log.trace(f"Re-applying role to mod with ID {mod.id}.")
        await mod.add_roles(self.moderators_role, reason="Pings off period expired.")

    @group(name='modpings', aliases=('modping',), invoke_without_command=True)
    @has_any_role(*MODERATION_ROLES)
    async def modpings_group(self, ctx: Context) -> None:
        """Allow the removal and re-addition of the pingable moderators role."""
        await ctx.send_help(ctx.command)

    @modpings_group.command(name='off')
    @has_any_role(*MODERATION_ROLES)
    async def off_command(self, ctx: Context, duration: Expiry) -> None:
        """
        Temporarily removes the pingable moderators role for a set amount of time.

        A unit of time should be appended to the duration.
        Units (∗case-sensitive):
        \u2003`y` - years
        \u2003`m` - months∗
        \u2003`w` - weeks
        \u2003`d` - days
        \u2003`h` - hours
        \u2003`M` - minutes∗
        \u2003`s` - seconds

        Alternatively, an ISO 8601 timestamp can be provided for the duration.

        The duration cannot be longer than 30 days.
        """
        # The Expiry converter yields an absolute datetime, not a timedelta.
        duration: datetime.datetime
        delta = duration - datetime.datetime.utcnow()
        if delta > datetime.timedelta(days=30):
            await ctx.send(":x: Cannot remove the role for longer than 30 days.")
            return

        mod = ctx.author

        until_date = duration.replace(microsecond=0).isoformat()  # Looks noisy with microseconds.
        await mod.remove_roles(self.moderators_role, reason=f"Turned pings off until {until_date}.")

        await self.pings_off_mods.set(mod.id, duration.isoformat())

        # Allow rescheduling the task without cancelling it separately via the `on` command.
        if mod.id in self._role_scheduler:
            self._role_scheduler.cancel(mod.id)
        self._role_scheduler.schedule_at(duration, mod.id, self.reapply_role(mod))

        await ctx.send(f"{Emojis.check_mark} Moderators role has been removed until {until_date}.")

    @modpings_group.command(name='on')
    @has_any_role(*MODERATION_ROLES)
    async def on_command(self, ctx: Context) -> None:
        """Re-apply the pingable moderators role."""
        mod = ctx.author
        if mod in self.moderators_role.members:
            await ctx.send(":question: You already have the role.")
            return

        await mod.add_roles(self.moderators_role, reason="Pings off period canceled.")

        await self.pings_off_mods.delete(mod.id)

        # We assume the task exists. Lack of it may indicate a bug.
        self._role_scheduler.cancel(mod.id)

        await ctx.send(f"{Emojis.check_mark} Moderators role has been re-applied.")

    def cog_unload(self) -> None:
        """Cancel role tasks when the cog unloads."""
        log.trace("Cog unload: canceling role tasks.")
        self.reschedule_task.cancel()
        self._role_scheduler.cancel_all()
Exemple #17
0
class Silence(commands.Cog):
    """Commands for stopping channel messages for `everyone` role in a channel."""

    # Maps muted channel IDs to their previous overwrites for send_message and add_reactions.
    # Overwrites are stored as JSON.
    previous_overwrites = RedisCache()

    # Maps muted channel IDs to POSIX timestamps of when they'll be unsilenced.
    # A timestamp equal to -1 means it's indefinite.
    unsilence_timestamps = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot
        self.scheduler = Scheduler(self.__class__.__name__)
        # Defer guild-dependent setup until the guild is actually available.
        self._init_task = scheduling.create_task(self._async_init(),
                                                 event_loop=self.bot.loop)

    async def _async_init(self) -> None:
        """Set instance attributes once the guild is available and reschedule unsilences."""
        await self.bot.wait_until_guild_available()
        guild = self.bot.get_guild(constants.Guild.id)

        self._everyone_role = guild.default_role
        self._verified_voice_role = guild.get_role(constants.Roles.voice_verified)
        self._mod_alerts_channel = self.bot.get_channel(constants.Channels.mod_alerts)

        mod_log_channel = self.bot.get_channel(constants.Channels.mod_log)
        self.notifier = SilenceNotifier(mod_log_channel)

        await self._reschedule()

    async def send_message(self,
                           message: str,
                           source_channel: TextChannel,
                           target_channel: TextOrVoiceChannel,
                           *,
                           alert_target: bool = False) -> None:
        """Send confirmation to `source_channel` and, optionally, a notification to `target_channel`."""
        same_channel = source_channel == target_channel

        # Confirm in the invocation channel first.
        if same_channel:
            await source_channel.send(message.format(channel="current channel"))
        else:
            await source_channel.send(message.format(channel=target_channel.mention))

        if not alert_target:
            return

        # For voice channels, the notice goes to the associated text chat instead.
        if isinstance(target_channel, VoiceChannel):
            voice_chat = self.bot.get_channel(VOICE_CHANNELS.get(target_channel.id))
            if voice_chat and source_channel != voice_chat:
                await voice_chat.send(message.format(channel=target_channel.mention))
        elif not same_channel:
            await target_channel.send(message.format(channel="current channel"))

    @commands.command(aliases=("hush", ))
    @lock(LOCK_NAMESPACE, _select_lock_channel, raise_error=True)
    async def silence(self,
                      ctx: Context,
                      duration_or_channel: typing.Union[
                          TextOrVoiceChannel, HushDurationConverter] = None,
                      duration: HushDurationConverter = 10,
                      *,
                      kick: bool = False) -> None:
        """
        Silence the current channel for `duration` minutes or `forever`.

        Duration is capped at 15 minutes, passing forever makes the silence indefinite.
        Indefinitely silenced channels get added to a notifier which posts notices every 15 minutes from the start.

        Passing a voice channel will attempt to move members out of the channel and back to force sync permissions.
        If `kick` is True, members will not be added back to the voice channel, and members will be unable to rejoin.
        """
        await self._init_task
        channel, duration = self.parse_silence_args(ctx, duration_or_channel, duration)

        channel_info = f"#{channel} ({channel.id})"
        log.debug(f"{ctx.author} is silencing channel {channel_info}.")

        # Threads have no overwrites of their own; the parent channel has to be
        # muted or the thread should be archived instead.
        if isinstance(channel, Thread):
            await ctx.send(":x: Threads cannot be silenced.")
            return

        overwrites_set = await self._set_silence_overwrites(channel, kick=kick)
        if not overwrites_set:
            log.info(
                f"Tried to silence channel {channel_info} but the channel was already silenced."
            )
            await self.send_message(MSG_SILENCE_FAIL, ctx.channel, channel,
                                    alert_target=False)
            return

        # Moving members out and back (or kicking them) forces a permission sync.
        if isinstance(channel, VoiceChannel):
            if kick:
                await self._kick_voice_members(channel)
            else:
                await self._force_voice_sync(channel)

        await self._schedule_unsilence(ctx, channel, duration)

        if duration is not None:
            log.info(f"Silenced {channel_info} for {duration} minute(s).")
            await self.send_message(MSG_SILENCE_SUCCESS.format(duration=duration),
                                    ctx.channel, channel, alert_target=True)
        else:
            self.notifier.add_channel(channel)
            log.info(f"Silenced {channel_info} indefinitely.")
            await self.send_message(MSG_SILENCE_PERMANENT, ctx.channel, channel,
                                    alert_target=True)

    @staticmethod
    def parse_silence_args(
        ctx: Context, duration_or_channel: typing.Union[TextOrVoiceChannel,
                                                        int],
        duration: HushDurationConverter
    ) -> typing.Tuple[TextOrVoiceChannel, Optional[int]]:
        """Helper method to parse the arguments of the silence command."""
        # The first positional may be either the target channel or the duration.
        channel = ctx.channel
        if isinstance(duration_or_channel, (TextChannel, VoiceChannel)):
            channel = duration_or_channel
        elif duration_or_channel:
            duration = duration_or_channel

        # A duration of -1 is converted to None, which denotes an indefinite silence.
        return channel, None if duration == -1 else duration

    async def _set_silence_overwrites(self,
                                      channel: TextOrVoiceChannel,
                                      *,
                                      kick: bool = False) -> bool:
        """Set silence permission overwrites for `channel` and return True if successful."""
        # Capture the permissions about to be clobbered so they can be restored.
        if isinstance(channel, TextChannel):
            role = self._everyone_role
            overwrite = channel.overwrites_for(role)
            prev_overwrites = {
                "send_messages": overwrite.send_messages,
                "add_reactions": overwrite.add_reactions,
                "create_private_threads": overwrite.create_private_threads,
                "create_public_threads": overwrite.create_public_threads,
                "send_messages_in_threads": overwrite.send_messages_in_threads,
            }
        else:
            role = self._verified_voice_role
            overwrite = channel.overwrites_for(role)
            prev_overwrites = {"speak": overwrite.speak}
            if kick:
                prev_overwrites["connect"] = overwrite.connect

        # Bail out if the channel is already silenced, either by the scheduler
        # or because every tracked permission is already explicitly denied.
        already_scheduled = channel.id in self.scheduler
        if already_scheduled or all(value is False for value in prev_overwrites.values()):
            return False

        # Deny every tracked permission, then persist the originals.
        overwrite.update(**dict.fromkeys(prev_overwrites, False))
        await channel.set_permissions(role, overwrite=overwrite)
        await self.previous_overwrites.set(channel.id, json.dumps(prev_overwrites))

        return True

    async def _schedule_unsilence(self, ctx: Context,
                                  channel: TextOrVoiceChannel,
                                  duration: Optional[int]) -> None:
        """Schedule `channel` to be unsilenced after `duration` minutes; None means indefinite."""
        if duration is None:
            # -1 marks an indefinite silence in the timestamp cache.
            await self.unsilence_timestamps.set(channel.id, -1)
            return

        self.scheduler.schedule_later(duration * 60, channel.id,
                                      ctx.invoke(self.unsilence, channel=channel))
        unsilence_time = datetime.now(tz=timezone.utc) + timedelta(minutes=duration)
        await self.unsilence_timestamps.set(channel.id, unsilence_time.timestamp())

    @commands.command(aliases=("unhush", ))
    async def unsilence(self,
                        ctx: Context,
                        *,
                        channel: TextOrVoiceChannel = None) -> None:
        """
        Unsilence the given channel if given, else the current one.

        If the channel was silenced indefinitely, notifications for the channel will stop.
        """
        await self._init_task
        # Default to the channel the command was invoked from.
        target = ctx.channel if channel is None else channel
        log.debug(
            f"Unsilencing channel #{target} from {ctx.author}'s command.")
        await self._unsilence_wrapper(target, ctx)

    @lock_arg(LOCK_NAMESPACE, "channel", raise_error=True)
    async def _unsilence_wrapper(self,
                                 channel: TextOrVoiceChannel,
                                 ctx: Optional[Context] = None) -> None:
        """
        Unsilence `channel` and send a success/failure message to ctx.channel.

        If ctx is None or not passed, `channel` is used in its place.
        If `channel` and ctx.channel are the same, only one message is sent.
        """
        msg_channel = channel if ctx is None else ctx.channel

        if await self._unsilence(channel):
            await self.send_message(MSG_UNSILENCE_SUCCESS,
                                    msg_channel,
                                    channel,
                                    alert_target=True)
            return

        # The channel wasn't tracked as silenced. Inspect the role overwrites
        # to tell a manual (untracked) silence apart from a plain failure.
        if isinstance(channel, VoiceChannel):
            overwrite = channel.overwrites_for(self._verified_voice_role)
            manually_silenced = overwrite.speak is False
        else:
            overwrite = channel.overwrites_for(self._everyone_role)
            manually_silenced = overwrite.send_messages is False or overwrite.add_reactions is False

        # Send fail message to muted channel or voice chat channel, and invocation channel
        message = MSG_UNSILENCE_MANUAL if manually_silenced else MSG_UNSILENCE_FAIL
        await self.send_message(message, msg_channel, channel, alert_target=False)

    async def _unsilence(self, channel: TextOrVoiceChannel) -> bool:
        """
        Unsilence `channel`.

        If `channel` has a silence task scheduled or has its previous overwrites cached, unsilence
        it, cancel the task, and remove it from the notifier. Notify admins if it has a task but
        not cached overwrites.

        Return `True` if channel permissions were changed, `False` otherwise.
        """
        # Fetch the cached overwrites; bail out if the channel isn't silenced.
        cached_overwrites = await self.previous_overwrites.get(channel.id)
        if cached_overwrites is None and channel.id not in self.scheduler:
            log.info(
                f"Tried to unsilence channel #{channel} ({channel.id}) but the channel was not silenced."
            )
            return False

        # Which role was muted depends on the channel type.
        if isinstance(channel, TextChannel):
            role = self._everyone_role
            permissions = "`Send Messages` and `Add Reactions`"
        else:
            role = self._verified_voice_role
            permissions = "`Speak` and `Connect`"
        overwrite = channel.overwrites_for(role)

        if cached_overwrites is None:
            # No stored state to restore: reset every permission the silence
            # could have touched back to the neutral (inherit) value.
            log.info(
                f"Missing previous overwrites for #{channel} ({channel.id}); defaulting to None."
            )
            overwrite.update(send_messages=None,
                             add_reactions=None,
                             create_private_threads=None,
                             create_public_threads=None,
                             send_messages_in_threads=None,
                             speak=None,
                             connect=None)
        else:
            overwrite.update(**json.loads(cached_overwrites))

        await channel.set_permissions(role, overwrite=overwrite)
        if isinstance(channel, VoiceChannel):
            # Role-based voice mutes only apply after members are cycled
            # through another channel (see _force_voice_sync).
            await self._force_voice_sync(channel)

        log.info(f"Unsilenced channel #{channel} ({channel.id}).")

        # Clear all tracking state for this channel.
        self.scheduler.cancel(channel.id)
        self.notifier.remove_channel(channel)
        await self.previous_overwrites.delete(channel.id)
        await self.unsilence_timestamps.delete(channel.id)

        if cached_overwrites is None:
            # Defaults were restored blindly, so ask the admins to verify them.
            await self._mod_alerts_channel.send(
                f"<@&{constants.Roles.admins}> Restored overwrites with default values after unsilencing "
                f"{channel.mention}. Please check that the {permissions} "
                f"overwrites for {role.mention} are at their desired values.")

        return True

    @staticmethod
    async def _get_afk_channel(guild: Guild) -> VoiceChannel:
        """Get a guild's AFK channel, or create one if it does not exist."""
        if guild.afk_channel is not None:
            return guild.afk_channel

        # No AFK channel configured: create a hidden temporary one that
        # members can neither see, join, nor speak in.
        overwrites = {
            guild.default_role: PermissionOverwrite(speak=False,
                                                    connect=False,
                                                    view_channel=False)
        }
        afk_channel = await guild.create_voice_channel("mute-temp",
                                                       overwrites=overwrites)
        log.info(
            f"Failed to get afk-channel, created #{afk_channel} ({afk_channel.id})"
        )
        return afk_channel

    @staticmethod
    async def _kick_voice_members(channel: VoiceChannel) -> None:
        """Remove all non-staff members from a voice channel."""
        log.debug(
            f"Removing all non staff members from #{channel.name} ({channel.id})."
        )

        for member in channel.members:
            if any(role.id in constants.MODERATION_ROLES
                   for role in member.roles):
                # Staff members keep their place in the channel.
                continue

            try:
                await member.move_to(
                    None, reason="Kicking member from voice channel.")
            except Exception as e:
                # Best effort: one failed move shouldn't stop the rest.
                log.debug(f"Failed to move {member.name}. Reason: {e}")
            else:
                log.trace(f"Kicked {member.name} from voice channel.")

        log.debug("Removed all members.")

    async def _force_voice_sync(self, channel: VoiceChannel) -> None:
        """
        Move all non-staff members from `channel` to a temporary channel and back to force toggle role mute.

        Permission modification has to happen before this function.
        """
        # Only delete the staging channel afterwards if we had to create it.
        delete_channel = channel.guild.afk_channel is None
        afk_channel = await self._get_afk_channel(channel.guild)

        try:
            for member in channel.members:
                if any(role.id in constants.MODERATION_ROLES
                       for role in member.roles):
                    # Staff are never bounced.
                    continue

                try:
                    # Bounce the member out and back so the new role
                    # permissions take effect on their voice state.
                    await member.move_to(afk_channel,
                                         reason="Muting VC member.")
                    log.trace(f"Moved {member.name} to afk channel.")
                    await member.move_to(channel, reason="Muting VC member.")
                    log.trace(
                        f"Moved {member.name} to original voice channel.")
                except Exception as e:
                    # Best effort per member; keep processing the others.
                    log.debug(f"Failed to move {member.name}. Reason: {e}")
        finally:
            if delete_channel:
                await afk_channel.delete(
                    reason="Deleting temporary mute channel.")

    async def _reschedule(self) -> None:
        """Reschedule unsilencing of active silences and add permanent ones to the notifier."""
        for channel_id, timestamp in await self.unsilence_timestamps.items():
            channel = self.bot.get_channel(channel_id)
            if channel is None:
                log.info(
                    f"Can't reschedule silence for {channel_id}: channel not found."
                )
                continue

            if timestamp == -1:
                # -1 is the permanent-silence sentinel; only the notifier is needed.
                log.info(
                    f"Adding permanent silence for #{channel} ({channel.id}) to the notifier."
                )
                self.notifier.add_channel(channel)
                continue

            expiry = datetime.fromtimestamp(timestamp, tz=timezone.utc)
            remaining = (expiry - datetime.now(tz=timezone.utc)).total_seconds()
            if remaining > 0:
                log.info(
                    f"Rescheduling silence for #{channel} ({channel.id}).")
                self.scheduler.schedule_later(remaining, channel_id,
                                              self._unsilence_wrapper(channel))
            else:
                # Expired while the bot was down: unsilence immediately.
                # Suppress the error since it's not being invoked by a user via the command.
                with suppress(LockedResourceError):
                    await self._unsilence_wrapper(channel)

    def cog_unload(self) -> None:
        """Cancel the init task and scheduled tasks."""
        # cancel() doesn't block until the task is actually cancelled, so the
        # scheduled tasks are cancelled from a done callback instead. Doing it
        # inline could let _reschedule add tasks after cancel_all had run.
        self._init_task.cancel()
        self._init_task.add_done_callback(lambda _: self.scheduler.cancel_all())

    # This cannot be static (must have a __func__ attribute).
    async def cog_check(self, ctx: Context) -> bool:
        """Only allow moderators to invoke the commands in this cog."""
        moderator_check = commands.has_any_role(*constants.MODERATION_ROLES)
        return await moderator_check.predicate(ctx)
Exemple #18
0
class Stream(commands.Cog):
    """Grant and revoke streaming permissions from members."""

    # Stores tasks to remove streaming permission
    # RedisCache[discord.Member.id, UtcPosixTimestamp]
    task_cache = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot
        self.scheduler = Scheduler(self.__class__.__name__)
        # Restore pending revocations from Redis in the background on startup.
        self.reload_task = self.bot.loop.create_task(
            self._reload_tasks_from_redis())

    def cog_unload(self) -> None:
        """Cancel all scheduled tasks."""
        self.reload_task.cancel()
        self.reload_task.add_done_callback(
            lambda _: self.scheduler.cancel_all())

    async def _revoke_streaming_permission(self,
                                           member: discord.Member) -> None:
        """Remove the streaming permission from the given Member."""
        await self.task_cache.delete(member.id)
        await member.remove_roles(discord.Object(Roles.video),
                                  reason="Streaming access revoked")

    async def _reload_tasks_from_redis(self) -> None:
        """Reload outstanding tasks from redis on startup, delete the task if the member has since left the server."""
        await self.bot.wait_until_guild_available()
        items = await self.task_cache.items()
        for key, value in items:
            member = self.bot.get_guild(Guild.id).get_member(key)

            if not member:
                # Member isn't found in the cache; fall back to the API.
                try:
                    member = await self.bot.get_guild(Guild.id
                                                      ).fetch_member(key)
                except discord.errors.NotFound:
                    log.debug(
                        f"Member {key} left the guild before we could schedule "
                        "the revoking of their streaming permissions.")
                    await self.task_cache.delete(key)
                    continue
                except discord.HTTPException:
                    # Keep the cache entry; a transient error shouldn't drop the task.
                    log.exception(
                        f"Exception while trying to retrieve member {key} from Discord."
                    )
                    continue

            revoke_time = Arrow.utcfromtimestamp(value)
            log.debug(
                f"Scheduling {member} ({member.id}) to have streaming permission revoked at {revoke_time}"
            )
            self.scheduler.schedule_at(
                revoke_time, key, self._revoke_streaming_permission(member))

    @commands.command(aliases=("streaming", ))
    @commands.has_any_role(*STAFF_ROLES)
    async def stream(self,
                     ctx: commands.Context,
                     member: discord.Member,
                     duration: Expiry = None) -> None:
        """
        Temporarily grant streaming permissions to a member for a given duration.

        A unit of time should be appended to the duration.
        Units (∗case-sensitive):
        \u2003`y` - years
        \u2003`m` - months∗
        \u2003`w` - weeks
        \u2003`d` - days
        \u2003`h` - hours
        \u2003`M` - minutes∗
        \u2003`s` - seconds

        Alternatively, an ISO 8601 timestamp can be provided for the duration.
        """
        log.trace(
            f"Attempting to give temporary streaming permission to {member} ({member.id})."
        )

        if duration is None:
            # Use default duration and convert back to datetime as Embed.timestamp doesn't support Arrow
            duration = arrow.utcnow() + timedelta(
                minutes=VideoPermission.default_permission_duration)
            duration = duration.datetime
        elif duration.tzinfo is None:
            # Make duration tz-aware.
            # ISODateTime could already include tzinfo, this check is so it isn't overwritten.
            # FIX: datetime.replace returns a new object rather than mutating
            # in place, so the result must be assigned back.
            duration = duration.replace(tzinfo=timezone.utc)

        # Check if the member already has streaming permission
        already_allowed = any(Roles.video == role.id for role in member.roles)
        if already_allowed:
            await ctx.send(
                f"{Emojis.cross_mark} {member.mention} can already stream.")
            log.debug(
                f"{member} ({member.id}) already has permission to stream.")
            return

        # Schedule task to remove streaming permission from Member and add it to task cache
        self.scheduler.schedule_at(duration, member.id,
                                   self._revoke_streaming_permission(member))
        await self.task_cache.set(member.id, duration.timestamp())

        await member.add_roles(discord.Object(Roles.video),
                               reason="Temporary streaming access granted")

        # Use embed as embed timestamps do timezone conversions.
        embed = discord.Embed(
            description=f"{Emojis.check_mark} {member.mention} can now stream.",
            colour=Colours.soft_green)
        embed.set_footer(
            text=f"Streaming permission has been given to {member} until")
        embed.timestamp = duration

        # Mention in content as mentions in embeds don't ping
        await ctx.send(content=member.mention, embed=embed)

        # Convert here for nicer logging
        revoke_time = format_infraction_with_duration(str(duration))
        log.debug(
            f"Successfully gave {member} ({member.id}) permission to stream until {revoke_time}."
        )

    @commands.command(aliases=("pstream", ))
    @commands.has_any_role(*STAFF_ROLES)
    async def permanentstream(self, ctx: commands.Context,
                              member: discord.Member) -> None:
        """Permanently grants the given member the permission to stream."""
        log.trace(
            f"Attempting to give permanent streaming permission to {member} ({member.id})."
        )

        # Check if the member already has streaming permission
        if any(Roles.video == role.id for role in member.roles):
            if member.id in self.scheduler:
                # Member has temp permission, so cancel the task to revoke later and delete from cache
                self.scheduler.cancel(member.id)
                await self.task_cache.delete(member.id)

                await ctx.send(
                    f"{Emojis.check_mark} Permanently granted {member.mention} the permission to stream."
                )
                log.debug(
                    f"Successfully upgraded temporary streaming permission for {member} ({member.id}) to permanent."
                )
                return

            await ctx.send(
                f"{Emojis.cross_mark} This member can already stream.")
            log.debug(
                f"{member} ({member.id}) already had permanent streaming permission."
            )
            return

        await member.add_roles(discord.Object(Roles.video),
                               reason="Permanent streaming access granted")
        await ctx.send(
            f"{Emojis.check_mark} Permanently granted {member.mention} the permission to stream."
        )
        log.debug(
            f"Successfully gave {member} ({member.id}) permanent streaming permission."
        )

    @commands.command(aliases=("unstream", "rstream"))
    @commands.has_any_role(*STAFF_ROLES)
    async def revokestream(self, ctx: commands.Context,
                           member: discord.Member) -> None:
        """Revoke the permission to stream from the given member."""
        log.trace(
            f"Attempting to remove streaming permission from {member} ({member.id})."
        )

        # Check if the member already has streaming permission
        if any(Roles.video == role.id for role in member.roles):
            if member.id in self.scheduler:
                # Member has temp permission, so cancel the task to revoke later and delete from cache.
                # NOTE: _revoke_streaming_permission deletes the cache entry
                # again below; the double delete is redundant but harmless.
                self.scheduler.cancel(member.id)
                await self.task_cache.delete(member.id)
            await self._revoke_streaming_permission(member)

            await ctx.send(
                f"{Emojis.check_mark} Revoked the permission to stream from {member.mention}."
            )
            log.debug(
                f"Successfully revoked streaming permission from {member} ({member.id})."
            )
            return

        await ctx.send(
            f"{Emojis.cross_mark} This member doesn't have video permissions to remove!"
        )
        log.debug(
            f"{member} ({member.id}) didn't have the streaming permission to remove!"
        )
Exemple #19
0
class Stream(commands.Cog):
    """Grant and revoke streaming permissions from members."""

    # Stores tasks to remove streaming permission
    # RedisCache[discord.Member.id, UtcPosixTimestamp]
    task_cache = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot
        self.scheduler = Scheduler(self.__class__.__name__)
        # Restore pending revocations from Redis in the background on startup.
        self.reload_task = self.bot.loop.create_task(
            self._reload_tasks_from_redis())

    def cog_unload(self) -> None:
        """Cancel all scheduled tasks."""
        self.reload_task.cancel()
        self.reload_task.add_done_callback(
            lambda _: self.scheduler.cancel_all())

    async def _revoke_streaming_permission(self,
                                           member: discord.Member) -> None:
        """Remove the streaming permission from the given Member."""
        await self.task_cache.delete(member.id)
        await member.remove_roles(discord.Object(Roles.video),
                                  reason="Streaming access revoked")

    async def _reload_tasks_from_redis(self) -> None:
        """Reload outstanding tasks from redis on startup, delete the task if the member has since left the server."""
        await self.bot.wait_until_guild_available()
        items = await self.task_cache.items()
        for key, value in items:
            member = self.bot.get_guild(Guild.id).get_member(key)

            if not member:
                # Member isn't found in the cache; fall back to the API.
                try:
                    member = await self.bot.get_guild(Guild.id
                                                      ).fetch_member(key)
                except discord.errors.NotFound:
                    log.debug(
                        f"Member {key} left the guild before we could schedule "
                        "the revoking of their streaming permissions.")
                    await self.task_cache.delete(key)
                    continue
                except discord.HTTPException:
                    # Keep the cache entry; a transient error shouldn't drop the task.
                    log.exception(
                        f"Exception while trying to retrieve member {key} from Discord."
                    )
                    continue

            revoke_time = Arrow.utcfromtimestamp(value)
            log.debug(
                f"Scheduling {member} ({member.id}) to have streaming permission revoked at {revoke_time}"
            )
            self.scheduler.schedule_at(
                revoke_time, key, self._revoke_streaming_permission(member))

    async def _suspend_stream(self, ctx: commands.Context,
                              member: discord.Member) -> None:
        """Suspend a member's stream."""
        await self.bot.wait_until_guild_available()
        voice_state = member.voice

        if not voice_state:
            # Member isn't connected to voice; nothing to suspend.
            return

        # If the user is streaming.
        if voice_state.self_stream:
            # End user's stream by moving them to AFK voice channel and back.
            original_vc = voice_state.channel
            await member.move_to(ctx.guild.afk_channel)
            await member.move_to(original_vc)

            # Notify.
            await ctx.send(f"{member.mention}'s stream has been suspended!")
            log.debug(
                f"Successfully suspended stream from {member} ({member.id}).")
            return

        log.debug(f"No stream found to suspend from {member} ({member.id}).")

    @commands.command(aliases=("streaming", ))
    @commands.has_any_role(*MODERATION_ROLES)
    async def stream(self,
                     ctx: commands.Context,
                     member: discord.Member,
                     duration: Expiry = None) -> None:
        """
        Temporarily grant streaming permissions to a member for a given duration.

        A unit of time should be appended to the duration.
        Units (∗case-sensitive):
        \u2003`y` - years
        \u2003`m` - months∗
        \u2003`w` - weeks
        \u2003`d` - days
        \u2003`h` - hours
        \u2003`M` - minutes∗
        \u2003`s` - seconds

        Alternatively, an ISO 8601 timestamp can be provided for the duration.
        """
        log.trace(
            f"Attempting to give temporary streaming permission to {member} ({member.id})."
        )

        if duration is None:
            # Use default duration and convert back to datetime as Embed.timestamp doesn't support Arrow
            duration = arrow.utcnow() + timedelta(
                minutes=VideoPermission.default_permission_duration)
            duration = duration.datetime
        elif duration.tzinfo is None:
            # Make duration tz-aware.
            # ISODateTime could already include tzinfo, this check is so it isn't overwritten.
            # FIX: datetime.replace returns a new object rather than mutating
            # in place, so the result must be assigned back.
            duration = duration.replace(tzinfo=timezone.utc)

        # Check if the member already has streaming permission
        already_allowed = any(Roles.video == role.id for role in member.roles)
        if already_allowed:
            await ctx.send(
                f"{Emojis.cross_mark} {member.mention} can already stream.")
            log.debug(
                f"{member} ({member.id}) already has permission to stream.")
            return

        # Schedule task to remove streaming permission from Member and add it to task cache
        self.scheduler.schedule_at(duration, member.id,
                                   self._revoke_streaming_permission(member))
        await self.task_cache.set(member.id, duration.timestamp())

        await member.add_roles(discord.Object(Roles.video),
                               reason="Temporary streaming access granted")

        # Use embed as embed timestamps do timezone conversions.
        embed = discord.Embed(
            description=f"{Emojis.check_mark} {member.mention} can now stream.",
            colour=Colours.soft_green)
        embed.set_footer(
            text=f"Streaming permission has been given to {member} until")
        embed.timestamp = duration

        # Mention in content as mentions in embeds don't ping
        await ctx.send(content=member.mention, embed=embed)

        # Convert here for nicer logging
        revoke_time = format_infraction_with_duration(str(duration))
        log.debug(
            f"Successfully gave {member} ({member.id}) permission to stream until {revoke_time}."
        )

    @commands.command(aliases=("pstream", ))
    @commands.has_any_role(*MODERATION_ROLES)
    async def permanentstream(self, ctx: commands.Context,
                              member: discord.Member) -> None:
        """Permanently grants the given member the permission to stream."""
        log.trace(
            f"Attempting to give permanent streaming permission to {member} ({member.id})."
        )

        # Check if the member already has streaming permission
        if any(Roles.video == role.id for role in member.roles):
            if member.id in self.scheduler:
                # Member has temp permission, so cancel the task to revoke later and delete from cache
                self.scheduler.cancel(member.id)
                await self.task_cache.delete(member.id)

                await ctx.send(
                    f"{Emojis.check_mark} Permanently granted {member.mention} the permission to stream."
                )
                log.debug(
                    f"Successfully upgraded temporary streaming permission for {member} ({member.id}) to permanent."
                )
                return

            await ctx.send(
                f"{Emojis.cross_mark} This member can already stream.")
            log.debug(
                f"{member} ({member.id}) already had permanent streaming permission."
            )
            return

        await member.add_roles(discord.Object(Roles.video),
                               reason="Permanent streaming access granted")
        await ctx.send(
            f"{Emojis.check_mark} Permanently granted {member.mention} the permission to stream."
        )
        log.debug(
            f"Successfully gave {member} ({member.id}) permanent streaming permission."
        )

    @commands.command(aliases=("unstream", "rstream"))
    @commands.has_any_role(*MODERATION_ROLES)
    async def revokestream(self, ctx: commands.Context,
                           member: discord.Member) -> None:
        """Revoke the permission to stream from the given member."""
        log.trace(
            f"Attempting to remove streaming permission from {member} ({member.id})."
        )

        # Check if the member already has streaming permission
        if any(Roles.video == role.id for role in member.roles):
            if member.id in self.scheduler:
                # Member has temp permission, so cancel the task to revoke later and delete from cache
                self.scheduler.cancel(member.id)
                await self.task_cache.delete(member.id)
            await self._revoke_streaming_permission(member)

            await ctx.send(
                f"{Emojis.check_mark} Revoked the permission to stream from {member.mention}."
            )
            log.debug(
                f"Successfully revoked streaming permission from {member} ({member.id})."
            )

        else:
            await ctx.send(
                f"{Emojis.cross_mark} This member doesn't have video permissions to remove!"
            )
            log.debug(
                f"{member} ({member.id}) didn't have the streaming permission to remove!"
            )

        # End any live stream regardless of whether the role was present.
        await self._suspend_stream(ctx, member)

    @commands.command(aliases=('lstream', ))
    @commands.has_any_role(*MODERATION_ROLES)
    async def liststream(self, ctx: commands.Context) -> None:
        """Lists all non-staff users who have permission to stream."""
        non_staff_members_with_stream = [
            member for member in ctx.guild.get_role(Roles.video).members
            if not any(role.id in STAFF_ROLES for role in member.roles)
        ]

        # List of tuples (UtcPosixTimestamp, str)
        # So that the list can be sorted on the UtcPosixTimestamp before the message is passed to the paginator.
        streamer_info = []
        for member in non_staff_members_with_stream:
            if revoke_time := await self.task_cache.get(member.id):
                # Member only has temporary streaming perms
                revoke_delta = Arrow.utcfromtimestamp(revoke_time).humanize()
                message = f"{member.mention} will have stream permissions revoked {revoke_delta}."
            else:
                message = f"{member.mention} has permanent streaming permissions."

            # If revoke_time is None use max timestamp to force sort to put them at the end
            streamer_info.append((revoke_time
                                  or Arrow.max.timestamp(), message))

        if streamer_info:
            # Sort based on duration left of streaming perms
            streamer_info.sort(key=itemgetter(0))

            # Only output the message in the pagination
            lines = [line[1] for line in streamer_info]
            embed = discord.Embed(
                title=
                f"Members with streaming permission (`{len(lines)}` total)",
                colour=Colours.soft_green)
            await LinePaginator.paginate(lines,
                                         ctx,
                                         embed,
                                         max_size=400,
                                         empty=False)
        else:
            await ctx.send("No members with stream permissions found.")
Exemple #20
0
from async_rediscache import RedisCache

# Maps a help channel to the time it was claimed.
# RedisCache[discord.TextChannel.id, UtcPosixTimestamp]
claim_times = RedisCache(namespace="HelpChannels.claim_times")

# Tracks which channels are claimed by which members.
# RedisCache[discord.TextChannel.id, t.Union[discord.User.id, discord.Member.id]]
claimants = RedisCache(namespace="HelpChannels.help_channel_claimants")

# Stores the timestamp of the last message from the claimant of a help channel.
# RedisCache[discord.TextChannel.id, UtcPosixTimestamp]
claimant_last_message_times = RedisCache(
    namespace="HelpChannels.claimant_last_message_times")

# Maps a help channel to the timestamp of the last non-claimant message.
# This cache being empty for a given help channel indicates the question is unanswered.
# RedisCache[discord.TextChannel.id, UtcPosixTimestamp]
non_claimant_last_message_times = RedisCache(
    namespace="HelpChannels.non_claimant_last_message_times")

# Maps a help channel to the original question message in the same channel.
# RedisCache[discord.TextChannel.id, discord.Message.id]
question_messages = RedisCache(namespace="HelpChannels.question_messages")

# Keeps track of the dynamic message ID for the continuously updated message
# in the #How-to-get-help channel.
# NOTE(review): key type isn't visible here — presumably a fixed string key
# mapping to a discord.Message.id; verify against usage.
dynamic_message = RedisCache(namespace="HelpChannels.dynamic_message")
Exemple #21
0
class Verification(Cog):
    """
    User verification and role management.

    There are two internal tasks in this cog:

    * `update_unverified_members`
        * Unverified members are given the @Unverified role after configured `unverified_after` days
        * Unverified members are kicked after configured `kicked_after` days
    * `ping_unverified`
        * Periodically ping the @Unverified role in the verification channel

    Statistics are collected in the 'verification.' namespace.

    Moderators+ can use the `verification` command group to start or stop both internal
    tasks, if necessary. Settings are persisted in Redis across sessions.

    Additionally, this cog offers the !accept, !subscribe and !unsubscribe commands,
    and keeps the verification channel clean by deleting messages.
    """

    # Persist task settings & last sent `REMINDER_MESSAGE` id
    # RedisCache[
    #   "tasks_running": int (0 or 1),
    #   "last_reminder": int (discord.Message.id),
    # ]
    task_cache = RedisCache()

    # Create a cache for storing recipients of the alternate welcome DM.
    # RedisCache[discord.Member.id, bool] — set in `on_member_join`, popped in `on_member_update`.
    member_gating_cache = RedisCache()

    def __init__(self, bot: Bot) -> None:
        """Start internal tasks."""
        self.bot = bot
        # Task startup is deferred to an async context: whether to start is read
        # from Redis, which must be awaited (see `_maybe_start_tasks`).
        self.bot.loop.create_task(self._maybe_start_tasks())

    def cog_unload(self) -> None:
        """
        Cancel internal tasks.

        This is necessary, as tasks are not automatically cancelled on cog unload.
        """
        # Cancel immediately rather than finishing the current iteration;
        # an unload should not be blocked by an in-flight batch.
        self._stop_tasks(gracefully=False)

    @property
    def mod_log(self) -> ModLog:
        """Get currently loaded ModLog cog instance."""
        return self.bot.get_cog("ModLog")

    async def _maybe_start_tasks(self) -> None:
        """
        Poll Redis to check whether internal tasks should start.

        Redis must be interfaced with from an async function.
        """
        log.trace("Checking whether background tasks should begin")
        setting: t.Optional[int] = await self.task_cache.get("tasks_running")  # This can be None if never set

        # Truthy check: both None (never set) and 0 (explicitly stopped) mean "do not start".
        if setting:
            log.trace("Background tasks will be started")
            self.update_unverified_members.start()
            self.ping_unverified.start()

    def _stop_tasks(self, *, gracefully: bool) -> None:
        """
        Stop the update users & ping @Unverified tasks.

        If `gracefully` is True, the tasks will be able to finish their current iteration.
        Otherwise, they are cancelled immediately.
        """
        log.info(f"Stopping internal tasks ({gracefully=})")
        if gracefully:
            self.update_unverified_members.stop()
            self.ping_unverified.stop()
        else:
            self.update_unverified_members.cancel()
            self.ping_unverified.cancel()

    # region: automatically update unverified users

    async def _verify_kick(self, n_members: int) -> bool:
        """
        Determine whether `n_members` is a reasonable amount of members to kick.

        First, `n_members` is checked against the size of the PyDis guild. If `n_members` are
        more than the configured `kick_confirmation_threshold` of the guild, the operation
        must be confirmed by staff in #core-dev. Otherwise, the operation is seen as safe.
        """
        log.debug(f"Checking whether {n_members} members are safe to kick")

        await self.bot.wait_until_guild_available()  # Ensure cache is populated before we grab the guild
        pydis = self.bot.get_guild(constants.Guild.id)

        percentage = n_members / len(pydis.members)
        if percentage < constants.Verification.kick_confirmation_threshold:
            log.debug(f"Kicking {percentage:.2%} of the guild's population is seen as safe")
            return True

        # Since `n_members` is a suspiciously large number, we will ask for confirmation
        log.debug("Amount of users is too large, requesting staff confirmation")

        core_dev_channel = pydis.get_channel(constants.Channels.dev_core)
        core_dev_ping = f"<@&{constants.Roles.core_developers}>"

        confirmation_msg = await core_dev_channel.send(
            f"{core_dev_ping} Verification determined that `{n_members}` members should be kicked as they haven't "
            f"verified in `{constants.Verification.kicked_after}` days. This is `{percentage:.2%}` of the guild's "
            f"population. Proceed?",
            allowed_mentions=mention_role(constants.Roles.core_developers),
        )

        options = (constants.Emojis.incident_actioned, constants.Emojis.incident_unactioned)
        for option in options:
            await confirmation_msg.add_reaction(option)

        core_dev_ids = [member.id for member in pydis.get_role(constants.Roles.core_developers).members]

        def check(reaction: discord.Reaction, user: discord.User) -> bool:
            """Check whether `reaction` is a valid reaction to `confirmation_msg`."""
            return (
                reaction.message.id == confirmation_msg.id  # Reacted to `confirmation_msg`
                and str(reaction.emoji) in options  # With one of `options`
                and user.id in core_dev_ids  # By a core developer
            )

        timeout = 60 * 5  # Seconds, i.e. 5 minutes
        try:
            choice, _ = await self.bot.wait_for("reaction_add", check=check, timeout=timeout)
        except asyncio.TimeoutError:
            log.debug("Staff prompt not answered, aborting operation")
            return False
        finally:
            # Best-effort cleanup of the prompt's reactions, whether answered or timed out.
            with suppress(discord.HTTPException):
                await confirmation_msg.clear_reactions()

        result = str(choice) == constants.Emojis.incident_actioned
        log.debug(f"Received answer: {choice}, result: {result}")

        # Edit the prompt message to reflect the final choice
        if result is True:
            result_msg = f":ok_hand: {core_dev_ping} Request to kick `{n_members}` members was authorized!"
        else:
            result_msg = f":warning: {core_dev_ping} Request to kick `{n_members}` members was denied!"

        with suppress(discord.HTTPException):
            await confirmation_msg.edit(content=result_msg)

        return result

    async def _alert_admins(self, exception: discord.HTTPException) -> None:
        """
        Ping @Admins with information about `exception`.

        This is used when a critical `exception` caused a verification task to abort.
        """
        await self.bot.wait_until_guild_available()
        log.info(f"Sending admin alert regarding exception: {exception}")

        admins_channel = self.bot.get_guild(constants.Guild.id).get_channel(constants.Channels.admins)
        ping = f"<@&{constants.Roles.admins}>"

        await admins_channel.send(
            f"{ping} Aborted updating unverified users due to the following exception:\n"
            f"```{exception}```\n"
            f"Internal tasks will be stopped.",
            allowed_mentions=mention_role(constants.Roles.admins),
        )

    async def _send_requests(self, members: t.Collection[discord.Member], request: Request, limit: Limit) -> int:
        """
        Pass `members` one by one to `request` handling Discord exceptions.

        This coroutine serves as a generic `request` executor for kicking members and adding
        roles, as it allows us to define the error handling logic in one place only.

        Any `request` has the ability to completely abort the execution by raising `StopExecution`.
        In such a case, the @Admins will be alerted of the reason attribute.

        To avoid rate-limits, pass a `limit` configuring the batch size and the amount of seconds
        to sleep between batches.

        Returns the amount of successful requests. Failed requests are logged at info level.
        """
        log.trace(f"Sending {len(members)} requests")
        n_success, bad_statuses = 0, set()

        for progress, member in enumerate(members, start=1):
            if is_verified(member):  # Member could have verified in the meantime
                continue
            try:
                await request(member)
            except StopExecution as stop_execution:
                # Persist the stop so tasks don't restart on the next boot,
                # then let the current iteration wind down gracefully.
                await self._alert_admins(stop_execution.reason)
                await self.task_cache.set("tasks_running", 0)
                self._stop_tasks(gracefully=True)  # Gracefully finish current iteration, then stop
                break
            except discord.HTTPException as http_exc:
                bad_statuses.add(http_exc.status)
            else:
                n_success += 1

            # NOTE: skipped (already-verified) members still advance `progress`,
            # so batches are counted by position, not by requests actually sent.
            if progress % limit.batch_size == 0:
                log.trace(f"Processed {progress} requests, pausing for {limit.sleep_secs} seconds")
                await asyncio.sleep(limit.sleep_secs)

        if bad_statuses:
            log.info(f"Failed to send {len(members) - n_success} requests due to following statuses: {bad_statuses}")

        return n_success

    async def _kick_members(self, members: t.Collection[discord.Member]) -> int:
        """
        Kick `members` from the PyDis guild.

        Due to strict ratelimits on sending messages (120 requests / 60 secs), we sleep for a second
        after each 2 requests to allow breathing room for other features.

        Note that this is a potentially destructive operation. Returns the amount of successful requests.
        """
        log.info(f"Kicking {len(members)} members (not verified after {constants.Verification.kicked_after} days)")

        async def kick_request(member: discord.Member) -> None:
            """Send `KICKED_MESSAGE` to `member` and kick them from the guild."""
            try:
                await safe_dm(member.send(KICKED_MESSAGE))  # Suppress disabled DMs
            except discord.HTTPException as suspicious_exception:
                # An HTTP error that `safe_dm` didn't swallow is unexpected; abort the whole run.
                raise StopExecution(reason=suspicious_exception)
            await member.kick(reason=f"User has not verified in {constants.Verification.kicked_after} days")

        n_kicked = await self._send_requests(members, kick_request, Limit(batch_size=2, sleep_secs=1))
        self.bot.stats.incr("verification.kicked", count=n_kicked)

        return n_kicked

    async def _give_role(self, members: t.Collection[discord.Member], role: discord.Role) -> int:
        """
        Give `role` to all `members`.

        We pause for a second after batches of 25 requests to ensure ratelimits aren't exceeded.

        Returns the amount of successful requests.
        """
        log.info(
            f"Assigning {role} role to {len(members)} members (not verified "
            f"after {constants.Verification.unverified_after} days)"
        )

        async def role_request(member: discord.Member) -> None:
            """Add `role` to `member`."""
            await member.add_roles(role, reason=f"Not verified after {constants.Verification.unverified_after} days")

        return await self._send_requests(members, role_request, Limit(batch_size=25, sleep_secs=1))

    async def _check_members(self) -> t.Tuple[t.Set[discord.Member], t.Set[discord.Member]]:
        """
        Check in on the verification status of PyDis members.

        This coroutine finds two sets of users:
        * Not verified after configured `unverified_after` days, should be given the @Unverified role
        * Not verified after configured `kicked_after` days, should be kicked from the guild

        These sets are always disjoint, i.e. share no common members.
        """
        await self.bot.wait_until_guild_available()  # Ensure cache is ready
        pydis = self.bot.get_guild(constants.Guild.id)

        unverified = pydis.get_role(constants.Roles.unverified)
        current_dt = datetime.utcnow()  # Discord timestamps are UTC

        # Users to be given the @Unverified role, and those to be kicked, these should be entirely disjoint
        for_role, for_kick = set(), set()

        log.debug("Checking verification status of guild members")
        for member in pydis.members:

            # Skip verified members, bots, and members for which we do not know their join date,
            # this should be extremely rare but docs mention that it can happen
            if is_verified(member) or member.bot or member.joined_at is None:
                continue

            # At this point, we know that `member` is an unverified user, and we will decide what
            # to do with them based on time passed since their join date
            since_join = current_dt - member.joined_at

            if since_join > timedelta(days=constants.Verification.kicked_after):
                for_kick.add(member)  # User should be removed from the guild

            elif (
                since_join > timedelta(days=constants.Verification.unverified_after)
                and unverified not in member.roles
            ):
                for_role.add(member)  # User should be given the @Unverified role

        log.debug(f"Found {len(for_role)} users for {unverified} role, {len(for_kick)} users to be kicked")
        return for_role, for_kick

    @tasks.loop(minutes=30)
    async def update_unverified_members(self) -> None:
        """
        Periodically call `_check_members` and update unverified members accordingly.

        After each run, a summary will be sent to the modlog channel. If a suspiciously high
        amount of members to be kicked is found, the operation is guarded by `_verify_kick`.
        """
        log.info("Updating unverified guild members")

        await self.bot.wait_until_guild_available()
        unverified = self.bot.get_guild(constants.Guild.id).get_role(constants.Roles.unverified)

        for_role, for_kick = await self._check_members()

        if not for_role:
            role_report = f"Found no users to be assigned the {unverified.mention} role."
        else:
            n_roles = await self._give_role(for_role, unverified)
            role_report = f"Assigned {unverified.mention} role to `{n_roles}`/`{len(for_role)}` members."

        if not for_kick:
            kick_report = "Found no users to be kicked."
        elif not await self._verify_kick(len(for_kick)):
            kick_report = f"Not authorized to kick `{len(for_kick)}` members."
        else:
            n_kicks = await self._kick_members(for_kick)
            kick_report = f"Kicked `{n_kicks}`/`{len(for_kick)}` members from the guild."

        await self.mod_log.send_log_message(
            icon_url=self.bot.user.avatar_url,
            colour=discord.Colour.blurple(),
            title="Verification system",
            text=f"{kick_report}\n{role_report}",
        )

    # endregion
    # region: periodically ping @Unverified

    @tasks.loop(hours=constants.Verification.reminder_frequency)
    async def ping_unverified(self) -> None:
        """
        Delete latest `REMINDER_MESSAGE` and send it again.

        This utilizes RedisCache to persist the latest reminder message id.
        """
        await self.bot.wait_until_guild_available()
        verification = self.bot.get_guild(constants.Guild.id).get_channel(constants.Channels.verification)

        last_reminder: t.Optional[int] = await self.task_cache.get("last_reminder")

        if last_reminder is not None:
            log.trace(f"Found verification reminder message in cache, deleting: {last_reminder}")

            with suppress(discord.HTTPException):  # If something goes wrong, just ignore it
                await self.bot.http.delete_message(verification.id, last_reminder)

        log.trace("Sending verification reminder")
        new_reminder = await verification.send(
            REMINDER_MESSAGE, allowed_mentions=mention_role(constants.Roles.unverified),
        )

        await self.task_cache.set("last_reminder", new_reminder.id)

    @ping_unverified.before_loop
    async def _before_first_ping(self) -> None:
        """
        Sleep until `REMINDER_MESSAGE` should be sent again.

        If latest reminder is not cached, exit instantly. Otherwise, wait wait until the
        configured `reminder_frequency` has passed.
        """
        last_reminder: t.Optional[int] = await self.task_cache.get("last_reminder")

        if last_reminder is None:
            log.trace("Latest verification reminder message not cached, task will not wait")
            return

        # Convert cached message id into a timestamp
        # (snowflake IDs encode their creation time, so no extra API call is needed)
        time_since = datetime.utcnow() - snowflake_time(last_reminder)
        log.trace(f"Time since latest verification reminder: {time_since}")

        to_sleep = timedelta(hours=constants.Verification.reminder_frequency) - time_since
        log.trace(f"Time to sleep until next ping: {to_sleep}")

        # Delta can be negative if `reminder_frequency` has already passed
        secs = max(to_sleep.total_seconds(), 0)
        await asyncio.sleep(secs)

    # endregion
    # region: listeners

    @Cog.listener()
    async def on_member_join(self, member: discord.Member) -> None:
        """Attempt to send initial direct message to each new member."""
        if member.guild.id != constants.Guild.id:
            return  # Only listen for PyDis events

        # NOTE(review): raw HTTP fetch is presumably used because the cached Member
        # object does not expose the "is_pending" field — confirm against the library version.
        raw_member = await self.bot.http.get_member(member.guild.id, member.id)

        # If the user has the is_pending flag set, they will be using the alternate
        # gate and will not need a welcome DM with verification instructions.
        # We will send them an alternate DM once they verify with the welcome
        # video.
        if raw_member.get("is_pending"):
            await self.member_gating_cache.set(member.id, True)
            return

        log.trace(f"Sending on join message to new member: {member.id}")
        try:
            await safe_dm(member.send(ON_JOIN_MESSAGE))
        except discord.HTTPException:
            log.exception("DM dispatch failed on unexpected error code")

    @Cog.listener()
    async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:
        """Check if we need to send a verification DM to a gated user."""
        before_roles = [role.id for role in before.roles]
        after_roles = [role.id for role in after.roles]

        # Only act on the transition into the verified role, not on every update.
        if constants.Roles.verified not in before_roles and constants.Roles.verified in after_roles:
            # `pop` both reads and clears the gating flag, so the DM is sent at most once.
            if await self.member_gating_cache.pop(after.id):
                try:
                    # If the member has not received a DM from our !accept command
                    # and has gone through the alternate gating system we should send
                    # our alternate welcome DM which includes info such as our welcome
                    # video.
                    await safe_dm(after.send(ALTERNATE_VERIFIED_MESSAGE))
                except discord.HTTPException:
                    log.exception("DM dispatch failed on unexpected error code")

    @Cog.listener()
    async def on_message(self, message: discord.Message) -> None:
        """Check new message event for messages to the checkpoint channel & process."""
        if message.channel.id != constants.Channels.verification:
            return  # Only listen for #checkpoint messages

        # Exact string comparison — assumes the reminder is always sent verbatim.
        if message.content == REMINDER_MESSAGE:
            return  # Ignore bots own verification reminder

        if message.author.bot:
            # They're a bot, delete their message after the delay.
            await message.delete(delay=constants.Verification.bot_message_delete_delay)
            return

        # if a user mentions a role or guild member
        # alert the mods in mod-alerts channel
        if message.mentions or message.role_mentions:
            log.debug(
                f"{message.author} mentioned one or more users "
                f"and/or roles in {message.channel.name}"
            )

            embed_text = (
                f"{format_user(message.author)} sent a message in "
                f"{message.channel.mention} that contained user and/or role mentions."
                f"\n\n**Original message:**\n>>> {message.content}"
            )

            # Send pretty mod log embed to mod-alerts
            await self.mod_log.send_log_message(
                icon_url=constants.Icons.filtering,
                colour=discord.Colour(constants.Colours.soft_red),
                title=f"User/Role mentioned in {message.channel.name}",
                text=embed_text,
                thumbnail=message.author.avatar_url_as(static_format="png"),
                channel_id=constants.Channels.mod_alerts,
            )

        # Resolve the message as a command invocation to special-case !accept,
        # which is handled (and the message deleted) by `accept_command` itself.
        ctx: Context = await self.bot.get_context(message)
        if ctx.command is not None and ctx.command.name == "accept":
            return

        if any(r.id == constants.Roles.verified for r in ctx.author.roles):
            log.info(
                f"{ctx.author} posted '{ctx.message.content}' "
                "in the verification channel, but is already verified."
            )
            return

        log.debug(
            f"{ctx.author} posted '{ctx.message.content}' in the verification "
            "channel. We are providing instructions how to verify."
        )
        await ctx.send(
            f"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, "
            f"and gain access to the rest of the server.",
            delete_after=20
        )

        log.trace(f"Deleting the message posted by {ctx.author}")
        with suppress(discord.NotFound):
            await ctx.message.delete()

    # endregion
    # region: task management commands

    @has_any_role(*constants.MODERATION_ROLES)
    @group(name="verification")
    async def verification_group(self, ctx: Context) -> None:
        """Manage internal verification tasks."""
        if ctx.invoked_subcommand is None:
            await ctx.send_help(ctx.command)

    @verification_group.command(name="status")
    async def status_cmd(self, ctx: Context) -> None:
        """Check whether verification tasks are running."""
        log.trace("Checking status of verification tasks")

        if self.update_unverified_members.is_running():
            update_status = f"{constants.Emojis.incident_actioned} Member update task is running."
        else:
            update_status = f"{constants.Emojis.incident_unactioned} Member update task is **not** running."

        mention = f"<@&{constants.Roles.unverified}>"
        if self.ping_unverified.is_running():
            ping_status = f"{constants.Emojis.incident_actioned} Ping {mention} task is running."
        else:
            ping_status = f"{constants.Emojis.incident_unactioned} Ping {mention} task is **not** running."

        embed = discord.Embed(
            title="Verification system",
            description=f"{update_status}\n{ping_status}",
            colour=discord.Colour.blurple(),
        )
        await ctx.send(embed=embed)

    @verification_group.command(name="start")
    async def start_cmd(self, ctx: Context) -> None:
        """Start verification tasks if they are not already running."""
        log.info("Starting verification tasks")

        if not self.update_unverified_members.is_running():
            self.update_unverified_members.start()

        if not self.ping_unverified.is_running():
            self.ping_unverified.start()

        # Persist the setting so `_maybe_start_tasks` restarts the tasks after a reboot.
        await self.task_cache.set("tasks_running", 1)

        colour = discord.Colour.blurple()
        await ctx.send(embed=discord.Embed(title="Verification system", description="Done. :ok_hand:", colour=colour))

    @verification_group.command(name="stop", aliases=["kill"])
    async def stop_cmd(self, ctx: Context) -> None:
        """Stop verification tasks."""
        log.info("Stopping verification tasks")

        self._stop_tasks(gracefully=False)
        await self.task_cache.set("tasks_running", 0)

        colour = discord.Colour.blurple()
        await ctx.send(embed=discord.Embed(title="Verification system", description="Tasks canceled.", colour=colour))

    # endregion
    # region: accept and subscribe commands

    def _bump_verified_stats(self, verified_member: discord.Member) -> None:
        """
        Increment verification stats for `verified_member`.

        Each member falls into one of the three categories:
            * Verified within 24 hours after joining
            * Does not have @Unverified role yet
            * Does have @Unverified role

        Stats for member kicking are handled separately.
        """
        if verified_member.joined_at is None:  # Docs mention this can happen
            return

        if (datetime.utcnow() - verified_member.joined_at) < timedelta(hours=24):
            category = "accepted_on_day_one"
        elif constants.Roles.unverified not in [role.id for role in verified_member.roles]:
            category = "accepted_before_unverified"
        else:
            category = "accepted_after_unverified"

        log.trace(f"Bumping verification stats in category: {category}")
        self.bot.stats.incr(f"verification.{category}")

    @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True)
    @has_no_roles(constants.Roles.verified)
    @in_whitelist(channels=(constants.Channels.verification,))
    async def accept_command(self, ctx: Context, *_) -> None:  # We don't actually care about the args
        """Accept our rules and gain access to the rest of the server."""
        log.debug(f"{ctx.author} called !accept. Assigning the 'Developer' role.")
        await ctx.author.add_roles(discord.Object(constants.Roles.verified), reason="Accepted the rules")

        self._bump_verified_stats(ctx.author)  # This checks for @Unverified so make sure it's not yet removed

        if constants.Roles.unverified in [role.id for role in ctx.author.roles]:
            log.debug(f"Removing Unverified role from: {ctx.author}")
            await ctx.author.remove_roles(discord.Object(constants.Roles.unverified))

        try:
            await safe_dm(ctx.author.send(VERIFIED_MESSAGE))
        except discord.HTTPException:
            log.exception(f"Sending welcome message failed for {ctx.author}.")
        finally:
            # Always clean up the invoking message; ignore it in modlog so the
            # deletion isn't reported as a moderation event.
            log.trace(f"Deleting accept message by {ctx.author}.")
            with suppress(discord.NotFound):
                self.mod_log.ignore(constants.Event.message_delete, ctx.message.id)
                await ctx.message.delete()

    @command(name='subscribe')
    @in_whitelist(channels=(constants.Channels.bot_commands,))
    async def subscribe_command(self, ctx: Context, *_) -> None:  # We don't actually care about the args
        """Subscribe to announcement notifications by assigning yourself the role."""
        has_role = False

        for role in ctx.author.roles:
            if role.id == constants.Roles.announcements:
                has_role = True
                break

        if has_role:
            await ctx.send(f"{ctx.author.mention} You're already subscribed!")
            return

        log.debug(f"{ctx.author} called !subscribe. Assigning the 'Announcements' role.")
        await ctx.author.add_roles(discord.Object(constants.Roles.announcements), reason="Subscribed to announcements")

        # NOTE(review): this trace mentions deleting the message, but no deletion
        # follows here (unlike `unsubscribe_command`, which has the same quirk) — confirm intent.
        log.trace(f"Deleting the message posted by {ctx.author}.")

        await ctx.send(
            f"{ctx.author.mention} Subscribed to <#{constants.Channels.announcements}> notifications.",
        )

    @command(name='unsubscribe')
    @in_whitelist(channels=(constants.Channels.bot_commands,))
    async def unsubscribe_command(self, ctx: Context, *_) -> None:  # We don't actually care about the args
        """Unsubscribe from announcement notifications by removing the role from yourself."""
        has_role = False

        for role in ctx.author.roles:
            if role.id == constants.Roles.announcements:
                has_role = True
                break

        if not has_role:
            await ctx.send(f"{ctx.author.mention} You're already unsubscribed!")
            return

        log.debug(f"{ctx.author} called !unsubscribe. Removing the 'Announcements' role.")
        await ctx.author.remove_roles(
            discord.Object(constants.Roles.announcements), reason="Unsubscribed from announcements"
        )

        log.trace(f"Deleting the message posted by {ctx.author}.")

        await ctx.send(
            f"{ctx.author.mention} Unsubscribed from <#{constants.Channels.announcements}> notifications."
        )

    # endregion
    # region: miscellaneous

    # This cannot be static (must have a __func__ attribute).
    async def cog_command_error(self, ctx: Context, error: Exception) -> None:
        """Check for & ignore any InWhitelistCheckFailure."""
        if isinstance(error, InWhitelistCheckFailure):
            error.handled = True

    @staticmethod
    async def bot_check(ctx: Context) -> bool:
        """Block any command within the verification channel that is not !accept."""
        is_verification = ctx.channel.id == constants.Channels.verification
        if is_verification and await has_no_roles_check(ctx, *constants.MODERATION_ROLES):
            return ctx.command.name == "accept"
        else:
            return True
Exemple #22
0
class CandyCollection(commands.Cog):
    """Candy collection game Cog."""

    # User candy amount records
    candy_records = RedisCache()

    # Candy and skull messages mapping
    candy_messages = RedisCache()
    skull_messages = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot

    @in_month(Month.OCTOBER)
    @commands.Cog.listener()
    async def on_message(self, message: discord.Message) -> None:
        """Randomly adds candy or skull reaction to non-bot messages in the Event channel."""
        # Ignore messages in DMs
        if not message.guild:
            return
        # make sure its a human message
        if message.author.bot:
            return
        # ensure it's hacktober channel
        if message.channel.id != Channels.community_bot_commands:
            return

        # do random check for skull first as it has the lower chance
        if random.randint(1, ADD_SKULL_REACTION_CHANCE) == 1:
            await self.skull_messages.set(message.id, "skull")
            await message.add_reaction(EMOJIS["SKULL"])
        # check for the candy chance next
        elif random.randint(1, ADD_CANDY_REACTION_CHANCE) == 1:
            await self.candy_messages.set(message.id, "candy")
            await message.add_reaction(EMOJIS["CANDY"])

    @in_month(Month.OCTOBER)
    @commands.Cog.listener()
    async def on_reaction_add(self, reaction: discord.Reaction, user: Union[discord.User, discord.Member]) -> None:
        """Add/remove candies from a person if the reaction satisfies criteria."""
        message = reaction.message
        # check to ensure the reactor is human
        if user.bot:
            return

        # check to ensure it is in correct channel
        if message.channel.id != Channels.community_bot_commands:
            return

        # if its not a candy or skull, and it is one of 10 most recent messages,
        # proceed to add a skull/candy with higher chance
        if str(reaction.emoji) not in (EMOJIS["SKULL"], EMOJIS["CANDY"]):
            recent_message_ids = map(
                lambda m: m.id,
                await self.hacktober_channel.history(limit=10).flatten()
            )
            if message.id in recent_message_ids:
                await self.reacted_msg_chance(message)
            return

        if await self.candy_messages.get(message.id) == "candy" and str(reaction.emoji) == EMOJIS["CANDY"]:
            await self.candy_messages.delete(message.id)
            if await self.candy_records.contains(user.id):
                await self.candy_records.increment(user.id)
            else:
                await self.candy_records.set(user.id, 1)

        elif await self.skull_messages.get(message.id) == "skull" and str(reaction.emoji) == EMOJIS["SKULL"]:
            await self.skull_messages.delete(message.id)

            if prev_record := await self.candy_records.get(user.id):
                lost = min(random.randint(1, 3), prev_record)
                await self.candy_records.decrement(user.id, lost)

                if lost == prev_record:
                    await CandyCollection.send_spook_msg(user, message.channel, "all of your")
                else:
                    await CandyCollection.send_spook_msg(user, message.channel, lost)
            else:
                await CandyCollection.send_no_candy_spook_message(user, message.channel)
        else:
Exemple #23
0
class HelpChannels(commands.Cog):
    """
    Manage the help channel system of the guild.

    The system is based on a 3-category system:

    Available Category

    * Contains channels which are ready to be occupied by someone who needs help
    * Will always contain `constants.HelpChannels.max_available` channels; refilled automatically
      from the pool of dormant channels
        * Prioritise using the channels which have been dormant for the longest amount of time
        * If there are no more dormant channels, the bot will automatically create a new one
        * If there are no dormant channels to move, helpers will be notified (see `notify()`)
    * When a channel becomes available, the dormant embed will be edited to show `AVAILABLE_MSG`
    * User can only claim a channel at an interval `constants.HelpChannels.claim_minutes`
        * To keep track of cooldowns, user which claimed a channel will have a temporary role

    In Use Category

    * Contains all channels which are occupied by someone needing help
    * Channel moves to dormant category after `constants.HelpChannels.idle_minutes` of being idle
    * Command can prematurely mark a channel as dormant
        * Channel claimant is allowed to use the command
        * Allowed roles for the command are configurable with `constants.HelpChannels.cmd_whitelist`
    * When a channel becomes dormant, an embed with `DORMANT_MSG` will be sent

    Dormant Category

    * Contains channels which aren't in use
    * Channels are used to refill the Available category

    Help channels are named after the chemical elements in `bot/resources/elements.json`.
    """

    # Tracks which member currently claims each help channel.
    # RedisCache[discord.TextChannel.id, t.Union[discord.User.id, discord.Member.id]]
    help_channel_claimants = RedisCache()

    # Maps a help channel to whether it has had any activity other than
    # the original claimant. True means no other activity yet (unanswered);
    # False means someone other than the claimant has posted.
    # RedisCache[discord.TextChannel.id, bool]
    unanswered = RedisCache()

    # Maps a help channel to the POSIX timestamp of when it was claimed.
    # RedisCache[discord.TextChannel.id, UtcPosixTimestamp]
    claim_times = RedisCache()

    # Maps a help channel to the original question message in the same channel.
    # RedisCache[discord.TextChannel.id, discord.Message.id]
    question_messages = RedisCache()

    def __init__(self, bot: Bot):
        """Initialise state and kick off the asynchronous cog initialisation task."""
        self.bot = bot
        self.scheduler = Scheduler(self.__class__.__name__)

        # Category channels; populated by init_categories() during init_cog().
        self.available_category: discord.CategoryChannel = None
        self.in_use_category: discord.CategoryChannel = None
        self.dormant_category: discord.CategoryChannel = None

        # Queues; populated during init_cog() once the categories are known.
        self.channel_queue: asyncio.Queue[discord.TextChannel] = None
        self.name_queue: t.Deque[str] = None

        self.name_positions = self.get_names()
        self.last_notification: t.Optional[datetime] = None

        # Asyncio stuff
        self.queue_tasks: t.List[asyncio.Task] = []
        self.ready = asyncio.Event()
        self.on_message_lock = asyncio.Lock()
        # Created last so init_cog() sees fully-initialised attributes.
        self.init_task = self.bot.loop.create_task(self.init_cog())

    def cog_unload(self) -> None:
        """Cancel the init task and scheduled tasks when the cog unloads."""
        log.trace("Cog unload: cancelling the init_cog task")
        self.init_task.cancel()

        log.trace("Cog unload: cancelling the channel queue tasks")
        for task in self.queue_tasks:
            task.cancel()

        # Cancel every scheduled task (idle-channel moves and cooldown removals).
        self.scheduler.cancel_all()

    def create_channel_queue(self) -> asyncio.Queue:
        """
        Build and return a queue of dormant channels used to source the next available channel.

        The channels are enqueued in a randomised order.
        """
        log.trace("Creating the channel queue.")

        dormant_channels = list(self.get_category_channels(self.dormant_category))
        random.shuffle(dormant_channels)

        log.trace("Populating the channel queue with channels.")
        channel_queue = asyncio.Queue()
        for dormant_channel in dormant_channels:
            channel_queue.put_nowait(dormant_channel)

        return channel_queue

    async def create_dormant(self) -> t.Optional[discord.TextChannel]:
        """
        Create a fresh channel in the Dormant category and return it.

        The new channel syncs its permission overwrites with the category.
        Returns None when the element-name pool has been exhausted.
        """
        log.trace("Getting a name for a new dormant channel.")

        if not self.name_queue:
            log.debug("No more names available for new dormant channels.")
            return None

        name = self.name_queue.popleft()

        log.debug(f"Creating a new dormant channel named {name}.")
        return await self.dormant_category.create_text_channel(
            name, topic=HELP_CHANNEL_TOPIC)

    def create_name_queue(self) -> deque:
        """Build a deque of chemical-element channel names not yet in use."""
        log.trace("Creating the chemical element name queue.")

        taken = self.get_used_names()

        log.trace("Determining the available names.")
        log.trace("Populating the name queue with names.")
        return deque(name for name in self.name_positions if name not in taken)

    async def dormant_check(self, ctx: commands.Context) -> bool:
        """Return True if the user is the help channel claimant or passes the role check."""
        claimant_id = await self.help_channel_claimants.get(ctx.channel.id)
        if claimant_id == ctx.author.id:
            log.trace(
                f"{ctx.author} is the help channel claimant, passing the check for dormant."
            )
            self.bot.stats.incr("help.dormant_invoke.claimant")
            return True

        log.trace(
            f"{ctx.author} is not the help channel claimant, checking roles.")
        role_check = commands.has_any_role(*constants.HelpChannels.cmd_whitelist)
        passed = await role_check.predicate(ctx)

        if passed:
            self.bot.stats.incr("help.dormant_invoke.staff")

        return passed

    @commands.command(name="close",
                      aliases=["dormant", "solved"],
                      enabled=False)
    async def close_command(self, ctx: commands.Context) -> None:
        """
        Make the current in-use help channel dormant.

        Make the channel dormant if the user passes the `dormant_check`,
        delete the message that invoked this,
        and reset the send permissions cooldown for the user who started the session.
        """
        log.trace("close command invoked; checking if the channel is in-use.")
        if ctx.channel.category == self.in_use_category:
            if await self.dormant_check(ctx):
                # Lift the claim cooldown early since the session is over.
                await self.remove_cooldown_role(ctx.author)

                # Ignore missing task when cooldown has passed but the channel still isn't dormant.
                if ctx.author.id in self.scheduler:
                    self.scheduler.cancel(ctx.author.id)

                await self.move_to_dormant(ctx.channel, "command")
                # Cancel the pending idle-timeout task scheduled for this channel.
                self.scheduler.cancel(ctx.channel.id)
        else:
            log.debug(
                f"{ctx.author} invoked command 'dormant' outside an in-use help channel"
            )

    async def get_available_candidate(self) -> discord.TextChannel:
        """
        Fetch a dormant channel that can be turned into an available channel.

        If no channel is available, wait indefinitely until one becomes available.
        """
        log.trace("Getting an available channel candidate.")

        try:
            return self.channel_queue.get_nowait()
        except asyncio.QueueEmpty:
            log.info(
                "No candidate channels in the queue; creating a new channel.")

        candidate = await self.create_dormant()
        if candidate:
            return candidate

        log.info(
            "Couldn't create a candidate channel; waiting to get one from the queue."
        )
        await self.notify()
        return await self.wait_for_dormant_channel()

    @staticmethod
    def get_clean_channel_name(channel: discord.TextChannel) -> str:
        """Return a clean channel name without status emojis prefix."""
        prefix = constants.HelpChannels.name_prefix
        raw_name = channel.name
        try:
            # Drop everything before the configured prefix (i.e. the status emoji).
            clean_name = raw_name[raw_name.index(prefix):]
        except ValueError:
            # The prefix isn't present at all; fall back to the name unchanged.
            log.info(
                f"Can't get clean name because `{channel}` isn't prefixed by `{prefix}`."
            )
            return raw_name

        log.trace(f"The clean name for `{channel}` is `{clean_name}`")
        return clean_name

    @staticmethod
    def is_excluded_channel(channel: discord.abc.GuildChannel) -> bool:
        """Check if a channel should be excluded from the help channel system."""
        if not isinstance(channel, discord.TextChannel):
            return True
        return channel.id in EXCLUDED_CHANNELS

    def get_category_channels(
            self, category: discord.CategoryChannel
    ) -> t.Iterable[discord.TextChannel]:
        """Yield the text channels of the `category` in an unsorted manner."""
        log.trace(
            f"Getting text channels in the category '{category}' ({category.id})."
        )

        # This is faster than using category.channels because the latter sorts them.
        guild = self.bot.get_guild(constants.Guild.id)
        for guild_channel in guild.channels:
            if guild_channel.category_id != category.id:
                continue
            if self.is_excluded_channel(guild_channel):
                continue
            yield guild_channel

    async def get_in_use_time(self, channel_id: int) -> t.Optional[timedelta]:
        """Return the duration `channel_id` has been in use. Return None if it's not in use."""
        log.trace(f"Calculating in use time for channel {channel_id}.")

        claimed_at = await self.claim_times.get(channel_id)
        if not claimed_at:
            return None

        # The cached value is a POSIX timestamp; compare in naive UTC.
        return datetime.utcnow() - datetime.utcfromtimestamp(claimed_at)

    @staticmethod
    def get_names() -> t.List[str]:
        """
        Return a truncated list of prefixed element names.

        The amount of names is configured with `HelpChannels.max_total_channels`.
        The prefix is configured with `HelpChannels.name_prefix`.
        """
        count = constants.HelpChannels.max_total_channels
        prefix = constants.HelpChannels.name_prefix

        log.trace(f"Getting the first {count} element names from JSON.")

        elements_path = Path("bot/resources/elements.json")
        with elements_path.open(encoding="utf-8") as elements_file:
            all_names = json.load(elements_file)

        selected = all_names[:count]
        return [prefix + name for name in selected] if prefix else selected

    def get_used_names(self) -> t.Set[str]:
        """Return channel names which are already being used."""
        log.trace("Getting channel names which are already being used.")

        categories = (self.available_category, self.in_use_category,
                      self.dormant_category)
        names = {
            self.get_clean_channel_name(channel)
            for cat in categories
            for channel in self.get_category_channels(cat)
        }

        if len(names) > MAX_CHANNELS_PER_CATEGORY:
            log.warning(
                f"Too many help channels ({len(names)}) already exist! "
                f"Discord only supports {MAX_CHANNELS_PER_CATEGORY} in a category."
            )

        log.trace(f"Got {len(names)} used names: {names}")
        return names

    @classmethod
    async def get_idle_time(cls,
                            channel: discord.TextChannel) -> t.Optional[int]:
        """
        Return the time elapsed, in seconds, since the last message sent in the `channel`.

        Return None if the channel has no messages.
        """
        log.trace(f"Getting the idle time for #{channel} ({channel.id}).")

        msg = await cls.get_last_message(channel)
        if not msg:
            log.debug(
                f"No idle time available; #{channel} ({channel.id}) has no messages."
            )
            return None

        # Use total_seconds() rather than timedelta.seconds: the latter excludes whole
        # days, so a channel idle for over 24 hours (e.g. after bot downtime) would
        # report a misleadingly small idle time and never be made dormant.
        idle_time = int((datetime.utcnow() - msg.created_at).total_seconds())

        log.trace(
            f"#{channel} ({channel.id}) has been idle for {idle_time} seconds."
        )
        return idle_time

    @staticmethod
    async def get_last_message(
            channel: discord.TextChannel) -> t.Optional[discord.Message]:
        """Return the last message sent in the channel or None if no messages exist."""
        log.trace(f"Getting the last message in #{channel} ({channel.id}).")

        try:
            # Take the single item from the history iterator; NoMoreItems means
            # the channel has no messages at all.
            return await channel.history(limit=1).next()  # noqa: B305
        except discord.NoMoreItems:
            log.debug(
                f"No last message available; #{channel} ({channel.id}) has no messages."
            )
            return None

    async def init_available(self) -> None:
        """Initialise the Available category with channels."""
        log.trace("Initialising the Available category with channels.")

        available = list(self.get_category_channels(self.available_category))
        missing = constants.HelpChannels.max_available - len(available)

        if missing > 0:
            # Too few available channels; top the category up to `max_available`.
            log.trace(
                f"Moving {missing} missing channels to the Available category."
            )
            for _ in range(missing):
                await self.move_to_available()
        elif missing < 0:
            # Too many available channels; demote the surplus to dormant.
            surplus = abs(missing)
            log.trace(
                f"Moving {surplus} superfluous available channels over to the Dormant category."
            )
            for surplus_channel in available[:surplus]:
                await self.move_to_dormant(surplus_channel, "auto")

    async def init_categories(self) -> None:
        """Get the help category objects. Remove the cog if retrieval fails."""
        log.trace(
            "Getting the CategoryChannel objects for the help categories.")

        targets = (
            ("available_category", constants.Categories.help_available),
            ("in_use_category", constants.Categories.help_in_use),
            ("dormant_category", constants.Categories.help_dormant),
        )
        try:
            for attr_name, category_id in targets:
                category = await channel_utils.try_get_channel(category_id, self.bot)
                setattr(self, attr_name, category)
        except discord.HTTPException:
            # Without all three categories the system cannot operate.
            log.exception("Failed to get a category; cog will be removed")
            self.bot.remove_cog(self.qualified_name)

    async def init_cog(self) -> None:
        """Initialise the help channel system."""
        log.trace(
            "Waiting for the guild to be available before initialisation.")
        await self.bot.wait_until_guild_available()

        log.trace("Initialising the cog.")
        # Categories must be resolved before anything that iterates them.
        await self.init_categories()
        await self.check_cooldowns()

        # Queues depend on the categories resolved above.
        self.channel_queue = self.create_channel_queue()
        self.name_queue = self.create_name_queue()

        log.trace("Moving or rescheduling in-use channels.")
        for channel in self.get_category_channels(self.in_use_category):
            await self.move_idle_channel(channel, has_task=False)

        # Prevent the command from being used until ready.
        # The ready event wasn't used because channels could change categories between the time
        # the command is invoked and the cog is ready (e.g. if move_idle_channel wasn't called yet).
        # This may confuse users. So would potentially long delays for the cog to become ready.
        self.close_command.enabled = True

        await self.init_available()

        log.info("Cog is ready!")
        self.ready.set()

        self.report_stats()

    def report_stats(self) -> None:
        """Report the channel count stats."""
        def count(category: discord.CategoryChannel) -> int:
            # Count lazily without materialising the channel list.
            return sum(1 for _ in self.get_category_channels(category))

        self.bot.stats.gauge("help.total.in_use", count(self.in_use_category))
        self.bot.stats.gauge("help.total.available", count(self.available_category))
        self.bot.stats.gauge("help.total.dormant", count(self.dormant_category))

    @staticmethod
    def is_claimant(member: discord.Member) -> bool:
        """Return True if `member` has the 'Help Cooldown' role."""
        cooldown_role_id = constants.Roles.help_cooldown
        return any(role.id == cooldown_role_id for role in member.roles)

    def match_bot_embed(self, message: t.Optional[discord.Message],
                        description: str) -> bool:
        """Return `True` if the bot's `message`'s embed description matches `description`."""
        if not message or not message.embeds:
            return False

        embed_desc = message.embeds[0].description
        if embed_desc is discord.Embed.Empty:
            log.trace("Last message was a bot embed but it was empty.")
            return False

        if message.author != self.bot.user:
            return False
        return embed_desc.strip() == description.strip()

    async def move_idle_channel(self,
                                channel: discord.TextChannel,
                                has_task: bool = True) -> None:
        """
        Make the `channel` dormant if idle or schedule the move if still active.

        If `has_task` is True and rescheduling is required, the extant task to make the channel
        dormant will first be cancelled.
        """
        log.trace(f"Handling in-use channel #{channel} ({channel.id}).")

        # An "empty" channel gets the shorter deleted_idle_minutes timeout;
        # an active one uses the normal idle_minutes.
        if not await self.is_empty(channel):
            idle_seconds = constants.HelpChannels.idle_minutes * 60
        else:
            idle_seconds = constants.HelpChannels.deleted_idle_minutes * 60

        time_elapsed = await self.get_idle_time(channel)

        # None means the channel has no messages at all; treat it as fully idle.
        if time_elapsed is None or time_elapsed >= idle_seconds:
            log.info(
                f"#{channel} ({channel.id}) is idle longer than {idle_seconds} seconds "
                f"and will be made dormant.")

            await self.move_to_dormant(channel, "auto")
        else:
            # Cancel the existing task, if any.
            if has_task:
                self.scheduler.cancel(channel.id)

            delay = idle_seconds - time_elapsed
            log.info(f"#{channel} ({channel.id}) is still active; "
                     f"scheduling it to be moved after {delay} seconds.")

            self.scheduler.schedule_later(delay, channel.id,
                                          self.move_idle_channel(channel))

    async def move_to_bottom_position(self, channel: discord.TextChannel,
                                      category_id: int, **options) -> None:
        """
        Move the `channel` to the bottom position of `category` and edit channel attributes.

        To ensure "stable sorting", we use the `bulk_channel_update` endpoint and provide the current
        positions of the other channels in the category as-is. This should make sure that the channel
        really ends up at the bottom of the category.

        If `options` are provided, the channel will be edited after the move is completed. This is the
        same order of operations that `discord.TextChannel.edit` uses. For information on available
        options, see the documentation on `discord.TextChannel.edit`. While possible, position-related
        options should be avoided, as it may interfere with the category move we perform.
        """
        # Get a fresh copy of the category from the bot to avoid the cache mismatch issue we had.
        category = await channel_utils.try_get_channel(category_id, self.bot)

        # Keep every existing channel at its current position so only our channel moves.
        payload = [{
            "id": c.id,
            "position": c.position
        } for c in category.channels]

        # Calculate the bottom position based on the current highest position in the category. If the
        # category is currently empty, we simply use the current position of the channel to avoid making
        # unnecessary changes to positions in the guild.
        bottom_position = payload[-1][
            "position"] + 1 if payload else channel.position

        # The moved channel re-parents into the category and syncs its permissions.
        payload.append({
            "id": channel.id,
            "position": bottom_position,
            "parent_id": category.id,
            "lock_permissions": True,
        })

        # We use d.py's method to ensure our request is processed by d.py's rate limit manager
        await self.bot.http.bulk_channel_update(category.guild.id, payload)

        # Now that the channel is moved, we can edit the other attributes
        if options:
            await channel.edit(**options)

    async def move_to_available(self) -> None:
        """Make a channel available."""
        log.trace("Making a channel available.")

        candidate = await self.get_available_candidate()
        log.info(f"Making #{candidate} ({candidate.id}) available.")

        await self.send_available_message(candidate)

        log.trace(
            f"Moving #{candidate} ({candidate.id}) to the Available category.")

        await self.move_to_bottom_position(
            channel=candidate,
            category_id=constants.Categories.help_available,
        )

        self.report_stats()

    async def move_to_dormant(self, channel: discord.TextChannel,
                              caller: str) -> None:
        """
        Make the `channel` dormant.

        A caller argument is provided for metrics.
        """
        log.info(f"Moving #{channel} ({channel.id}) to the Dormant category.")

        # Drop the claimant mapping before the move so the channel is unclaimed.
        await self.help_channel_claimants.delete(channel.id)
        await self.move_to_bottom_position(
            channel=channel,
            category_id=constants.Categories.help_dormant,
        )

        self.bot.stats.incr(f"help.dormant_calls.{caller}")

        # Record session duration when a claim time exists.
        in_use_time = await self.get_in_use_time(channel.id)
        if in_use_time:
            self.bot.stats.timing("help.in_use_time", in_use_time)

        # None means the channel was never claimed, so count neither metric.
        unanswered = await self.unanswered.get(channel.id)
        if unanswered:
            self.bot.stats.incr("help.sessions.unanswered")
        elif unanswered is not None:
            self.bot.stats.incr("help.sessions.answered")

        log.trace(
            f"Position of #{channel} ({channel.id}) is actually {channel.position}."
        )
        log.trace(f"Sending dormant message for #{channel} ({channel.id}).")
        embed = discord.Embed(description=DORMANT_MSG)
        await channel.send(embed=embed)

        await self.unpin(channel)

        log.trace(f"Pushing #{channel} ({channel.id}) into the channel queue.")
        self.channel_queue.put_nowait(channel)
        self.report_stats()

    async def move_to_in_use(self, channel: discord.TextChannel) -> None:
        """Make a channel in-use and schedule it to be made dormant."""
        log.info(f"Moving #{channel} ({channel.id}) to the In Use category.")

        await self.move_to_bottom_position(
            channel=channel,
            category_id=constants.Categories.help_in_use,
        )

        # Schedule the automatic move back to dormant after the idle timeout.
        timeout = constants.HelpChannels.idle_minutes * 60
        log.trace(
            f"Scheduling #{channel} ({channel.id}) to become dormant in {timeout} sec."
        )
        self.scheduler.schedule_later(timeout, channel.id,
                                      self.move_idle_channel(channel))

        self.report_stats()

    async def notify(self) -> None:
        """
        Send a message notifying about a lack of available help channels.

        Configuration:

        * `HelpChannels.notify` - toggle notifications
        * `HelpChannels.notify_channel` - destination channel for notifications
        * `HelpChannels.notify_minutes` - minimum interval between notifications
        * `HelpChannels.notify_roles` - roles mentioned in notifications
        """
        if not constants.HelpChannels.notify:
            return

        log.trace("Notifying about lack of channels.")

        if self.last_notification:
            # Use total_seconds() rather than timedelta.seconds: the latter excludes
            # whole days, so a last notification over 24 hours old could yield a tiny
            # elapsed value and wrongly suppress a notification that is long overdue.
            elapsed = (datetime.utcnow() - self.last_notification).total_seconds()
            minimum_interval = constants.HelpChannels.notify_minutes * 60
            should_send = elapsed >= minimum_interval
        else:
            should_send = True

        if not should_send:
            log.trace(
                "Notification not sent because it's too recent since the previous one."
            )
            return

        try:
            log.trace("Sending notification message.")

            channel = self.bot.get_channel(
                constants.HelpChannels.notify_channel)
            mentions = " ".join(
                f"<@&{role}>" for role in constants.HelpChannels.notify_roles)
            # Restrict pings to exactly the configured roles.
            allowed_roles = [
                discord.Object(id_)
                for id_ in constants.HelpChannels.notify_roles
            ]

            message = await channel.send(
                f"{mentions} A new available help channel is needed but there "
                f"are no more dormant ones. Consider freeing up some in-use channels manually by "
                f"using the `{constants.Bot.prefix}dormant` command within the channels.",
                allowed_mentions=discord.AllowedMentions(everyone=False,
                                                         roles=allowed_roles))

            self.bot.stats.incr("help.out_of_channel_alerts")

            self.last_notification = message.created_at
        except Exception:
            # Handle it here cause this feature isn't critical for the functionality of the system.
            log.exception(
                "Failed to send notification about lack of dormant channels!")

    async def check_for_answer(self, message: discord.Message) -> None:
        """Checks for whether new content in a help channel comes from non-claimants."""
        channel = message.channel

        # Only messages in in-use help channels are relevant.
        if not channel_utils.is_in_category(channel,
                                            constants.Categories.help_in_use):
            return

        log.trace(
            f"Checking if #{channel} ({channel.id}) has been answered.")

        # Only channels with an entry in the unanswered cache are processed.
        if not await self.unanswered.contains(channel.id):
            return

        claimant_id = await self.help_channel_claimants.get(channel.id)
        if not claimant_id:
            # The mapping for this channel doesn't exist, we can't do anything.
            return

        # A message from anyone other than the claimant marks the channel answered.
        if claimant_id != message.author.id:
            await self.unanswered.set(channel.id, False)

    @commands.Cog.listener()
    async def on_message(self, message: discord.Message) -> None:
        """Move an available channel to the In Use category and replace it with a dormant one."""
        if message.author.bot:
            return  # Ignore messages sent by bots.

        channel = message.channel

        # Track non-claimant activity for in-use channels before any claiming logic.
        await self.check_for_answer(message)

        is_available = channel_utils.is_in_category(
            channel, constants.Categories.help_available)
        if not is_available or self.is_excluded_channel(channel):
            return  # Ignore messages outside the Available category or in excluded channels.

        log.trace(
            "Waiting for the cog to be ready before processing messages.")
        await self.ready.wait()

        log.trace(
            "Acquiring lock to prevent a channel from being processed twice..."
        )
        async with self.on_message_lock:
            log.trace(f"on_message lock acquired for {message.id}.")

            # Re-check under the lock: another message may have already claimed
            # the channel while this coroutine waited for the lock.
            if not channel_utils.is_in_category(
                    channel, constants.Categories.help_available):
                log.debug(
                    f"Message {message.id} will not make #{channel} ({channel.id}) in-use "
                    f"because another message in the channel already triggered that."
                )
                return

            log.info(
                f"Channel #{channel} was claimed by `{message.author.id}`.")
            await self.move_to_in_use(channel)
            await self.revoke_send_permissions(message.author)

            await self.pin(message)

            # Add user with channel for dormant check.
            await self.help_channel_claimants.set(channel.id,
                                                  message.author.id)

            self.bot.stats.incr("help.claimed")

            # Must use a timezone-aware datetime to ensure a correct POSIX timestamp.
            timestamp = datetime.now(timezone.utc).timestamp()
            await self.claim_times.set(channel.id, timestamp)

            # New sessions start as unanswered until a non-claimant posts.
            await self.unanswered.set(channel.id, True)

            log.trace(f"Releasing on_message lock for {message.id}.")

        # Move a dormant channel to the Available category to fill in the gap.
        # This is done last and outside the lock because it may wait indefinitely for a channel to
        # be put in the queue.
        await self.move_to_available()

    @commands.Cog.listener()
    async def on_message_delete(self, msg: discord.Message) -> None:
        """
        Reschedule an in-use channel to become dormant sooner if the channel is empty.

        The new time for the dormant task is configured with `HelpChannels.deleted_idle_minutes`.
        """
        in_use = channel_utils.is_in_category(msg.channel,
                                              constants.Categories.help_in_use)
        if not in_use:
            return

        if not await self.is_empty(msg.channel):
            return

        log.info(
            f"Claimant of #{msg.channel} ({msg.author}) deleted message, channel is empty now. Rescheduling task."
        )

        # Replace the pending dormant task with one using the shorter delay.
        self.scheduler.cancel(msg.channel.id)

        delay = constants.HelpChannels.deleted_idle_minutes * 60
        self.scheduler.schedule_later(delay, msg.channel.id,
                                      self.move_idle_channel(msg.channel))

    async def is_empty(self, channel: discord.TextChannel) -> bool:
        """Return True if there's an AVAILABLE_MSG and the messages leading up are bot messages."""
        log.trace(f"Checking if #{channel} ({channel.id}) is empty.")

        # A limit of 100 results in a single API call.
        # If AVAILABLE_MSG isn't found within 100 messages, then assume the channel is not empty.
        # Not gonna do an extensive search for it cause it's too expensive.
        async for recent_msg in channel.history(limit=100):
            if not recent_msg.author.bot:
                log.trace(f"#{channel} ({channel.id}) has a non-bot message.")
                return False

            if self.match_bot_embed(recent_msg, AVAILABLE_MSG):
                log.trace(
                    f"#{channel} ({channel.id}) has the available message embed."
                )
                return True

        return False

    async def check_cooldowns(self) -> None:
        """Remove expired cooldowns and re-schedule active ones."""
        log.trace("Checking all cooldowns to remove or re-schedule them.")
        guild = self.bot.get_guild(constants.Guild.id)
        cooldown = constants.HelpChannels.claim_minutes * 60

        for channel_id, member_id in await self.help_channel_claimants.items():
            member = guild.get_member(member_id)
            if not member:
                continue  # Member probably left the guild.

            in_use_time = await self.get_in_use_time(channel_id)

            # Use total_seconds() rather than timedelta.seconds: .seconds excludes whole
            # days, so a claim older than 24 hours would otherwise look like an active
            # cooldown and the member's role would be re-scheduled instead of removed.
            if not in_use_time or in_use_time.total_seconds() > cooldown:
                # Remove the role if no claim time could be retrieved or if the cooldown expired.
                # Since the channel is in the claimants cache, it is definitely strange for a time
                # to not exist. However, it isn't a reason to keep the user stuck with a cooldown.
                await self.remove_cooldown_role(member)
            else:
                # The member is still on a cooldown; re-schedule it for the remaining time.
                delay = cooldown - in_use_time.total_seconds()
                self.scheduler.schedule_later(
                    delay, member.id, self.remove_cooldown_role(member))

    async def add_cooldown_role(self, member: discord.Member) -> None:
        """Give `member` the help cooldown role, logging the attempt."""
        log.trace(f"Adding cooldown role for {member} ({member.id}).")
        await self._change_cooldown_role(member, member.add_roles)

    async def remove_cooldown_role(self, member: discord.Member) -> None:
        """Take the help cooldown role away from `member`, logging the attempt."""
        log.trace(f"Removing cooldown role for {member} ({member.id}).")
        await self._change_cooldown_role(member, member.remove_roles)

    async def _change_cooldown_role(self, member: discord.Member,
                                    coro_func: CoroutineFunc) -> None:
        """
        Apply or remove `member`'s cooldown role by awaiting `coro_func`, logging any errors.

        `coro_func` is intended to be `discord.Member.add_roles` or `discord.Member.remove_roles`.
        """
        guild = self.bot.get_guild(constants.Guild.id)
        cooldown_role = guild.get_role(constants.Roles.help_cooldown)

        if cooldown_role is None:
            log.warning(f"Help cooldown role ({constants.Roles.help_cooldown}) could not be found!")
            return

        # NotFound/Forbidden are HTTPException subclasses, so they must be handled first.
        try:
            await coro_func(cooldown_role)
        except discord.NotFound:
            log.debug(f"Failed to change role for {member} ({member.id}): member not found")
        except discord.Forbidden:
            log.debug(
                f"Forbidden to change role for {member} ({member.id}); "
                f"possibly due to role hierarchy"
            )
        except discord.HTTPException as e:
            log.error(f"Failed to change role for {member} ({member.id}): {e.status} {e.code}")

    async def revoke_send_permissions(self, member: discord.Member) -> None:
        """
        Disallow `member` to send messages in the Available category for a certain time.

        The time until permissions are reinstated can be configured with
        `HelpChannels.claim_minutes`.
        """
        log.trace(
            f"Revoking {member}'s ({member.id}) send message permissions in the Available category."
        )

        await self.add_cooldown_role(member)

        # A task may already exist if the user somehow bypassed the lack of
        # permissions (e.g. user is guild owner); replace it rather than stack.
        if member.id in self.scheduler:
            self.scheduler.cancel(member.id)

        cooldown_seconds = constants.HelpChannels.claim_minutes * 60
        self.scheduler.schedule_later(
            cooldown_seconds, member.id, self.remove_cooldown_role(member))

    async def send_available_message(self,
                                     channel: discord.TextChannel) -> None:
        """Send the available message by editing a dormant message or sending a new message."""
        channel_info = f"#{channel} ({channel.id})"
        log.trace(f"Sending available message in {channel_info}.")

        available_embed = discord.Embed(description=AVAILABLE_MSG)
        last_message = await self.get_last_message(channel)

        if self.match_bot_embed(last_message, DORMANT_MSG):
            # Recycle the dormant notice instead of adding another message.
            log.trace(f"Found dormant message {last_message.id} in {channel_info}; editing it.")
            await last_message.edit(embed=available_embed)
        else:
            log.trace(f"Dormant message not found in {channel_info}; sending a new message.")
            await channel.send(embed=available_embed)

    async def pin_wrapper(self, msg_id: int, channel: discord.TextChannel, *,
                          pin: bool) -> bool:
        """
        Pin message `msg_id` in `channel` if `pin` is True or unpin if it's False.

        Return True if successful and False otherwise.
        """
        channel_str = f"#{channel} ({channel.id})"

        if pin:
            func, verb = self.bot.http.pin_message, "pin"
        else:
            func, verb = self.bot.http.unpin_message, "unpin"

        try:
            await func(channel.id, msg_id)
        except discord.HTTPException as e:
            # 10008 is Discord's "Unknown Message" error code.
            if e.code == 10008:
                log.debug(f"Message {msg_id} in {channel_str} doesn't exist; can't {verb}.")
            else:
                log.exception(
                    f"Error {verb}ning message {msg_id} in {channel_str}: {e.status} ({e.code})"
                )
            return False

        log.trace(f"{verb.capitalize()}ned message {msg_id} in {channel_str}.")
        return True

    async def pin(self, message: discord.Message) -> None:
        """Pin an initial question `message` and store it in a cache."""
        pinned = await self.pin_wrapper(message.id, message.channel, pin=True)
        if pinned:
            # Only cache the message if the pin actually succeeded.
            await self.question_messages.set(message.channel.id, message.id)

    async def unpin(self, channel: discord.TextChannel) -> None:
        """Unpin the initial question message sent in `channel`."""
        msg_id = await self.question_messages.pop(channel.id)
        if msg_id is not None:
            await self.pin_wrapper(msg_id, channel, pin=False)
        else:
            log.debug(f"#{channel} ({channel.id}) doesn't have a message pinned.")

    async def wait_for_dormant_channel(self) -> discord.TextChannel:
        """Wait for a dormant channel to become available in the queue and return it."""
        log.trace("Waiting for a dormant channel.")

        # Track the pending get() so it can be cancelled/inspected elsewhere.
        get_task = asyncio.create_task(self.channel_queue.get())
        self.queue_tasks.append(get_task)
        channel = await get_task

        log.trace(f"Channel #{channel} ({channel.id}) finally retrieved from the queue.")
        self.queue_tasks.remove(get_task)

        return channel
Exemple #24
0
class AdventOfCode(commands.Cog):
    """Advent of Code festivities! Ho Ho Ho!"""

    # Redis Cache for linking Discord IDs to Advent of Code usernames
    # RedisCache[member_id: aoc_username_string]
    account_links = RedisCache()

    # A dict with keys of member_ids to block from getting the role
    # RedisCache[member_id: None]
    completionist_block_list = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot

        self._base_url = f"https://adventofcode.com/{AocConfig.year}"
        self.global_leaderboard_url = f"https://adventofcode.com/{AocConfig.year}/leaderboard"

        self.about_aoc_filepath = Path("./bot/resources/events/advent_of_code/about.json")
        self.cached_about_aoc = self._build_about_embed()

        # Kick off the daily puzzle notification in the background.
        self.notification_task = self.bot.loop.create_task(
            _helpers.new_puzzle_notification(self.bot))
        self.notification_task.set_name("Daily AoC Notification")
        self.notification_task.add_done_callback(_helpers.background_task_callback)

        # Kick off the presence countdown in the background.
        self.status_task = self.bot.loop.create_task(_helpers.countdown_status(self.bot))
        self.status_task.set_name("AoC Status Countdown")
        self.status_task.add_done_callback(_helpers.background_task_callback)

        # Don't start task while event isn't running
        # self.completionist_task.start()

    @tasks.loop(minutes=10.0)
    async def completionist_task(self) -> None:
        """
        Give members who have completed all 50 AoC stars the completionist role.

        Runs on a schedule, as defined in the `tasks.loop` decorator. Members on
        the block list never receive the role.
        """
        await self.bot.wait_until_guild_available()
        guild = self.bot.get_guild(Client.guild)
        completionist_role = guild.get_role(Roles.aoc_completionist)
        if completionist_role is None:
            log.warning(
                "Could not find the AoC completionist role; cancelling completionist task."
            )
            self.completionist_task.cancel()
            return

        # Invert the cache (member_id -> aoc_name) so members can be looked up by AoC name.
        aoc_name_to_member_id = {
            aoc_name: member_id
            for member_id, aoc_name in await self.account_links.items()
        }

        try:
            leaderboard = await _helpers.fetch_leaderboard()
        except _helpers.FetchingLeaderboardFailedError:
            await self.bot.send_log(
                "Unable to fetch AoC leaderboard during role sync.")
            return

        placement_leaderboard = json.loads(
            leaderboard["placement_leaderboard"])

        for member_aoc_info in placement_leaderboard.values():
            # Only give the role to people who have completed all 50 stars.
            if member_aoc_info["stars"] != 50:
                continue

            aoc_name = member_aoc_info[
                "name"] or f"Anonymous #{member_aoc_info['id']}"

            member_id = aoc_name_to_member_id.get(aoc_name)
            if not member_id:
                log.debug(
                    f"Could not find member_id for {member_aoc_info['name']}, not giving role."
                )
                continue

            member = await members.get_or_fetch_member(guild, member_id)
            if member is None:
                log.debug(f"Could not find {member_id}, not giving role.")
                continue

            if completionist_role in member.roles:
                log.debug(
                    f"{member.name} ({member.mention}) already has the completionist role."
                )
                continue

            if not await self.completionist_block_list.contains(member_id):
                log.debug(
                    f"Giving completionist role to {member.name} ({member.mention})."
                )
                await members.handle_role_change(member, member.add_roles,
                                                 completionist_role)

    @commands.group(name="adventofcode", aliases=("aoc",))
    @whitelist_override(channels=AOC_WHITELIST)
    async def adventofcode_group(self, ctx: commands.Context) -> None:
        """All of the Advent of Code commands."""
        # With no subcommand invoked, show the group's help instead.
        if ctx.invoked_subcommand is None:
            await invoke_help_command(ctx)

    @with_role(Roles.admins)
    @adventofcode_group.command(
        name="block",
        brief="Block a user from getting the completionist role.",
    )
    async def block_from_role(self, ctx: commands.Context,
                              member: discord.Member) -> None:
        """Block the given member from receiving the AoC completionist role, removing it from them if needed."""
        role = ctx.guild.get_role(Roles.aoc_completionist)
        if role in member.roles:
            await member.remove_roles(role)

        # The block list behaves like a set; the stored value is irrelevant.
        await self.completionist_block_list.set(member.id, "sentinel")
        await ctx.send(
            f":+1: Blocked {member.mention} from getting the AoC completionist role."
        )

    @commands.guild_only()
    @adventofcode_group.command(
        name="subscribe",
        aliases=("sub", "notifications", "notify", "notifs", "unsubscribe",
                 "unsub"),
        help=f"NOTE: This command has been moved to {PYTHON_PREFIX}subscribe",
    )
    @whitelist_override(channels=AOC_WHITELIST)
    async def aoc_subscribe(self, ctx: commands.Context) -> None:
        """
        Deprecated role command.

        The functionality now lives in the main bot; this stub only points users
        at the new location and will be removed in the future.
        """
        raise MovedCommandError(f"{PYTHON_PREFIX}subscribe")

    @adventofcode_group.command(name="countdown",
                                aliases=("count", "c"),
                                brief="Return time left until next day")
    @whitelist_override(channels=AOC_WHITELIST)
    async def aoc_countdown(self, ctx: commands.Context) -> None:
        """Return time left until next day."""
        if _helpers.is_in_advent():
            tomorrow, _ = _helpers.time_left_to_est_midnight()
            await ctx.send(
                f"Day {tomorrow.day} starts <t:{int(tomorrow.timestamp())}:R>.")
            return

        now = arrow.now(_helpers.EST)
        # December 1st of this year and of next year: the nearest one that is
        # not in the past marks the start of the next event.
        candidates = (
            arrow.get(datetime(now.year, 12, 1), _helpers.EST),
            arrow.get(datetime(now.year + 1, 12, 1), _helpers.EST),
        )
        # timedelta() is a zero-duration delta, so this keeps non-negative deltas only.
        delta = min(dec_first - now for dec_first in candidates
                    if dec_first - now >= timedelta())

        next_aoc_timestamp = int((now + delta).timestamp())

        await ctx.send("The Advent of Code event is not currently running. "
                       f"The next event will start <t:{next_aoc_timestamp}:R>."
                       )

    @adventofcode_group.command(name="about",
                                aliases=("ab", "info"),
                                brief="Learn about Advent of Code")
    @whitelist_override(channels=AOC_WHITELIST)
    async def about_aoc(self, ctx: commands.Context) -> None:
        """Respond with an explanation of all things Advent of Code."""
        # The embed is built once in __init__ and reused for every invocation.
        await ctx.send(embed=self.cached_about_aoc)

    @commands.guild_only()
    @adventofcode_group.command(
        name="join",
        aliases=("j",),
        brief="Learn how to join the leaderboard (via DM)")
    @whitelist_override(channels=AOC_WHITELIST)
    async def join_leaderboard(self, ctx: commands.Context) -> None:
        """DM the user the information for joining the Python Discord leaderboard."""
        now = datetime.now()

        # Joining is only allowed in the run up to AoC (Nov/Dec of the event
        # year) and in the January immediately following it.
        in_run_up = (now.month in (Month.NOVEMBER.value, Month.DECEMBER.value)
                     and now.year == AocConfig.year)
        in_january_after = (now.month == Month.JANUARY.value
                            and now.year == AocConfig.year + 1)
        if not (in_run_up or in_january_after):
            await ctx.send(
                f"The Python Discord leaderboard for {now.year} is not yet available!"
            )
            return

        author = ctx.author
        log.info(
            f"{author.name} ({author.id}) has requested a PyDis AoC leaderboard code"
        )

        is_staff = AocConfig.staff_leaderboard_id and any(
            role.id == Roles.helpers for role in author.roles)
        if is_staff:
            join_code = AocConfig.leaderboards[
                AocConfig.staff_leaderboard_id].join_code
        else:
            try:
                join_code = await _helpers.get_public_join_code(author)
            except _helpers.FetchingLeaderboardFailedError:
                await ctx.send(
                    ":x: Failed to get join code! Notified maintainers.")
                return

        if not join_code:
            log.error(
                f"Failed to get a join code for user {author} ({author.id})")
            error_embed = discord.Embed(
                title="Unable to get join code",
                description=
                "Failed to get a join code to one of our boards. Please notify staff.",
                colour=discord.Colour.red(),
            )
            await ctx.send(embed=error_embed)
            return

        info_str = [
            "To join our leaderboard, follow these steps:",
            "• Log in on https://adventofcode.com",
            "• Head over to https://adventofcode.com/leaderboard/private",
            f"• Use this code `{join_code}` to join the Python Discord leaderboard!",
        ]
        try:
            await author.send("\n".join(info_str))
        except discord.errors.Forbidden:
            log.debug(
                f"{author.name} ({author.id}) has disabled DMs from server members"
            )
            await ctx.send(
                f":x: {author.mention}, please (temporarily) enable DMs to receive the join code"
            )
        else:
            await ctx.message.add_reaction(Emojis.envelope)

    @in_month(Month.NOVEMBER, Month.DECEMBER, Month.JANUARY)
    @adventofcode_group.command(
        name="link",
        aliases=("connect", ),
        brief="Tie your Discord account with your Advent of Code name.")
    @whitelist_override(channels=AOC_WHITELIST)
    async def aoc_link_account(self,
                               ctx: commands.Context,
                               *,
                               aoc_name: str = None) -> None:
        """
        Link your Discord Account to your Advent of Code name.

        Stored in a Redis Cache with the format of `Discord ID: Advent of Code Name`
        """
        cache_items = await self.account_links.items()
        cache_aoc_names = [value for _, value in cache_items]

        if aoc_name:
            # Let's check the current values in the cache to make sure it isn't already tied to a different account
            if aoc_name == await self.account_links.get(ctx.author.id):
                await ctx.reply(f"{aoc_name} is already tied to your account.")
                return
            elif aoc_name in cache_aoc_names:
                log.info(
                    f"{ctx.author} ({ctx.author.id}) tried to connect their account to {aoc_name},"
                    " but it's already connected to another user.")
                await ctx.reply(
                    f"{aoc_name} is already tied to another account."
                    " Please contact an admin if you believe this is an error."
                )
                return

            # Update an existing link
            if old_aoc_name := await self.account_links.get(ctx.author.id):
                log.info(
                    f"Changing link for {ctx.author} ({ctx.author.id}) from {old_aoc_name} to {aoc_name}."
                )
                await self.account_links.set(ctx.author.id, aoc_name)
                await ctx.reply(
                    f"Your linked account has been changed to {aoc_name}.")
            else:
                # Create a new link
                log.info(
                    f"Linking {ctx.author} ({ctx.author.id}) to account {aoc_name}."
                )
                await self.account_links.set(ctx.author.id, aoc_name)
                await ctx.reply(
                    f"You have linked your Discord ID to {aoc_name}.")
        else:
Exemple #25
0
class HacktoberStats(commands.Cog):
    """Hacktoberfest statistics Cog."""

    # Stores mapping of user IDs and GitHub usernames
    linked_accounts = RedisCache()

    def __init__(self, bot: commands.Bot):
        """Store a reference to the bot for use by the commands in this cog."""
        self.bot = bot

    @in_month(Month.SEPTEMBER, Month.OCTOBER, Month.NOVEMBER)
    @commands.group(name="hacktoberstats",
                    aliases=("hackstats",),
                    invoke_without_command=True)
    @override_in_channel(HACKTOBER_WHITELIST)
    async def hacktoberstats_group(self,
                                   ctx: commands.Context,
                                   github_username: str = None) -> None:
        """
        Display an embed for a user's Hacktoberfest contributions.

        If invoked without a subcommand or github_username, get the invoking user's stats if they've
        linked their Discord name to GitHub using .stats link. If invoked with a github_username,
        get that user's contributions
        """
        if not github_username:
            author_id, author_mention = self._author_mention_from_context(ctx)

            # Without an explicit username, fall back to the invoker's linked account.
            if not await self.linked_accounts.contains(author_id):
                await ctx.send(
                    f"{author_mention}, you have not linked a GitHub account\n\n"
                    f"You can link your GitHub account using:\n```{ctx.prefix}hackstats link github_username```\n"
                    f"Or query GitHub stats directly using:\n```{ctx.prefix}hackstats github_username```"
                )
                return

            github_username = await self.linked_accounts.get(author_id)
            logging.info(
                f"Getting stats for {author_id} linked GitHub account '{github_username}'"
            )

        await self.get_stats(ctx, github_username)

    @in_month(Month.SEPTEMBER, Month.OCTOBER, Month.NOVEMBER)
    @hacktoberstats_group.command(name="link")
    @override_in_channel(HACKTOBER_WHITELIST)
    async def link_user(self,
                        ctx: commands.Context,
                        github_username: str = None) -> None:
        """
        Link the invoking user's Github github_username to their Discord ID.

        Linked users are stored in Redis: User ID => GitHub Username.
        """
        author_id, author_mention = self._author_mention_from_context(ctx)

        if not github_username:
            logging.info(
                f"{author_id} tried to link a GitHub account but didn't provide a username"
            )
            await ctx.send(
                f"{author_mention}, a GitHub username is required to link your account"
            )
            return

        if await self.linked_accounts.contains(author_id):
            previous_username = await self.linked_accounts.get(author_id)
            logging.info(
                f"{author_id} has changed their github link from '{previous_username}' to '{github_username}'"
            )
            await ctx.send(
                f"{author_mention}, your GitHub username has been updated to: '{github_username}'"
            )
        else:
            logging.info(
                f"{author_id} has added a github link to '{github_username}'")
            await ctx.send(
                f"{author_mention}, your GitHub username has been added")

        await self.linked_accounts.set(author_id, github_username)

    @in_month(Month.SEPTEMBER, Month.OCTOBER, Month.NOVEMBER)
    @hacktoberstats_group.command(name="unlink")
    @override_in_channel(HACKTOBER_WHITELIST)
    async def unlink_user(self, ctx: commands.Context) -> None:
        """Remove the invoking user's account link from the log."""
        author_id, author_mention = self._author_mention_from_context(ctx)

        # pop() removes the entry and returns the previous value (None if absent).
        previously_linked = await self.linked_accounts.pop(author_id, None)
        if not previously_linked:
            await ctx.send(
                f"{author_mention}, you do not currently have a linked GitHub account"
            )
            logging.info(
                f"{author_id} tried to unlink their GitHub account but no account was linked"
            )
        else:
            await ctx.send(
                f"{author_mention}, your GitHub profile has been unlinked")
            logging.info(f"{author_id} has unlinked their GitHub account")

    async def get_stats(self, ctx: commands.Context,
                        github_username: str) -> None:
        """
        Query GitHub's API for PRs created by a GitHub user during the month of October.

        PRs with an 'invalid' or 'spam' label are ignored

        For PRs created after October 3rd, they have to be in a repository that has a
        'hacktoberfest' topic, unless the PR is labelled 'hacktoberfest-accepted' for it
        to count.

        If a valid github_username is provided, an embed is generated and posted to the channel

        Otherwise, post a helpful error message
        """
        async with ctx.typing():
            prs = await self.get_october_prs(github_username)

            if not prs:
                await ctx.send(
                    f"No valid October GitHub contributions found for '{github_username}'"
                )
                return

            stats_embed = await self.build_embed(github_username, prs)
            await ctx.send('Here are some stats!', embed=stats_embed)

    async def build_embed(self, github_username: str,
                          prs: List[dict]) -> discord.Embed:
        """Return a stats embed built from github_username's PRs."""
        # This log line contained the literal '******' where the username should
        # be interpolated (a redaction artifact); restored to match every other
        # log call in this cog.
        logging.info(
            f"Building Hacktoberfest embed for GitHub user: '{github_username}'"
        )
        in_review, accepted = await self._categorize_prs(prs)

        n = len(accepted) + len(in_review)  # total number of PRs
        if n >= PRS_FOR_SHIRT:
            shirtstr = f"**{github_username} is eligible for a T-shirt or a tree!**"
        elif n == PRS_FOR_SHIRT - 1:
            shirtstr = f"**{github_username} is 1 PR away from a T-shirt or a tree!**"
        else:
            shirtstr = f"**{github_username} is {PRS_FOR_SHIRT - n} PRs away from a T-shirt or a tree!**"

        stats_embed = discord.Embed(
            title=f"{github_username}'s Hacktoberfest",
            color=discord.Color(0x9c4af7),
            description=(f"{github_username} has made {n} valid "
                         f"{self._contributionator(n)} in "
                         f"October\n\n"
                         f"{shirtstr}\n\n"))

        stats_embed.set_thumbnail(
            url=f"https://www.github.com/{github_username}.png")
        stats_embed.set_author(
            name="Hacktoberfest",
            url="https://hacktoberfest.digitalocean.com",
            icon_url=
            "https://avatars1.githubusercontent.com/u/35706162?s=200&v=4")

        # This will handle when there are no PRs in_review or accepted.
        review_str = self._build_prs_string(in_review,
                                            github_username) or "None"
        accepted_str = self._build_prs_string(accepted,
                                              github_username) or "None"
        stats_embed.add_field(name=":clock1: In Review", value=review_str)
        stats_embed.add_field(name=":tada: Accepted", value=accepted_str)

        logging.info(
            f"Hacktoberfest PR built for GitHub user '{github_username}'")
        return stats_embed

    @staticmethod
    async def get_october_prs(github_username: str) -> Union[List[dict], None]:
        """
        Query GitHub's API for PRs created during the month of October by github_username.

        PRs with an 'invalid' or 'spam' label are ignored unless it is merged or approved

        For PRs created after October 3rd, they have to be in a repository that has a
        'hacktoberfest' topic, unless the PR is labelled 'hacktoberfest-accepted' for it
        to count.

        If PRs are found, return a list of dicts with basic PR information

        For each PR:
        {
            "repo_url": str
            "repo_shortname": str (e.g. "python-discord/sir-lancebot")
            "created_at": datetime.datetime
            "number": int
        }

        Otherwise, return None
        """
        # Several log lines here contained the literal '******' where the
        # username should be interpolated (a redaction artifact); restored.
        logging.info(
            f"Fetching Hacktoberfest Stats for GitHub user: '{github_username}'"
        )
        base_url = "https://api.github.com/search/issues?q="
        action_type = "pr"
        is_query = "public"
        not_query = "draft"
        date_range = f"{CURRENT_YEAR}-09-30T10:00Z..{CURRENT_YEAR}-11-01T12:00Z"
        per_page = "300"
        query_url = (f"{base_url}"
                     f"+type:{action_type}"
                     f"+is:{is_query}"
                     f"+author:{github_username}"
                     f"+-is:{not_query}"
                     f"+created:{date_range}"
                     f"&per_page={per_page}")
        logging.debug(f"GitHub query URL generated: {query_url}")

        jsonresp = await HacktoberStats._fetch_url(query_url, REQUEST_HEADERS)
        if "message" in jsonresp.keys():
            # One of the parameters is invalid, short circuit for now
            api_message = jsonresp["errors"][0]["message"]

            # Ignore logging non-existent users or users we do not have permission to see
            if api_message == GITHUB_NONEXISTENT_USER_MESSAGE:
                logging.debug(
                    f"No GitHub user found named '{github_username}'")
            else:
                logging.error(
                    f"GitHub API request for '{github_username}' failed with message: {api_message}"
                )
            return

        if jsonresp["total_count"] == 0:
            # Short circuit if there aren't any PRs
            logging.info(
                f"No Hacktoberfest PRs found for GitHub user: '{github_username}'"
            )
            return

        logging.info(
            f"Found {len(jsonresp['items'])} Hacktoberfest PRs for GitHub user: '{github_username}'"
        )
        outlist = []  # list of pr information dicts that will get returned
        oct3 = datetime(int(CURRENT_YEAR), 10, 3, 23, 59, 59, tzinfo=None)
        hackto_topics = {
        }  # cache whether each repo has the appropriate topic (bool values)
        for item in jsonresp["items"]:
            shortname = HacktoberStats._get_shortname(item["repository_url"])
            itemdict = {
                "repo_url":
                f"https://www.github.com/{shortname}",
                "repo_shortname":
                shortname,
                "created_at":
                datetime.strptime(item["created_at"], r"%Y-%m-%dT%H:%M:%SZ"),
                "number":
                item["number"]
            }

            # if the PR has 'invalid' or 'spam' labels, the PR must be
            # either merged or approved for it to be included
            if HacktoberStats._has_label(item, ["invalid", "spam"]):
                if not await HacktoberStats._is_accepted(itemdict):
                    continue

            # PRs before oct 3 no need to check for topics
            # continue the loop if 'hacktoberfest-accepted' is labelled then
            # there is no need to check for its topics
            if itemdict["created_at"] < oct3:
                outlist.append(itemdict)
                continue

            # checking PR's labels for "hacktoberfest-accepted"
            if HacktoberStats._has_label(item, "hacktoberfest-accepted"):
                outlist.append(itemdict)
                continue

            # no need to query github if repo topics are fetched before already
            if shortname in hackto_topics.keys():
                if hackto_topics[shortname]:
                    outlist.append(itemdict)
                    continue
            # fetch topics for the pr repo
            topics_query_url = f"https://api.github.com/repos/{shortname}/topics"
            logging.debug(
                f"Fetching repo topics for {shortname} with url: {topics_query_url}"
            )
            jsonresp2 = await HacktoberStats._fetch_url(
                topics_query_url, GITHUB_TOPICS_ACCEPT_HEADER)
            if jsonresp2.get("names") is None:
                logging.error(
                    f"Error fetching topics for {shortname}: {jsonresp2['message']}"
                )
                return

            # PRs after oct 3 that doesn't have 'hacktoberfest-accepted' label
            # must be in repo with 'hacktoberfest' topic
            if "hacktoberfest" in jsonresp2["names"]:
                hackto_topics[
                    shortname] = True  # cache result in the dict for later use if needed
                outlist.append(itemdict)
        return outlist

    @staticmethod
    async def _fetch_url(url: str, headers: dict) -> dict:
        """Fetch `url` with the given request `headers` and return the JSON-decoded body."""
        # A short-lived session per call; acceptable at this cog's request volume.
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=headers) as response:
                return await response.json()

    @staticmethod
    def _has_label(pr: dict, labels: Union[List[str], str]) -> bool:
        """
        Check if a PR has label 'labels'.

        'labels' can be a string or a list of strings, if it's a list of strings
        it will return true if any of the labels match.
        """
        if not pr.get("labels"):  # if PR has no labels
            return False
        if (isinstance(labels, str)) and (any(
                label["name"].casefold() == labels for label in pr["labels"])):
            return True
        for item in labels:
            if any(label["name"].casefold() == item for label in pr["labels"]):
                return True
        return False

    @staticmethod
    async def _is_accepted(pr: dict) -> bool:
        """
        Check if a PR is merged, approved, or labelled hacktoberfest-accepted.

        `pr` must contain 'repo_shortname' and 'number' keys. Returns False
        (after logging) if the GitHub API responds with an error message.
        """
        # Check merge status first.
        query_url = f"https://api.github.com/repos/{pr['repo_shortname']}/pulls/{pr['number']}"
        jsonresp = await HacktoberStats._fetch_url(query_url, REQUEST_HEADERS)

        if "message" in jsonresp.keys():
            logging.error(
                f"Error fetching PR stats for #{pr['number']} in repo {pr['repo_shortname']}:\n"
                f"{jsonresp['message']}")
            return False
        if ("merged" in jsonresp.keys()) and jsonresp["merged"]:
            return True

        # Check for the label, using `jsonresp` which has the label information.
        if HacktoberStats._has_label(jsonresp, "hacktoberfest-accepted"):
            return True

        # Check for an approving review.
        jsonresp2 = await HacktoberStats._fetch_url(query_url + "/reviews",
                                                    REQUEST_HEADERS)
        if isinstance(jsonresp2, dict):
            # An unsuccessful request yields a dict with the error in 'message';
            # a successful one yields a list of review objects.
            logging.error(
                f"Error fetching PR reviews for #{pr['number']} in repo {pr['repo_shortname']}:\n"
                f"{jsonresp2['message']}")
            return False
        if not jsonresp2:  # the PR has no reviews
            return False

        # GitHub review objects expose their verdict under the 'state' key;
        # the previous code read a non-existent 'status' key, so approvals
        # were never detected here.
        return any(review.get("state") == "APPROVED" for review in jsonresp2)

    @staticmethod
    def _get_shortname(in_url: str) -> str:
        """
        Extract shortname from https://api.github.com/repos/* URL.

        e.g. "https://api.github.com/repos/python-discord/sir-lancebot"
             |
             V
             "python-discord/sir-lancebot"
        """
        exp = r"https?:\/\/api.github.com\/repos\/([/\-\_\.\w]+)"
        return re.findall(exp, in_url)[0]

    @staticmethod
    async def _categorize_prs(prs: List[dict]) -> tuple:
        """
        Categorize PRs into 'in_review' and 'accepted' and return them as a tuple.

        A PR created less than `REVIEW_DAYS` days ago is 'in_review'. Older PRs
        are 'accepted' when created on or before Oct 3, or when merged,
        approved, or labelled 'hacktoberfest-accepted'.
        """
        now = datetime.now()
        oct3 = datetime(CURRENT_YEAR, 10, 3, 23, 59, 59, tzinfo=None)
        in_review, accepted = [], []

        for pr in prs:
            review_period_ends = pr["created_at"] + timedelta(REVIEW_DAYS)
            if review_period_ends > now:
                in_review.append(pr)
            elif pr["created_at"] <= oct3 or await HacktoberStats._is_accepted(pr):
                accepted.append(pr)

        return in_review, accepted

    @staticmethod
    def _build_prs_string(prs: List[tuple], user: str) -> str:
        """
        Builds a discord embed compatible string for a list of PRs.

        Repository name with the link to pull requests authored by 'user' for
        each PR.
        """
        base_url = "https://www.github.com/"
        str_list = []
        repo_list = [pr["repo_shortname"] for pr in prs]
        prs_list = Counter(repo_list).most_common(5)  # get first 5 counted PRs
        more = len(prs) - sum(i[1] for i in prs_list)

        for pr in prs_list:
            # for example: https://www.github.com/python-discord/bot/pulls/octocat
            # will display pull requests authored by octocat.
            # pr[1] is the number of PRs to the repo
            string = f"{pr[1]} to [{pr[0]}]({base_url}{pr[0]}/pulls/{user})"
            str_list.append(string)
        if more:
            str_list.append(f"...and {more} more")

        return "\n".join(str_list)

    @staticmethod
    def _contributionator(n: int) -> str:
        """Return "contribution" or "contributions" based on the value of n."""
        if n == 1:
            return "contribution"
        else:
            return "contributions"

    @staticmethod
    def _author_mention_from_context(ctx: commands.Context) -> Tuple:
        """Return the message author's stringified ID and mention string from `ctx`."""
        author = ctx.message.author
        return str(author.id), author.mention
Exemple #26
0
class Incidents(Cog):
    """
    Automation for the #incidents channel.

    This cog does not provide a command API, it only reacts to the following events.

    On start-up:
        * Crawl #incidents and add missing `Signal` emoji where appropriate
        * This is to retro-actively add the available options for messages which
          were sent while the bot wasn't listening
        * Pinned messages and message starting with # do not qualify as incidents
        * See: `crawl_incidents`

    On message:
        * Run message through `extract_message_links` and send them into the channel
        * Add `Signal` member emoji if message qualifies as an incident
        * Ignore messages starting with #
            * Use this if verbal communication is necessary
            * Each such message must be deleted manually once appropriate
        * See: `on_message`

    On reaction:
        * Remove reaction if not permitted
            * User does not have any of the roles in `ALLOWED_ROLES`
            * Used emoji is not a `Signal` member
        * If `Signal.ACTIONED` or `Signal.NOT_ACTIONED` were chosen, attempt to
          relay the incident message to #incidents-archive
        * If relay successful, delete original message
        * Delete quotation message if cached
        * See: `on_raw_reaction_add`

    Please refer to function docstrings for implementation details.
    """

    # This dictionary maps an incident report message to the message link embed's ID
    # RedisCache[discord.Message.id, discord.Message.id]
    message_link_embeds_cache = RedisCache()

    def __init__(self, bot: Bot) -> None:
        """Prepare `event_lock` and schedule `crawl_task` on start-up."""
        self.bot = bot
        self.incidents_webhook = None  # set by `fetch_webhook` once resolved
        self.event_lock = asyncio.Lock()

        # Kick off background work: fetch the webhook used for message link
        # embeds, and crawl #incidents for messages missing signal emoji.
        scheduling.create_task(self.fetch_webhook(), event_loop=self.bot.loop)
        self.crawl_task = scheduling.create_task(
            self.crawl_incidents(), event_loop=self.bot.loop
        )

    async def fetch_webhook(self) -> None:
        """Fetch the incidents webhook object, so we can post message link embeds to it."""
        await self.bot.wait_until_guild_available()

        try:
            webhook = await self.bot.fetch_webhook(Webhooks.incidents)
        except discord.HTTPException:
            log.error(
                f"Failed to fetch incidents webhook with id `{Webhooks.incidents}`."
            )
        else:
            self.incidents_webhook = webhook

    async def crawl_incidents(self) -> None:
        """
        Crawl #incidents and add missing emoji where necessary.

        This catches up on incidents reported while the bot wasn't listening.
        After adding each reaction we sleep briefly to stay clear of ratelimits.

        Once this task is scheduled, listeners that change messages should await it.
        The crawl assumes that the channel history doesn't change as we go over it.

        Behaviour is configured by: `CRAWL_LIMIT`, `CRAWL_SLEEP`.
        """
        await self.bot.wait_until_guild_available()
        incidents: discord.TextChannel = self.bot.get_channel(Channels.incidents)

        log.debug(
            f"Crawling messages in #incidents: {CRAWL_LIMIT=}, {CRAWL_SLEEP=}")
        async for message in incidents.history(limit=CRAWL_LIMIT):
            if not is_incident(message):
                log.trace(f"Skipping message {message.id}: not an incident")
            elif has_signals(message):
                log.trace(
                    f"Skipping message {message.id}: already has all signals")
            else:
                await add_signals(message)
                await asyncio.sleep(CRAWL_SLEEP)

        log.debug("Crawl task finished!")

    async def archive(self, incident: discord.Message, outcome: Signal,
                      actioned_by: discord.Member) -> bool:
        """
        Relay an embed representation of `incident` to the #incidents-archive channel.

        The relayed information is:
            * Incident message content (as embed description)
            * Incident attachment (if image, shown in archive embed)
            * Incident author name (as webhook author)
            * Incident author avatar (as webhook avatar)
            * Resolution signal `outcome` (as embed colour & footer)
            * Moderator `actioned_by` (name & discriminator shown in footer)

        If `incident` contains an attachment, we try to add it to the archive embed. There is
        no handling of extensions / file types - the attachment file is simply dispatched with
        the webhook, and shown in the embed if it can be displayed. Testing indicates that an
        attachment which cannot be displayed (e.g. a text file) is invisible, with no error.

        Return True if the relay finishes successfully. On any failure, return False -
        meaning not all information was relayed and the original message must not be
        deleted, as we would lose some information.
        """
        log.info(
            f"Archiving incident: {incident.id} (outcome: {outcome}, actioned by: {actioned_by})"
        )
        embed, attachment_file = await make_embed(incident, outcome,
                                                  actioned_by)

        try:
            webhook = await self.bot.fetch_webhook(Webhooks.incidents_archive)
            await webhook.send(
                embed=embed,
                username=sub_clyde(incident.author.name),
                avatar_url=incident.author.display_avatar.url,
                file=attachment_file,
            )
        except Exception:
            log.exception(
                f"Failed to archive incident {incident.id} to #incidents-archive"
            )
            return False

        log.trace("Message archived successfully!")
        return True

    def make_confirmation_task(self,
                               incident: discord.Message,
                               timeout: int = 5) -> asyncio.Task:
        """
        Create a task to wait `timeout` seconds for `incident` to be deleted.

        If `timeout` passes, this will raise `asyncio.TimeoutError`, signaling that we haven't
        been able to confirm that the message was deleted.
        """
        log.trace(
            f"Confirmation task will wait {timeout=} seconds for {incident.id=} to be deleted"
        )

        def is_incident_deletion(payload: discord.RawReactionActionEvent) -> bool:
            # Only a delete event for this exact message counts as confirmation.
            return payload.message_id == incident.id

        return scheduling.create_task(
            self.bot.wait_for(event="raw_message_delete",
                              check=is_incident_deletion,
                              timeout=timeout),
            event_loop=self.bot.loop,
        )

    async def _remove_reaction_safely(self, incident: discord.Message,
                                      reaction: str,
                                      member: discord.Member) -> None:
        """Remove `member`'s `reaction` from `incident`, tolerating an already-deleted target."""
        try:
            await incident.remove_reaction(reaction, member)
        except discord.NotFound:
            log.trace(
                "Couldn't remove reaction because the reaction or its message was deleted"
            )

    async def process_event(self, reaction: str, incident: discord.Message,
                            member: discord.Member) -> None:
        """
        Process a `reaction_add` event in #incidents.

        First, we check that the reaction is a recognized `Signal` member, and that it was sent by
        a permitted user (at least one role in `ALLOWED_ROLES`). If not, the reaction is removed.

        If the reaction was either `Signal.ACTIONED` or `Signal.NOT_ACTIONED`, we attempt to relay
        the report to #incidents-archive. If successful, the original message is deleted.

        We do not release `event_lock` until we receive the corresponding `message_delete` event.
        This ensures that if there is a racing event awaiting the lock, it will fail to find the
        message, and will abort. There is a `timeout` to ensure that this doesn't hold the lock
        forever should something go wrong.

        Deletes cache value (`message_link_embeds_cache`) of `incident` if it exists. It then removes the
        webhook message for that particular link from the channel.
        """
        members_roles: set[int] = {role.id for role in member.roles}
        if not members_roles & ALLOWED_ROLES:  # Intersection is truthy on at least 1 common element
            log.debug(
                f"Removing invalid reaction: user {member} is not permitted to send signals"
            )
            # Duplicated try/except blocks extracted into `_remove_reaction_safely`.
            await self._remove_reaction_safely(incident, reaction, member)
            return

        try:
            signal = Signal(reaction)
        except ValueError:
            log.debug(
                f"Removing invalid reaction: emoji {reaction} is not a valid signal"
            )
            await self._remove_reaction_safely(incident, reaction, member)
            return

        log.trace(f"Received signal: {signal}")

        if signal not in (Signal.ACTIONED, Signal.NOT_ACTIONED):
            log.debug(
                "Reaction was valid, but no action is currently defined for it"
            )
            return

        relay_successful = await self.archive(incident,
                                              signal,
                                              actioned_by=member)
        if not relay_successful:
            log.trace(
                "Original message will not be deleted as we failed to relay it to the archive"
            )
            return

        timeout = 5  # Seconds
        confirmation_task = self.make_confirmation_task(incident, timeout)

        log.trace("Deleting original message")
        try:
            await incident.delete()
        except discord.NotFound:
            log.trace("Couldn't delete message because it was already deleted")

        log.trace(f"Awaiting deletion confirmation: {timeout=} seconds")
        try:
            await confirmation_task
        except asyncio.TimeoutError:
            log.info(
                f"Did not receive incident deletion confirmation within {timeout} seconds!"
            )
        else:
            log.trace("Deletion was confirmed")

        if self.incidents_webhook:
            # Deletes the message link embeds found in cache from the channel and cache.
            await self.delete_msg_link_embed(incident.id)

    async def resolve_message(self,
                              message_id: int) -> Optional[discord.Message]:
        """
        Get `discord.Message` for `message_id` from cache, or API.

        We first look into the local cache to see if the message is present.

        If not, we try to fetch the message from the API. This is necessary for messages
        which were sent before the bot's current session.

        In an edge-case, it is also possible that the message was already deleted, and
        the API will respond with a 404. In such a case, None will be returned.
        This signals that the event for `message_id` should be ignored.
        """
        await self.bot.wait_until_guild_available(
        )  # First make sure that the cache is ready
        log.trace(f"Resolving message for: {message_id=}")

        # NOTE(review): this reads discord.py's private message cache directly.
        cached: Optional[discord.Message] = self.bot._connection._get_message(
            message_id)
        if cached is not None:
            log.trace("Message was found in cache")
            return cached

        log.trace("Message not found, attempting to fetch")
        try:
            fetched = await self.bot.get_channel(
                Channels.incidents).fetch_message(message_id)
        except discord.NotFound:
            log.trace("Message doesn't exist, it was likely already relayed")
            return None
        except Exception:
            log.exception(f"Failed to fetch message {message_id}!")
            return None

        log.trace("Message fetched successfully!")
        return fetched

    @Cog.listener()
    async def on_raw_reaction_add(
            self, payload: discord.RawReactionActionEvent) -> None:
        """
        Pre-process `payload` and pass it to `process_event` if appropriate.

        We abort instantly if `payload` doesn't relate to a message sent in #incidents,
        or if it was sent by a bot.

        If `payload` relates to a message in #incidents, we first ensure that `crawl_task` has
        finished, to make sure we don't mutate channel state as we're crawling it.

        Next, we acquire `event_lock` - to prevent racing, events are processed one at a time.

        Once we have the lock, the `discord.Message` object for this event must be resolved.
        If the lock was previously held by an event which successfully relayed the incident,
        this will fail and we abort the current event.

        Finally, with both the lock and the `discord.Message` instance in our hands, we delegate
        to `process_event` to handle the event.

        The justification for using a raw listener is the need to receive events for messages
        which were not cached in the current session. As a result, a certain amount of
        complexity is introduced, but at the moment this doesn't appear to be avoidable.
        """
        # Guard clauses: only human reactions in #incidents are of interest.
        if payload.channel_id != Channels.incidents:
            return
        if payload.member.bot:
            return

        log.trace(
            f"Received reaction add event in #incidents, waiting for crawler: {self.crawl_task.done()=}"
        )
        await self.crawl_task

        log.trace(f"Acquiring event lock: {self.event_lock.locked()=}")
        async with self.event_lock:
            message = await self.resolve_message(payload.message_id)

            if message is None:
                log.debug(
                    "Listener will abort as related message does not exist!")
                return
            if not is_incident(message):
                log.debug("Ignoring event for a non-incident message")
                return

            await self.process_event(str(payload.emoji), message,
                                     payload.member)
            log.trace("Releasing event lock")

    @Cog.listener()
    async def on_message(self, message: discord.Message) -> None:
        """
        Pass `message` to `add_signals` and `extract_message_links` if it satisfies `is_incident`.

        If `message` is an incident report, then run it through `extract_message_links` to get all
        the message link embeds (embeds which contain information about that particular link).
        These message link embeds are then sent into the channel.

        Also passes the message into `add_signals` if the message is an incident.
        """
        if not is_incident(message):
            return

        await add_signals(message)

        # The message link embed feature is only used once the incidents
        # webhook has been fetched successfully.
        if not self.incidents_webhook:
            return

        embed_list = await self.extract_message_links(message)
        if embed_list:
            await self.send_message_link_embeds(embed_list, message,
                                                self.incidents_webhook)