async def _terminate_raid(self, guild, dismissed=False):
    guild_id = guild.id
    raid_info = self.under_raid[guild_id]
    raid_id = raid_info['ID']
    raid_info["ENDED"] = str(datetime.utcfromtimestamp(time.time()))
    self._save_raid(raid_info)
    Logging.info(f"Lifted alarm in {guild}")
    del self.under_raid[guild_id]
    channel = self._get_mod_channel(guild_id)
    if channel is not None:
        total = len(raid_info['RAIDERS'])
        left = len(raid_info["TODO"])
        handled = total - left
        await channel.send(
            f"Raid party is over :( Guess I'm done handing out special roles (for now).\n"
            f"**Summary:**\n"
            f"Raid ID: {raid_info['ID']}\n"
            f"{total} guests showed up for the party\n"
            f"{left} are still hanging out, enjoying that oh so special role they got\n"
            f"{handled} are no longer with us.")
        # notify the other servers if we didn't dismiss it; if we did, they already got notified about the false alarm
        if not dismissed:
            for other_guild in self.bot.guilds:
                if other_guild != channel.guild:
                    new_channel = self.bot.get_channel(Configuration.get_var(other_guild.id, "MOD_CHANNEL"))
                    if new_channel is not None:
                        await new_channel.send(
                            f"The raid party over at {guild} has ended (raid ID {raid_id}). If you want to cross ban, now would be a great time.\n"
                            f"For more info on the raid: ``!raid_info pretty {raid_id}``\n"
                            f"For crossbanning: ``!raid_act ban {raid_id}``")
async def _terminate_raid(self, guild, dismissed=False):
    guild_id = guild.id
    raid_info = self.under_raid[guild_id]
    raid_id = raid_info['ID']
    raid_info["ENDED"] = str(datetime.utcfromtimestamp(time.time()))
    self._save_raid(raid_info)
    Logging.info(f"Lifted alarm in {guild}")
    del self.under_raid[guild_id]
    channel = self._get_mod_channel(guild_id)
    if channel is not None:
        total = len(raid_info['RAIDERS'])
        left = len(raid_info["TODO"])
        handled = total - left
        await channel.send(
            f"No longer detecting any signs of a raid\n"
            f"**Summary:**\n"
            f"Raid ID: {raid_info['ID']}\n"
            f"{total} raiders spotted\n"
            f"{left} of them are still in the server but muted\n"
            f"{handled} are no longer on this server.")
        # notify the other servers if we didn't dismiss it; if we did, they already got notified about the false alarm
        if not dismissed:
            for other_guild in self.bot.guilds:
                if other_guild != channel.guild:
                    new_channel = self.bot.get_channel(Configuration.get_var(other_guild.id, "MOD_CHANNEL"))
                    if new_channel is not None:
                        await new_channel.send(
                            f"No longer detecting any signs of a raid in {guild}\n"
                            f"Raid ID: {raid_id}. For more info on the raid: ``!raid_info pretty {raid_id}``\n"
                            f"For crossbanning: ``!raid_act ban {raid_id}``")
def get_var(id, key):
    if id is None:
        raise ValueError("Where is this coming from?")
    if id not in SERVER_CONFIGS:
        Logging.info(f"Config entry requested before config was loaded for guild {id}, loading config for it")
        load_config(id)
    return SERVER_CONFIGS[id][key]
async def mute(self, member):
    role = member.guild.get_role(Configuration.get_var(member.guild.id, "MUTE_ROLE"))
    if role is not None:
        try:
            await member.add_roles(role, reason="Raid alarm triggered")
        except discord.HTTPException:
            Logging.warn(f"Failed to mute {member} ({member.id})!")
async def upgrade(self, ctx):
    await ctx.send(
        "<:BCWrench:344163417981976578> I'll be right back with new gears! "
        "<:woodGear:344163118089240596> <:stoneGear:344163146325295105> <:ironGear:344163170664841216> "
        "<:goldGear:344163202684289024> <:diamondGear:344163228101640192>")
    await Logging.bot_log(f"Upgrade initiated by {ctx.author.name}")
    Logging.info(f"Upgrade initiated by {ctx.author.name}")
    await ctx.invoke(self.pull)
    await ctx.invoke(self.restart)
def load_master():
    global MASTER_CONFIG, MASTER_LOADED
    try:
        with open('config/master.json', 'r') as jsonfile:
            MASTER_CONFIG = json.load(jsonfile)
            MASTER_LOADED = True
    except FileNotFoundError:
        Logging.error("Unable to load config, running with defaults.")
    except Exception as e:
        Logging.error("Failed to parse configuration.")
        print(e)
        raise e
async def sound_the(self, kind, guild):
    Logging.info(f"Anti-raid {kind} triggered for {guild.name} ({guild.id})!")
    channel = self.bot.get_channel(Configuration.get_var(guild.id, "MOD_CHANNEL"))
    if channel is not None:
        await channel.send(Configuration.get_var(guild.id, f"RAID_{kind}_MESSAGE"))
    else:
        Logging.warn(f"Unable to sound the {kind} in {guild.name} ({guild.id})")
        await guild.owner.send(
            f"🚨 Anti-raid {kind} triggered for {guild.name} but the mod channel is misconfigured 🚨")
    if kind == "ALARM":
        for m in self.trackers[kind][guild.id]:
            await self.mute(m)
def update_config(guild, config):
    while config["VERSION"] < CONFIG_VERSION:
        v = config["VERSION"]
        Logging.info(f"Upgrading config version from version {v} to {v + 1}")
        # back up the old config before migrating it
        d = f"config/backups/v{v}"
        if not os.path.isdir(d):
            os.makedirs(d)
        Utils.save_to_disk(f"{d}/{guild}", config)
        config = MIGRATORS[config["VERSION"] - 1](config)
        config["VERSION"] += 1
    Utils.save_to_disk(f"config/{guild}", config)
    return config
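# A minimal sketch of what an entry in MIGRATORS could look like. Only the calling
# convention is taken from update_config above (a migrator receives the old config
# dict and returns the upgraded one, after which VERSION is bumped); the function
# name, the "NEW_SETTING" key and its default are hypothetical placeholders.
def example_migrator(config):
    # add a key that a newer template version expects, with a placeholder default
    config.setdefault("NEW_SETTING", 0)
    return config


# update_config indexes into this list based on the config's current VERSION,
# so migrators must be ordered by upgrade step.
MIGRATORS = [example_migrator]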
def load_config(guild):
    global SERVER_CONFIGS
    config = Utils.fetch_from_disk(f'config/{guild}')
    if "VERSION" not in config and len(config) < 15:
        Logging.info(f"The config for {guild} is too old to migrate, resetting")
        config = dict()
    else:
        if "VERSION" not in config:
            config["VERSION"] = 0
        SERVER_CONFIGS[guild] = update_config(guild, config)
    if len(config) == 0:
        Logging.info(f"No config available for {guild}, creating a blank one.")
        SERVER_CONFIGS[guild] = Utils.fetch_from_disk("config/template")
        save(guild)
async def _sound_the_alarm(self, guild):
    Logging.info(f"Sounding the alarm for {guild} ({guild.id})!")
    guild_id = guild.id
    # bump the raid counter and grab the id for later reference
    raid_id = self.last_raid = self.last_raid + 1
    with open("raids/counter", "w") as file:
        file.write(str(raid_id))
    now = datetime.utcfromtimestamp(time.time())
    self.under_raid[guild_id] = {
        "ID": raid_id,
        "GUILD": guild_id,
        "RAIDERS": {},
        "MESSAGE": None,
        "TODO": [],
        "LAST_JOIN": now,
        "DETECTED": str(now),
        "ENDED": "NOT YET"
    }
    channel = self.bot.get_channel(Configuration.get_var(guild_id, "MOD_CHANNEL"))
    if channel is not None:
        await channel.send(Configuration.get_var(guild_id, "RAID_ALARM_MESSAGE"))
    else:
        Logging.warn(f"Unable to sound the alarm in {guild.name} ({guild_id})")
        await guild.owner.send(
            f"🚨 Anti-raid alarm triggered for {guild.name} but the mod channel is misconfigured, "
            f"please use ``!status`` somewhere in that server to get the raid status 🚨")
    # deal with the raiders we already tracked
    for raider in self.trackers[guild.id]:
        await self._handle_raider(raider)
    self.bot.loop.create_task(self._alarm_checker(guild))
    # this server has the tools it needs to deal with the raid, notify the other servers
    for other_guild in self.bot.guilds:
        if other_guild is not guild:
            channel = self.bot.get_channel(Configuration.get_var(other_guild.id, "MOD_CHANNEL"))
            if channel is not None:
                await channel.send(
                    f"⚠ Heads up: {guild} is being raided (raid ID: {raid_id})! "
                    f"They might try to raid this server as well. Spoiler alert: they'll fail")
async def on_ready():
    global STARTED
    if not STARTED:
        await Logging.onReady(bot, Configuration.get_master_var("BOT_LOG_CHANNEL"))
        await Configuration.on_ready(bot)
        for e in ["Maintenance", "Moderation", "BadNames"]:
            try:
                bot.load_extension("Cogs." + e)
            except Exception as ex:
                Logging.error(f"Failed to load cog {e}")
                await handle_exception(f"Loading cog {e}", ex)
        Logging.info("Cogs loaded")
        await Logging.bot_log("Outboard engine running at full speed!")
        STARTED = True
async def track_for(self, kind, member):
    guild = member.guild
    tracker = self.trackers[kind]
    if guild.id not in tracker:
        tracker[guild.id] = set()
    tracker[guild.id].add(member)
    amount = Configuration.get_var(guild.id, f"RAID_{kind}_AMOUNT")
    if len(tracker[guild.id]) >= amount:
        if guild.id not in self.active[kind]:
            self.active[kind].add(guild.id)
            await self.sound_the(kind, guild)
        if kind == "ALARM":
            await self.mute(member)
    await asyncio.sleep(Configuration.get_var(guild.id, f"RAID_{kind}_TIMEFRAME"))
    tracker[guild.id].remove(member)
    if len(tracker[guild.id]) < amount and guild.id in self.active[kind]:
        self.active[kind].remove(guild.id)
        Logging.info(f"{kind} lifted for {guild.name}")
        channel = self.bot.get_channel(Configuration.get_var(guild.id, "MOD_CHANNEL"))
        if channel is not None:
            await channel.send(f"{kind} has been lifted")
async def on_ready(bot: commands.Bot):
    global CONFIG_VERSION
    CONFIG_VERSION = Utils.fetch_from_disk("config/template")["VERSION"]
    Logging.info(f"Current template config version: {CONFIG_VERSION}")
    Logging.info(f"Loading configurations for {len(bot.guilds)} guilds.")
    for guild in bot.guilds:
        Logging.info(f"Loading info for {guild.name} ({guild.id}).")
        load_config(guild.id)
def requirement(self):
    ssh = ScaleTools.Ssh(host=self.getConfig(self.configCondorServer),
                         username=self.getConfig(self.configCondorUser),
                         key=self.getConfig(self.configCondorKey))

    # Target.Requirements can't be filtered with -constraints since it would require ClassAd based regex matching.
    # TODO: Find a more generic way to match resources/requirements (condor_q -slotads ??)
    # cmd_idle = "condor_q -constraint 'JobStatus == 1' -slotads slotads_bwforcluster " \
    #            "-analyze:summary,reverse | tail -n1 | awk -F ' ' " \
    #            "'{print $3 "\n" $4}'| sort -n | head -n1"
    constraint = "( %s ) && ( %s )" % (self._query_constraints,
                                       self.getConfig(self.configCondorConstraint))
    cmd = ("condor_q -global -constraint '%s' %s" % (constraint, self._query_format_string))
    result = ssh.handleSshCall(call=cmd, quiet=True)
    if result[0] != 0:
        self.logger.warning("Could not get HTCondor queue status! %d: %s" % (result[0], result[2]))
        return None
    elif any(error_string in result[1] for error_string in self._CLI_error_strings):
        self.logger.warning("condor_q request timed out.")
        return None

    queue_line = (entry.split(",", 3) for entry in str(result[1]).splitlines())
    converted_line = ((int(status), int(cores), requirement)
                      for status, cores, requirement in queue_line)
    if self.getConfig(self.configCondorRequirement):
        # TODO: We could use ClassAd bindings, to check requirement(s)
        filtered_line = ((status, cores) for status, cores, requirement in converted_line
                         if self.getConfig(self.configCondorRequirement) in requirement)
    else:
        filtered_line = ((status, cores) for status, cores, requirement in converted_line)

    required_cpus_total = 0
    required_cpus_idle_jobs = 0
    required_cpus_running_jobs = 0
    try:
        for job_status, requested_cpus in filtered_line:
            required_cpus_total += requested_cpus
            if job_status == self.condorStatusIdle:
                required_cpus_idle_jobs += requested_cpus
            elif job_status == self.condorStatusRunning:
                required_cpus_running_jobs += requested_cpus
    except ValueError:
        # This error should only occur, if the result was empty AND CondorRequirement is initial
        required_cpus_total = 0
        required_cpus_idle_jobs = 0
        required_cpus_running_jobs = 0

    self.logger.debug("HTCondor queue: Idle: %d; Running: %d."
                      % (required_cpus_idle_jobs, required_cpus_running_jobs))

    # cores -> machines: machine definition required for RequirementAdapter
    n_cores = -int(self.getConfig(self.configMachines)[self.getNeededMachineType()]["cores"])
    self._curRequirement = -(required_cpus_total // n_cores)

    with Logging.JsonLog() as json_log:
        json_log.addItem(self.getNeededMachineType(), "jobs_idle", required_cpus_idle_jobs)
        json_log.addItem(self.getNeededMachineType(), "jobs_running", required_cpus_running_jobs)

    return self._curRequirement
async def on_guild_join(guild: discord.Guild):
    Logging.info(f"A new guild came up: {guild.name} ({guild.id}).")
    Configuration.load_config(guild.id)
async def handle_exception(exception_type, exception, event=None, message=None, ctx=None, *args, **kwargs):
    embed = discord.Embed(colour=discord.Colour(0xff0000),
                          timestamp=datetime.datetime.utcfromtimestamp(time.time()))

    # something went wrong and it might have been in on_command_error, make sure we log to the log file first
    lines = [
        "\n===========================================EXCEPTION CAUGHT, DUMPING ALL AVAILABLE INFO===========================================",
        f"Type: {exception_type}"
    ]

    arg_info = ""
    for arg in list(args):
        arg_info += extract_info(arg) + "\n"
    if arg_info == "":
        arg_info = "No arguments"

    kwarg_info = ""
    for name, arg in kwargs.items():
        kwarg_info += "{}: {}\n".format(name, extract_info(arg))
    if kwarg_info == "":
        kwarg_info = "No keyword arguments"

    lines.append("======================Exception======================")
    lines.append(f"{str(exception)} ({type(exception)})")
    lines.append("======================ARG INFO======================")
    lines.append(arg_info)
    lines.append("======================KWARG INFO======================")
    lines.append(kwarg_info)
    lines.append("======================STACKTRACE======================")
    tb = "".join(traceback.format_tb(exception.__traceback__))
    lines.append(tb)

    if message is None and event is not None and hasattr(event, "message"):
        message = event.message
    if message is None and ctx is not None:
        message = ctx.message
    if message is not None and hasattr(message, "content"):
        lines.append("======================ORIGINAL MESSAGE======================")
        lines.append(message.content)
        if message.content is None or message.content == "":
            content = "<no content>"
        else:
            content = message.content
        embed.add_field(name="Original message", value=content, inline=False)
        lines.append("======================ORIGINAL MESSAGE (DETAILED)======================")
        lines.append(extract_info(message))

    if event is not None:
        lines.append("======================EVENT NAME======================")
        lines.append(event)
        embed.add_field(name="Event", value=event)

    if ctx is not None:
        lines.append("======================COMMAND INFO======================")
        lines.append(f"Command: {ctx.command}")
        embed.add_field(name="Command", value=ctx.command)
        channel_name = 'Private Message' if isinstance(ctx.channel, discord.abc.PrivateChannel) \
            else f"{ctx.channel.name} (`{ctx.channel.id}`)"
        lines.append(f"Channel: {channel_name}")
        embed.add_field(name="Channel", value=channel_name, inline=False)
        sender = f"{ctx.author.name}#{ctx.author.discriminator} (`{ctx.author.id}`)"
        lines.append(f"Sender: {sender}")
        embed.add_field(name="Sender", value=sender, inline=False)

    lines.append("===========================================DATA DUMP COMPLETE===========================================")
    Logging.error("\n".join(lines))

    # nice embed for info on discord
    embed.set_author(name=exception_type)
    embed.add_field(name="Exception", value=f"{str(exception)} (`{type(exception)}`)", inline=False)
    parts = Utils.paginate(tb, max_chars=1024)
    num = 1
    for part in parts:
        embed.add_field(name=f"Traceback {num}/{len(parts)}", value=part)
        num += 1

    # try logging to botlog, wrapped in a try/except as there is no higher level catching to prevent taking down
    # the bot (and if we ended up here it might even have been due to trying to log to botlog)
    try:
        await Logging.bot_log(embed=embed)
    except Exception as ex:
        Logging.error(f"Failed to log to botlog, either Discord broke or something is seriously wrong!\n{ex}")
        Logging.error(traceback.format_exc())
import datetime
import sys
import time
import traceback

import discord
from discord.ext import commands

from Util import Logging, Configuration, Utils

# Initialize logging
Logging.initialize()
Logging.info("Outboard initializing")

bot = commands.Bot(command_prefix="!", case_insensitive=True)
STARTED = False


@bot.event
async def on_ready():
    global STARTED
    if not STARTED:
        await Logging.onReady(bot, Configuration.get_master_var("BOT_LOG_CHANNEL"))
        await Configuration.on_ready(bot)
        for e in ["Maintenance", "Moderation", "BadNames"]:
            try:
                bot.load_extension("Cogs." + e)
            except Exception as ex:
                Logging.error(f"Failed to load cog {e}")
                await handle_exception(f"Loading cog {e}", ex)
        Logging.info("Cogs loaded")
        await Logging.bot_log("Outboard engine running at full speed!")
        STARTED = True
def requirement(self):
    ssh = ScaleTools.Ssh(host=self.getConfig(self.configSlurmServer),
                         username=self.getConfig(self.configSlurmUser),
                         key=self.getConfig(self.configSlurmKey))

    # Target.Requirements can't be filtered with -constraints since it would require ClassAd based regex matching.
    # TODO: Find a more generic way to match resources/requirements (condor_q -slotads ??)
    # cmd_idle = "condor_q -constraint 'JobStatus == 1' -slotads slotads_bwforcluster " \
    #            "-analyze:summary,reverse | tail -n1 | awk -F ' ' " \
    #            "'{print $3 "\n" $4}'| sort -n | head -n1"
    # constraint = "( %s ) && ( %s )" % (self._query_constraints, self.getConfig(self.configCondorConstraint))
    # cmd = ("condor_q -global -allusers -nobatch -constraint '%s' %s" % (constraint, self._query_format_string))
    # cmd = 'squeue -p nemo_vm_atlsch --noheader --format="%T %r %c"'

    self.logger.info("Checking requirements in partition {}".format(self.getConfig(self.configSlurmPartition)))
    cmd = 'squeue -p {} --noheader --format="%T %r %c"'.format(self.getConfig(self.configSlurmPartition))
    result = ssh.handleSshCall(call=cmd, quiet=True)
    if result[0] != 0:
        self.logger.warning("Could not get Slurm queue status! %d: %s" % (result[0], result[2]))
        return None
    elif any(error_string in result[1] for error_string in self._CLI_error_strings):
        self.logger.warning("squeue request timed out.")
        return None

    required_cpus_total = 0
    required_cpus_idle_jobs = 0
    required_cpus_running_jobs = 0
    cpus_dependency_jobs = 0

    for line in result[1].splitlines():
        values = line.split()
        # self.logger.debug(values)
        if len(values) != 3:
            continue
        if "Dependency" in values[1]:
            cpus_dependency_jobs += int(values[2])
            continue
        if "PartitionTimeLimit" in values[1]:
            continue
        elif "PENDING" in values[0]:
            required_cpus_total += int(values[2])
            required_cpus_idle_jobs += int(values[2])
            continue
        elif "RUNNING" in values[0]:
            required_cpus_total += int(values[2])
            required_cpus_running_jobs += int(values[2])
            continue
        else:
            self.logger.warning("unknown job state: %s. Ignoring.", values[0])

    self.logger.debug("Slurm queue: Idle: %d; Running: %d. in partition: %s."
                      % (required_cpus_idle_jobs, required_cpus_running_jobs,
                         self.getConfig(self.configSlurmPartition)))

    # cores -> machines: machine definition required for RequirementAdapter
    n_cores = -int(self.getConfig(self.configMachines)[self.getNeededMachineType()]["cores"])
    self._curRequirement = -(required_cpus_total // n_cores)

    self.logger.debug("Required CPUs total=%s" % required_cpus_total)
    self.logger.debug("Required CPUs idle Jobs=%s" % required_cpus_idle_jobs)
    self.logger.debug("Required CPUs running Jobs=%s" % required_cpus_running_jobs)
    self.logger.debug("CPUs dependency Jobs=%s" % cpus_dependency_jobs)

    with Logging.JsonLog() as json_log:
        json_log.addItem(self.getNeededMachineType(), "jobs_idle", required_cpus_idle_jobs)
        json_log.addItem(self.getNeededMachineType(), "jobs_running", required_cpus_running_jobs)

    return self._curRequirement
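# For reference, a minimal standalone sketch of the squeue parsing done in requirement()
# above. The sample lines below are invented; only the 'squeue --noheader --format="%T %r %c"'
# column layout (job state, reason, CPU count) matches the command built there.
sample_output = """PENDING Resources 4
PENDING Dependency 2
RUNNING None 8
PENDING PartitionTimeLimit 16"""

idle = running = dependency = total = 0
for line in sample_output.splitlines():
    values = line.split()
    if len(values) != 3:
        continue
    state, reason, cpus = values[0], values[1], int(values[2])
    if "Dependency" in reason:
        dependency += cpus  # waiting on another job, not counted as demand
    elif "PartitionTimeLimit" in reason:
        continue  # can never start in this partition, ignored
    elif "PENDING" in state:
        total += cpus
        idle += cpus
    elif "RUNNING" in state:
        total += cpus
        running += cpus

print(idle, running, dependency, total)  # -> 4 8 2 12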