Example #1
    async def send_welcome(self, member):
        guild = self.bot.get_guild(Configuration.get_var("guild_id"))
        if member.guild.id != guild.id or self.is_member_verified(member):
            return False

        try:
            welcome_channel = self.bot.get_config_channel(
                guild.id, Utils.welcome_channel)
            rules_channel = self.bot.get_config_channel(
                guild.id, Utils.rules_channel)

            # Send welcome message in the configured language; default to English
            if welcome_channel and rules_channel:
                txt = Lang.get_string(
                    "welcome/welcome_msg",
                    user=member.mention,
                    rules_channel=rules_channel.mention,
                    accept_emoji=Emoji.get_chat_emoji('PAINTBRUSH'))
                if self.mute_new_members:
                    # add mute notification if mute for new members is on
                    mute_txt = Lang.get_string("welcome/welcome_mute_msg")
                    txt = f"{txt}\n{mute_txt}"
                await welcome_channel.send(txt)
                return True
        except Exception as ex:
            Logging.info(f"failed to welcome {member.id}")
            Logging.error(ex)
            raise
        return False
Example #2
    async def startup_cleanup(self):
        Logging.info("starting bugs")
        # TODO: find out what the condition is we need to wait for instead of just sleep
        # await asyncio.sleep(20)

        # for name, cid in Configuration.get_var("channels").items():
        reporting_channel_ids = []
        for row in BugReportingChannel.select():
            cid = row.channelid
            name = f"{row.platform.platform}_{row.platform.branch}"
            guild_id = row.guild.serverid
            channel = self.bot.get_channel(cid)
            shutdown_key = f"{guild_id}_{name}_shutdown"
            shutdown_id = Configuration.get_persistent_var(shutdown_key)

            if shutdown_id is not None:
                Configuration.del_persistent_var(shutdown_key)
                try:
                    message = await channel.fetch_message(shutdown_id)
                    await message.delete()
                except (NotFound, HTTPException):
                    # the shutdown notice is already gone; nothing to delete
                    pass
            reporting_channel_ids.append(cid)
        try:
            await self.send_bug_info(*reporting_channel_ids)
        except Exception as e:
            await Utils.handle_exception("bug startup failure", self.bot, e)
Example #3
    async def handle_reaction_change(self, t, reaction, user_id):
        roles = Configuration.get_var("roles")
        if reaction in roles:
            guild = self.bot.get_guild(Configuration.get_var("guild_id"))
            role = guild.get_role(roles[reaction])
            member_role = guild.get_role(Configuration.get_var("member_role"))
            nonmember_role = guild.get_role(
                Configuration.get_var("nonmember_role"))
            member = guild.get_member(user_id)

            if member is None:
                return

            action = getattr(member, f"{t}_roles")
            try:
                await action(role)
                # if acting on member role, toggle corresponding nonmember role
                if role is member_role:
                    if t == 'add':
                        await member.remove_roles(nonmember_role)
                    else:
                        await member.add_roles(nonmember_role)
            except Exception as ex:
                Logging.info(f"failed to {t} role for member {user_id}")
                Logging.error(ex)
                raise
Example #4
    async def ping_unverified(self, ctx):
        guild = self.bot.get_guild(Configuration.get_var("guild_id"))
        try:
            nonmember_role = guild.get_role(
                Configuration.get_var("nonmember_role"))
            welcome_channel = self.bot.get_config_channel(
                guild.id, Utils.welcome_channel)
            rules_channel = self.bot.get_config_channel(
                guild.id, Utils.rules_channel)

            if welcome_channel and rules_channel:
                txt = Lang.get_string(
                    "welcome/welcome_msg",
                    user=nonmember_role.mention,
                    rules_channel=rules_channel.mention,
                    accept_emoji=Emoji.get_chat_emoji('CANDLE'))

                await nonmember_role.edit(mentionable=True)
                await welcome_channel.send(txt)
                await nonmember_role.edit(mentionable=False)
                return True
        except Exception as ex:
            Logging.info("failed to ping unverified role")
            Logging.error(ex)
            raise
        return False
Example #5
    async def periodic_task(self):
        # periodic task to run while cog is loaded

        # remove expired cooldowns
        now = datetime.now().timestamp()
        cooldown = Configuration.get_persistent_var("mischief_cooldown", dict())

        try:
            # key for loaded dict is a string
            updated_cooldown = {}
            for str_uid, member_last_access_time in cooldown.items():
                if (now - member_last_access_time) < self.cooldown_time:
                    updated_cooldown[str_uid] = member_last_access_time
            Configuration.set_persistent_var("mischief_cooldown", updated_cooldown)
        except Exception:
            Logging.info("can't clear cooldown")

        # update role count storage (because it's slow)
        try:
            guild = Utils.get_home_guild()
            for role_id in self.role_map.values():
                my_role = guild.get_role(role_id)
                if my_role is not None:
                    self.role_counts[str(role_id)] = len(my_role.members)
        except Exception:
            Logging.info("can't update role counts")
Example #6
def list(path):
    ''' List existing repositories. '''
    os.chdir(path)
    for name in os.listdir(path):
        if check_repo_validity(name):
            repo_url = wrap_repo_url(path, name)
            name = name.replace(REPO_POSTFIX, '')
            Logging.info('%s  %80s' % (name, repo_url))
Example #7
    async def load(self, ctx, cog: str):
        if os.path.isfile(f"cogs/{cog}.py"):
            self.bot.load_extension(f"cogs.{cog}")
            if cog not in Configuration.MASTER_CONFIG["cogs"]:
                Configuration.MASTER_CONFIG["cogs"].append(cog)
                Configuration.save()
            await ctx.send(f"**{cog}** has been loaded!")
            await Logging.bot_log(
                f"**{cog}** has been loaded by {ctx.author.name}.")
            Logging.info(f"{cog} has been loaded")
        else:
            await ctx.send(
                f"{Emoji.get_chat_emoji('NO')} I can't find that cog.")
Example #8
    async def unload(self, ctx, cog: str):
        if cog in ctx.bot.cogs:
            self.bot.unload_extension(f"cogs.{cog}")
            if cog in Configuration.MASTER_CONFIG["cogs"]:
                Configuration.get_var("cogs").remove(cog)
                Configuration.save()
            await ctx.send(f'**{cog}** has been unloaded.')
            await Logging.bot_log(
                f'**{cog}** has been unloaded by {ctx.author.name}')
            Logging.info(f"{cog} has been unloaded")
        else:
            await ctx.send(
                f"{Emoji.get_chat_emoji('NO')} I can't find that cog.")
Example #9
    async def startup_cleanup(self):
        await self.bot.wait_until_ready()
        Logging.info("starting DropBox")

        for guild in self.bot.guilds:
            # fetch dropbox channels per server
            self.init_guild(guild.id)
            for row in DropboxChannel.select().where(
                    DropboxChannel.serverid == guild.id):
                self.dropboxes[guild.id][row.sourcechannelid] = row
        self.loaded = True

        self.deliver_to_channel.start()
        self.clean_channels.start()
Example #10
    async def on_ready(self):
        if not self.loaded:
            Logging.BOT_LOG_CHANNEL = self.get_channel(
                Configuration.get_var("log_channel"))
            Emoji.initialize(self)

            for cog in Configuration.get_var("cogs"):
                try:
                    self.load_extension("cogs." + cog)
                except Exception as e:
                    await Utils.handle_exception(f"Failed to load cog {cog}",
                                                 self, e)
            Logging.info("Cogs loaded")
            self.loop.create_task(self.keepDBalive())
            self.loaded = True

        await Logging.bot_log("Journey bot surfing through the dunes!")
Example #11
def init_root_repos(path):
    grm_path = os.path.join(path, BASE_GRM_DIR)
    if os.path.exists(grm_path):
        Logging.error('Remote root directory already initialized in %s' %
                      os.path.abspath(path))
        return False

    if not os.path.exists(path):
        os.mkdir(path)
    Logging.info('Initialized remote root directory in %s' %
                 os.path.abspath(path))
    os.chdir(path)
    os.mkdir(BASE_GRM_DIR)

    init_config('.')
    p = Process(target=init_public_ip)
    p.start()
Example #12
    async def on_ready(self):
        if self.loaded:
            Logging.info(f"{self.my_name} reconnect")
            return

        Logging.BOT_LOG_CHANNEL = self.get_channel(
            Configuration.get_var("log_channel"))
        Emoji.initialize(self)

        for cog in Configuration.get_var("cogs"):
            try:
                self.load_extension("cogs." + cog)
            except Exception as e:
                await Utils.handle_exception(f"Failed to load cog {cog}", self,
                                             e)
        Logging.info("Cogs loaded")
        self.db_keepalive = self.loop.create_task(self.keepDBalive())
        self.loaded = True

        await Logging.bot_log(f"{self.my_name} has started. Time to bot!")
Example #13
    async def send_bug_info(self, *args):
        for channel_id in args:
            channel = self.bot.get_channel(channel_id)
            if channel is None:
                await Logging.bot_log(f"can't send bug info to nonexistent channel {channel_id}")
                continue

            bug_info_id = Configuration.get_persistent_var(f"{channel.guild.id}_{channel_id}_bug_message")

            ctx = None
            tries = 0
            while not ctx and tries < 5:
                tries += 1
                # this API call fails on startup because connection is not made yet.
                # TODO: properly wait for connection to be initialized
                try:
                    last_message = await channel.send('preparing bug reporting...')
                    ctx = await self.bot.get_context(last_message)

                    if bug_info_id is not None:
                        try:
                            message = await channel.fetch_message(bug_info_id)
                        except (NotFound, HTTPException):
                            pass
                        else:
                            await message.delete()
                            if message.id in self.bug_messages:
                                self.bug_messages.remove(message.id)

                    bugemoji = Emoji.get_emoji('BUG')
                    message = await channel.send(Lang.get_locale_string("bugs/bug_info", ctx, bug_emoji=bugemoji))
                    self.bug_messages.add(message.id)
                    await message.add_reaction(bugemoji)
                    Configuration.set_persistent_var(f"{channel.guild.id}_{channel_id}_bug_message", message.id)
                    Logging.info(f"Bug report message sent in channel #{channel.name} ({channel.id})")
                    await last_message.delete()
                except Exception as e:
                    await self.bot.guild_log(channel.guild.id, f'Having trouble sending bug message in {channel.mention}')
                    await Utils.handle_exception(
                        f"Bug report message failed to send in channel #{channel.name} ({channel.id})", self.bot, e)
                    await asyncio.sleep(0.5)
Example #14
def run_db_migrations():
    dbv = int(Configuration.get_persistent_var('db_version', 0))
    Logging.info(f"db version is {dbv}")
    dbv_list = [f for f in glob.glob("db_migrations/db_migrate_*.py")]
    dbv_pattern = re.compile(r'db_migrations/db_migrate_(\d+)\.py',
                             re.IGNORECASE)
    migration_count = 0
    for filename in sorted(dbv_list):
        # get the int version number from filename
        version = int(re.match(dbv_pattern, filename)[1])
        if version > dbv:
            try:
                Logging.info(
                    f"--- running db migration version number {version}")
                spec = importlib.util.spec_from_file_location(
                    f"migrator_{version}", filename)
                dbm = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(dbm)
                Configuration.set_persistent_var('db_version', version)
                migration_count += 1
            except Exception:
                # throw a fit if it doesn't work
                raise
    Logging.info(
        f"--- {migration_count if migration_count else 'no'} db migration{'' if migration_count == 1 else 's'} run"
    )
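
The loader above imports each numbered file once and then records the new db_version, so a migration is just top-level code. A minimal sketch, assuming a peewee-backed SQLite database (the path, table, and column here are hypothetical):

# db_migrations/db_migrate_1.py -- hypothetical migration picked up by run_db_migrations
import peewee
from playhouse.migrate import SqliteMigrator, migrate

db = peewee.SqliteDatabase("bot.db")  # hypothetical database path
migrator = SqliteMigrator(db)

# executed exactly once; the loader then sets db_version to 1
migrate(
    migrator.add_column("bugreportingchannel", "shutdown_id",
                        peewee.BigIntegerField(null=True)),
)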
Example #15
def get_locale_string(key, ctx='', **arg_dict):
    global LANG, locales_loaded
    locale = get_defaulted_locale(ctx)

    if not locale:
        return L_ERR

    if not locales_loaded:
        load_locales()

    output = []
    # locale is a list or tuple; it may contain a single item or several
    for item in locale:
        locale_lang = LANG[item]
        key_list = key.split("/")

        # Check that keys point to a valid path in base keys
        obj = LANG['keys']

        if get_by_path(
                obj, key_list[:-1]) is None or key_list[-1] not in get_by_path(
                    obj, key_list[:-1]):
            raise KeyError(f"Lang key is not in lang_keys: {key}")
        if get_by_path(obj, key_list) is not None:
            raise KeyError(f"Lang key is not terminal: {key}")

        obj = get_by_path(locale_lang, key_list)

        # keys were found. Now check locale for value:
        if isinstance(obj, str):
            try:
                output.append(obj.format(**arg_dict))
            except KeyError:
                # formatting failed; fall back to the raw string
                output.append(obj)
        else:
            # Maybe string is not defined in lang file.
            Logging.info(
                f"localized lang string failed for key {key} in locale {item}")
            output.append(L_ERR)
    return '\n'.join(output)
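
Because ctx may also be a plain locale string that overrides the lookup (see get_defaulted_locale in Example #20), a direct call can be sketched as follows; the key and keyword arguments are borrowed from Example #1, and the values are hypothetical:

# hypothetical usage: force the en_US locale and format a known key
txt = get_locale_string("welcome/welcome_msg", "en_US",
                        user="@newcomer",
                        rules_channel="#rules",
                        accept_emoji=":paintbrush:")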
Example #16
    async def send_welcome(self, member):
        guild = self.bot.get_guild(Configuration.get_var("guild_id"))
        if member.guild.id != guild.id or self.is_member_verified(member):
            return False

        try:
            welcome_channel = self.bot.get_config_channel(
                guild.id, Utils.welcome_channel)
            rules_channel = self.bot.get_config_channel(
                guild.id, Utils.rules_channel)

            if welcome_channel and rules_channel:
                txt = Lang.get_string(
                    "welcome/welcome_msg",
                    user=member.mention,
                    rules_channel=rules_channel.mention,
                    accept_emoji=Emoji.get_chat_emoji('CANDLE'))
                await welcome_channel.send(txt)
                return True
        except Exception as ex:
            Logging.info(f"failed to welcome {member.id}")
            Logging.error(ex)
            raise
        return False
Example #17
    async def close(self):
        Logging.info("Shutting down?")
        if not self.shutting_down:
            Logging.info("Shutting down...")
            self.shutting_down = True
            self.db_keepalive.cancel()
            # copy the cog names first; unloading mutates self.cogs
            for cog in list(self.cogs):
                Logging.info(f"unloading cog {cog}")
                c = self.get_cog(cog)
                if hasattr(c, "shutdown"):
                    await c.shutdown()
                self.unload_extension(f"cogs.{cog}")
        return await super().close()
Example #18
def add(path, name):
    ''' Create a repository. '''
    os.chdir(path)
    name = wrap_repo_name(name)
    repo_name = os.path.join(path, name)
    if os.path.exists(repo_name):
        Logging.error('the repository already exists')
        return False

    run_command('git init --bare %s' % name)
    run_command('chown -R git:git %s' % name)

    repo_url = wrap_repo_url(path, name)
    Logging.info("repository url: %s" % (repo_url))
    Logging.info("git clone:      git clone %s" % (repo_url))
    Logging.info("git remote:     git remote add origin %s" % (repo_url))
Example #19
    async def hotreload(self, ctx):
        message = await ctx.send("Hot reloading...")
        importlib.reload(Reloader)
        for c in Reloader.components:
            importlib.reload(c)
        Emoji.initialize(self.bot)
        Logging.info("Reloading all cogs...")
        # copy the cog names first; reloading mutates self.bot.cogs
        for cog in list(self.bot.cogs):
            self.bot.unload_extension(f"cogs.{cog}")
            Logging.info(f'{cog} has been unloaded.')
            self.bot.load_extension(f"cogs.{cog}")
            Logging.info(f'{cog} has been loaded.')
        await message.edit(content="Hot reload complete")
Example #20
def get_defaulted_locale(ctx):
    locale = 'en_US'
    if isinstance(ctx, Context):
        # TODO: move guild/channel checks to LangConfig, store in dict, update there on guild events and config changes
        cid = ctx.channel.id

        if ctx.guild is None:
            # DM - default the language
            locale = Configuration.get_var('broadcast_locale', 'en_US')
            if locale == ALL_LOCALES:
                return locales
            return [locale]

        # TODO: create lookup table so we don't hit database every time
        #  github issue #91
        gid = ctx.guild.id
        guild_row = Guild.get_or_none(serverid=gid)
        chan_locale = Localization.get_or_none(channelid=cid)

        # Bot default is English
        if guild_row is not None and guild_row.defaultlocale in locales:
            # server locale overrides bot default
            locale = guild_row.defaultlocale
        if chan_locale is not None and chan_locale.locale in locales:
            # channel locale overrides server
            locale = chan_locale.locale
    elif isinstance(ctx, str):
        # String assumes caller knows better and is overriding all else
        if ctx == ALL_LOCALES:
            return locales
        if ctx not in locales:
            if ctx != '':
                Logging.info(
                    f"Locale string override '{ctx}' not found. Defaulting.")
        else:
            locale = ctx
    else:
        Logging.info(f"Cannot derive locale from context: {ctx}")
        locale = False

    if locale not in locales:
        Logging.info(f"Missing locale {locale} - defaulting to English")
        locale = 'en_US'
    return [locale]
Example #21
    async def on_ready(self):
        Logging.info(f"Skybot... {'RECONNECT!' if self.loaded else 'STARTUP!'}")
        if self.loaded:
            Logging.info("Skybot reconnect")
            return

        Logging.BOT_LOG_CHANNEL = self.get_channel(Configuration.get_var("log_channel"))
        Emoji.initialize(self)

        for cog in Configuration.get_var("cogs"):
            try:
                self.load_extension("cogs." + cog)
            except Exception as e:
                await Utils.handle_exception(f"Failed to load cog {cog}", self, e)
        Logging.info("Cogs loaded")
        self.db_keepalive = self.loop.create_task(self.keepDBalive())
        self.loaded = True

        await Logging.bot_log("Skybot soaring through the skies!")
Example #22
import pymongo
from config import *
from utils import Logging

logger = Logging(log_name="collateral_str").getLogger()

conn = pymongo.MongoClient(mongodb_params['host'], mongodb_params['port'])
conn_db = conn[mongodb_params['db_name']]
count = 1
for doc in conn_db.account_collateral.find():
    logger.info(doc)
    doc["collateral"] = str(doc["collateral"])
    logger.info(doc)
    logger.info("--------------- {} ---------------------\n".format(count))
    count += 1
    # Collection.save() is deprecated (removed in PyMongo 4); replace the document by _id instead
    conn_db.account_collateral.replace_one({"_id": doc["_id"]}, doc)

Example #23
class Aggregator:
    """ Aggregator object 
    Establishes connection and joins MUCs. Registers handlers
    """
    def __init__(self):
        config = Configuration()

        self.db = Database()
        self.results = self.db.get_table('results')
        
        self.job_map = {}
        self.job_pool = []
        
        self.failed_jobs = []
        
        self.evals = {}
        
        self.notifier = Notifier()
        
        self.sched = MessageScheduler(self.message_handler)

        conn = Connection('aggregator', 'roflcake')
        entity_prefix, entity_suffix = conn.get_entity_name()
        self.entity_name = entity_prefix + entity_suffix

        self.log = Logging(conn)
        conn.join_muc('aggregators')
        
        self.conn = conn.get_conn()
        
        self.roster = self.conn.getRoster()
        
        self.conn.RegisterHandler('iq',self.set_handler,'set')
        self.conn.RegisterHandler('iq',self.get_handler,'get')
        self.conn.RegisterHandler('iq',self.result_handler,'result')
        self.conn.RegisterHandler('presence',self.presence_handler)
        
        self.temp_messages = []
        
        self.parser = Parser()

        self.go_on()

    """ Handler for scheduler """
    def message_handler(self, message, retry=False):
        #print 'Sending message.'
        if retry == True:
            self.log.error('Timed out, attempting to resend.')
        self.conn.send(message)
    
# HANDLERS
    """ IQ result handler """
    def result_handler(self, conn, iq_node):
        if self.sched.is_managed(int(iq_node.getID())):
            self.sched.received_response(iq_node)
        raise NodeProcessed
            
    """ Presence handler """
    def presence_handler(self, conn, presence_node):
        if len(self.job_map) > 0:
            sender = presence_node.getFrom()
            if presence_node.getAttr('type') == 'unavailable':
                failed_poller = None
                for poller in self.job_map:
                    if poller == sender:
                        failed_poller = poller
                        break
                        
                if failed_poller is not None:
                    # Only used if Controller has gone offline
                    self.log.info('Poller %s has gone offline.' % failed_poller)
        raise NodeProcessed
                
    """ IQ get handler """
    def get_handler(self, conn, iq_node):
        if iq_node.getQueryNS() == NS_DISCO_INFO:
            reply = iq_node.buildReply('result')
            identity = Node('identity', {'category':'skynet', 'type':'aggregator'})
            reply.setQueryPayload([identity])
            conn.send(reply)
        raise NodeProcessed

    """ IQ set handler. Permits RPC calls in the whitelist, else returns error message """
    def set_handler(self, conn, iq_node):
        sender = iq_node.getFrom()
        iq_id = iq_node.getAttr('id')
        
        if sender == '[email protected]/skynet':
            query_node = iq_node.getQueryChildren()
            for node in query_node:
                try:
                    method = node.getTagData('methodName')
                    method_whitelist = ['run_job', 'add_poller', 'remove_poller', 'remove_job', 'move_job']
                    if method in method_whitelist:
                        method = getattr(self, method)
                        try:
                            try:
                                params = node.getTag('params').getChildren()
                                args = self.parser.get_args(params)
                            except AttributeError:
                                args = []
                            status, parameters = apply(method, args)
                            message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                            conn.send(message)
                        except TypeError:
                            #print sys.exc_info()
                            conn.send(iq_node.buildReply('error'))
                    else:
                        conn.send(iq_node.buildReply('error'))
                        self.log.error('Method called not in whitelist')
                except AttributeError:
                    #print sys.exc_info()
                    conn.send(iq_node.buildReply('error'))

        if len(self.job_map) > 0:
            if sender in self.job_map:
                query_node = iq_node.getQueryChildren()
                for node in query_node:
                    try:
                        method = node.getTagData('methodName')
                        method_whitelist = ['add_result']
                        if method in method_whitelist:
                            method = getattr(self, method)
                            try:
                                try:
                                    params = node.getTag('params').getChildren()
                                    args = self.parser.get_args(params)
                                except AttributeError:
                                    args = []

                                status, parameters = apply(method, args)
                                message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                                conn.send(message)
                            except TypeError:
                                #print sys.exc_info()
                                conn.send(iq_node.buildReply('error'))
                        else:
                            conn.send(iq_node.buildReply('error'))
                    except AttributeError:
                        #print sys.exc_info()
                        conn.send(iq_node.buildReply('error'))
        
        raise NodeProcessed  # This stanza is fully processed
        
    """ Establish evaluations """
    def set_evals(self, job, evaluations):
        for evaluation in evaluations:
            if evaluation.string != None:
                self.evals[job].append((evaluation.comparison, str(evaluation.string)))
            elif evaluation.float != None:
                self.evals[job].append((evaluation.comparison, float(evaluation.float)))
            elif evaluation.int != None:
                self.evals[job].append((evaluation.comparison, int(evaluation.int)))
            else:
                self.log.info('Evaluation contains no comparison value.')
                
    """ Callback handler used by scheduler.add_message when poller replies on successful job assignment """
    def assign_job(self, sender, query_node):
        if query_node.getNamespace() == NS_RPC:
            params = query_node.getTag('methodResponse').getTag('params').getChildren()
            job_id = self.parser.get_args_no_sender(params)[0]
            job_id = int(job_id)
            self.job_map[JID(sender)].append(job_id)
            self.evals[job_id] = []
            
            evaluations = self.db.get_evaluations(job_id)
            self.set_evals(job_id, evaluations)
        else:
            pass
#            print 'Receieved iq message with incorrect namespace'
# END HANDLERS

# RPC METHODS
    """ Retains details of a job, allocates to Poller with least currently assinged jobs """
    def run_job(self, sender, poller, job, addr, proto, freq, dom, resource):
        # Checks a poller is assigned
        if len(self.job_map) > 0:
            # Determines which poller has least assigned jobs
#            job_comp = None
#            least_loaded = None
#            for poller, jobs in self.job_map.items():
#                num_jobs = len(jobs)
#                if job_comp != None:
#                    if len(jobs) < job_comp:
#                        least_loaded = poller
#                else:
#                    least_loaded = poller
#                job_comp = num_jobs
            
            message = self.parser.rpc_call(poller, 'run_job', [self.entity_name, job, addr, proto, freq, dom, resource])
            self.sched.add_message(message, self.assign_job)
            return 'success', [str(poller), int(job)]
        else:
            return 'failure', ['There are no pollers connected']
            
    """ Called when a job is moved to this Aggregator, sets up evals and details """
    def move_job(self, sender, poller, job, addr, proto, freq, dom, resource, segment):
        # Checks a poller is assigned
        if len(self.job_map) > 0:
            job_id = int(job)
            self.job_map[JID(poller)].append(job_id)
            self.evals[job_id] = []

            evaluations = self.db.get_evaluations(job_id)
            self.set_evals(job_id, evaluations)
            return 'success', ['Successfully moved job']
        else:
            return 'failure', ['There are no pollers connected']
    
    """ Removes job from the Aggregator, cleans up state """
    def remove_job(self, sender, job_id):
        job_id = int(job_id)
        parent_poller = None
        for poller, jobs in self.job_map.items():
            for i in range(len(jobs)):
                if jobs[i] == job_id:
                    jobs.pop(i)
                    parent_poller = poller
                    break
        try:
            self.evals.pop(job_id)
        except KeyError:
            pass
            
        if parent_poller != None:
            message = self.parser.rpc_call(parent_poller, 'remove_job', [job_id])
            self.sched.add_message(message)
            return 'success', ['Successfully removed job']
        else:
            return 'failure', ['Failed to remove job']
    
    """ Called by child Poller to deliver result """
    def add_result(self, sender, id, recorded, val):
        status = self.insert_result(id, recorded, val)
        if status != 'failure':
            return 'success', ['Successfully added result']
        else:
            messages = self.temp_messages
            self.temp_messages = []
            return 'failure', messages
        
    """ Called by Controller when Poller is assigned """
    def add_poller(self, sender, poller):
        # Subscribe to poller for presence updates
        poller_jid = JID(poller)
        self.roster.Subscribe(poller_jid)
        self.job_map[poller_jid] = []
        self.sched.add_message(self.parser.rpc_call(poller_jid, 'set_aggregator', [self.entity_name]))
        return 'success', ['Successfully added %s' % poller_jid]

    """ Called by Controller to remove references to Poller """
    def remove_poller(self, sender, poller):
        poller_jid = JID(poller)
        try:
            unassigned_jobs = self.job_map.pop(poller_jid)
            # If controller has also failed
#            for job in unassigned_jobs:
#                self.job_pool.append(job)
            self.roster.Unsubscribe(poller_jid)
            return 'success', [poller]
        except KeyError:
            return 'failure', ['Failed to remove poller %s' % poller]

# END RPC METHODS

    """ Used when inserting results supplied by add_result.
    Performs evaluations, casts types, sends notifications and then stores the result in the database. """
    def insert_result(self, id, recorded, val, list_id=None):

        val_type = type(val).__name__
        
        try:
            evals = self.evals.get(int(id))
            if evals:
                for comparison, comp_val in evals:
                    if val_type == 'int' or val_type == 'float':
                        eval_statement = str(val) + str(comparison) + str(comp_val)
                    elif val_type == 'str':
                        eval_statement = str('\'' +  val + '\'') + str(comparison) + str('\'' + comp_val + '\'')
#                    print 'Eval statement: %s' % eval_statement
                    result = eval(eval_statement)
#                    print 'Eval result: %s' % result
                if result != True:
                    message = 'Job %s has caused an error! The value %s failed an evaluation.' % (id, comp_val)
                    self.log.error(message)
                    if id not in self.failed_jobs:
                        self.log.info('Sending notifications')
                        self.notifier.send_email(message)
                        self.notifier.send_sms(message)
                        self.failed_jobs.append(id)
                else:
                    if id in self.failed_jobs:
                        message = 'Job %s is back within normal parameters' % id
                        self.notifier.send_email(message)
                        self.log.info(message)
                        self.failed_jobs.remove(id)
                        
        except Exception:
#            traceback.print_exc()
            self.temp_messages = ['Failed to evaluate returned result']
            return 'failure'
            
        if val_type == 'int':        
            if list_id != None:
                self.results.insert().execute(job=id, int=val, recorded=recorded, list=list_id)
            else:
                self.results.insert().execute(job=id, int=val, recorded=recorded)
        elif val_type == 'str':
            if list_id != None:
                self.results.insert().execute(job=id, string=val, recorded=recorded, list=list_id)
            else:
                self.results.insert().execute(job=id, string=val, recorded=recorded)
        elif val_type == 'float':
            if list_id != None:
                self.results.insert().execute(job=id, float=val, recorded=recorded, list=list_id)
            else:
                self.results.insert().execute(job=id, float=val, recorded=recorded)
        elif val_type == 'list':
            self.results.insert().execute(job=id, recorded=recorded, list=0)
            where = and_(self.results.c.recorded == recorded, self.results.c.list == 0)
            list_id = self.results.select(where).execute().fetchone().id
            #print "Retrieved list id %s" % list_id
            for element in val:
                self.insert_result(id, recorded, element, list_id)
        else:
            self.temp_messages = ['Unexpected data type received']
            return 'failure'
        
    """ Setup listener """
    def step_on(self):
        try:
            self.conn.Process(1)
        except KeyboardInterrupt:
            server = 'quae.co.uk'
            features.unregister(self.conn, server)
            #print 'Unregistered from %s' % server
            return 0
        return 1

    def go_on(self):
        while self.step_on(): pass
Example #24
    async def drop_message_impl(self, source_message, drop_channel):
        '''
        Handles copying to the dropbox channel, sending a confirmation message
        in the source channel, sending a DM receipt, and deleting the original,
        for each message in any dropbox.
        '''
        guild_id = source_message.channel.guild.id
        source_channel_id = source_message.channel.id
        source_message_id = source_message.id

        # get the ORM row for this dropbox.
        drop = None
        if source_channel_id in self.dropboxes[guild_id]:
            drop = self.dropboxes[guild_id][source_channel_id]
        else:
            # should only return one entry because of how rows are added
            drop = DropboxChannel.get_or_none(
                (DropboxChannel.serverid == guild_id)
                & (DropboxChannel.sourcechannelid == source_channel_id))

        # the embed to display who was the author in dropbox channel
        embed = Embed(timestamp=source_message.created_at, color=0x663399)
        embed.set_author(
            name=f"{source_message.author} ({source_message.author.id})",
            icon_url=source_message.author.avatar_url_as(size=32))
        embed.add_field(name="Author link",
                        value=source_message.author.mention)
        ctx = await self.bot.get_context(source_message)

        pages = Utils.paginate(source_message.content)
        page_count = len(pages)

        if source_message.author.dm_channel is None:
            await source_message.author.create_dm()
        dm_channel = source_message.author.dm_channel

        attachment_names = []
        delivery_success = None

        try:
            # send embed and message to dropbox channel
            for attachment in source_message.attachments:
                try:
                    buffer = io.BytesIO()
                    await attachment.save(buffer)
                    await drop_channel.send(
                        file=discord.File(buffer, attachment.filename))
                    attachment_names.append(attachment.filename)
                except Exception:
                    await drop_channel.send(
                        Lang.get_locale_string(
                            'dropbox/attachment_fail',
                            ctx,
                            author=source_message.author.mention))

            if len(pages) == 0:
                # means no text content included
                if len(attachment_names) < 1:
                    # if there aren't any attachments, the dropbox could end up with a floating embed, so include a helpful message too
                    last_drop_message = await drop_channel.send(
                        embed=embed,
                        content=Lang.get_locale_string('dropbox/msg_blank',
                                                       ctx))
                else:
                    last_drop_message = await drop_channel.send(embed=embed)
            else:
                # deliver all the pages of text content
                for i, page in enumerate(pages[:-1]):
                    if len(pages) > 1:
                        page = f"**{i+1} of {page_count}**\n{page}"
                    await drop_channel.send(page)
                last_page = pages[
                    -1] if page_count == 1 else f"**{page_count} of {page_count}**\n{pages[-1]}"
                last_drop_message = await drop_channel.send(embed=embed,
                                                            content=last_page)

            # TODO: try/ignore: add reaction for "claim" "flag" "followup" "delete"
            msg = Lang.get_locale_string('dropbox/msg_delivered',
                                         ctx,
                                         author=source_message.author.mention)
            await ctx.send(msg)
            delivery_success = True
        except Exception as e:
            msg = Lang.get_locale_string('dropbox/msg_not_delivered',
                                         ctx,
                                         author=source_message.author.mention)
            await ctx.send(msg)
            await self.bot.guild_log(guild_id,
                                     "broken dropbox...? Call alex, I guess")
            await Utils.handle_exception("dropbox delivery failure", self.bot,
                                         e)
            delivery_success = False

        try:
            # delete original message, the confirmation of sending is deleted in clean_channels loop
            await source_message.delete()
            del self.drop_messages[guild_id][source_channel_id][
                source_message_id]
            # remove from the tracked collection itself; wrapping it in set()
            # made a copy, so the entry was never actually removed
            self.delivery_in_progress[guild_id][source_channel_id].remove(
                source_message_id)
        except discord.errors.NotFound as e:
            # ignore missing message
            pass

        # give senders a moment before pinging them with the copy
        await asyncio.sleep(1)

        try:
            # try sending dm receipts and report in dropbox channel if it was sent or not
            if drop and drop.sendreceipt:
                # get the locale versions of the messages for status, receipt header, and attachments ready to be sent
                status_msg = Lang.get_locale_string(
                    'dropbox/msg_delivered'
                    if delivery_success else 'dropbox/msg_not_delivered',
                    ctx,
                    author="")
                receipt_msg_header = Lang.get_locale_string(
                    'dropbox/msg_receipt', ctx, channel=ctx.channel.mention)
                if len(attachment_names) == 0:
                    attachment_msg = ""
                else:
                    attachment_msg_key = 'dropbox/receipt_attachment_plural' if len(
                        attachment_names
                    ) > 1 else 'dropbox/receipt_attachment_singular'
                    attachment_msg = Lang.get_locale_string(
                        attachment_msg_key,
                        ctx,
                        number=len(attachment_names),
                        attachments=", ".join(attachment_names))
                # might as well try to stuff in as few pages as possible
                dm_header_pages = Utils.paginate(
                    f"{status_msg}\n{receipt_msg_header}\n{attachment_msg}")

                for page in dm_header_pages:
                    await dm_channel.send(page)

                if len(pages) == 0:
                    # no text content
                    if len(attachment_names) < 1:
                        #if no text and no attachments, then send a response that there wasn't any text content
                        await dm_channel.send(content=Lang.get_locale_string(
                            'dropbox/msg_blank', ctx))
                else:
                    # send the page(s) in code blocks to dm.
                    for i, page in enumerate(pages[:-1]):
                        if len(pages) > 1:
                            page = f"**{i+1} of {page_count}**\n```{page}```"
                        await dm_channel.send(page)

                    last_page = f'```{pages[-1]}```' if page_count == 1 else f"**{page_count} of {page_count}**\n```{pages[-1]}```"
                    await dm_channel.send(last_page)
                if delivery_success:
                    embed.add_field(name="receipt status", value="sent")
                    # this is used if drop first before dms to add status to embed
                    await last_drop_message.edit(embed=embed)
        except Exception as e:
            Logging.info(
                "Dropbox DM receipt failed, not an issue so ignoring exception and giving up"
            )
            if drop and drop.sendreceipt and delivery_success:
                embed.add_field(name="receipt status", value="failed")
                # this is used if drop first before dms to add status to embed
                await last_drop_message.edit(embed=embed)
Example #25
def init_public_ip():
    set_config('.', 'public_ip', get_public_ip())
    Logging.info('Initialized config : [public_ip]')
Example #26
def version():
    ''' Show version and exit. '''
    from . import __version__
    Logging.info(__version__)
Example #27
def before_send(event, hint):
    if event['level'] == "error" and event.get('logger') == 'gearbot':
        return None  # we send errors manually, in a much cleaner way
    if 'exc_info' in hint:
        exc_type, exc_value, tb = hint['exc_info']
        for t in [ConnectionClosed, ClientOSError, ServerDisconnectedError]:
            if isinstance(exc_value, t):
                return
    return event


if __name__ == '__main__':
    Logging.init()
    Logging.info("Launching thatskybot!")

    dsn = Configuration.get_var('SENTRY_DSN', '')
    if dsn != '':
        sentry_sdk.init(dsn, before_send=before_send)

    Database.init()

    loop = asyncio.get_event_loop()

    skybot = Skybot(command_prefix=Configuration.get_var("bot_prefix"),
                    case_insensitive=True,
                    loop=loop)
    skybot.remove_command("help")

    Utils.BOT = skybot
Example #28
class Controller:
    
    """ Controller object. Initializes various data structures used by object. Establishes connection with XMPP server,
    connects to poller, aggregator and logging Multi-User Chats and registers stanza handlers. """
    def __init__(self):

        self.db = Database()

        entity_prefix = 'controller'

        conn = Connection(entity_prefix, static=True)
        self.entity_name, self.entity_suffix = conn.get_entity_name()
        
        # List of nodes known to the controller
        self.poller_map = {}
        self.poller_pool = {}
        
        self.job_map = {}
        self.job_pool = []

        # Message scheduler
        self.sched = MessageScheduler(self.message_handler)
        
        self.log = Logging(conn)
        conn.join_muc('pollers')
        conn.join_muc('aggregators')

        self.parser = Parser()
        
        self.establish_jobs()
        
        self.conn = conn.get_conn()

        self.conn.RegisterHandler('iq',self.result_handler,'result')
        self.conn.RegisterHandler('iq',self.set_handler,'set')
        self.conn.RegisterHandler('presence',self.presence_handler)

        self.go_on()
        
    """ Called by the presence handler when an entity connects to the aggregator or poller MUCs.
    Used to inspect retreive service information from an entity, required in the XEP Jabber-RPC standard. """
    def disco_lookup(self, recipient):
        self.log.info('Performing discovery lookup.')
        message = Iq('get', queryNS=NS_DISCO_INFO, to=recipient)
        self.sched.add_message(message, self.disco_handler)
        
    """ Method passed and used as handler for messages by MessageScheduler.
    Sends messages and logs an error if message send is a retry """
    # Handler used by message scheduling class
    def message_handler(self, message, retry=False):
#        print 'Sending message.'
        if retry == True:
            self.log.error('Message timed out, resending.')
        self.conn.send(message)
#
#   MESSAGE HANDLERS
#
    """ Handler for presence stanzas recieved by the XMPP listener.
    If 
    """
    def presence_handler(self, conn, presence_node):
        sender = presence_node.getFrom()
        presence_type = presence_node.getAttr('type')
        # Ignore self and presence announcements from logging MUC
        if sender.getResource() != 'controller':
            if presence_type == 'unavailable':
                if sender.getNode() == 'aggregators' or sender.getNode() == 'pollers':
                    self.remove_entity(sender.getNode(), sender)
            elif sender.getNode() == 'aggregators' or sender.getNode() == 'pollers':
                # Check the service discovery details for a connecting node.
                self.disco_lookup(sender)
        raise NodeProcessed
        
    """ IQ set handler, runs RPC methods in whitelist """
    def set_handler(self, conn, iq_node):
        query_node = iq_node.getQueryChildren()
        for node in query_node:
            try:
                method = node.getTagData('methodName')
                method_whitelist = ['get_group', 'get_groups', 'create_group', 'update_group', 'remove_group',
                'get_monitor', 'get_monitors', 'create_monitor', 'update_monitor', 'remove_monitor', 'get_monitors_by_gid',
                'get_job', 'get_jobs', 'create_job', 'update_job', 'remove_job',
                'get_evaluation', 'get_evaluations', 'create_evaluation', 'update_evaluation', 'remove_evaluation',
                'get_results', 'get_results_day', 'get_results_week', 'get_results_hour',
                'poller_failure',
                'get_aggregator']
                if method in method_whitelist:
                    method = getattr(self, method)
                    try:
                        try:
                            params = node.getTag('params').getChildren()
                            args = self.parser.get_args(params, iq_node.getFrom())
                        except AttributeError:
                            args = []
                            
                        status, parameters = apply(method, args)
                        message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                        self.conn.send(message)
                    except TypeError:
#                        print sys.exc_info()
                        conn.send(iq_node.buildReply('error'))
                else:
                    conn.send(iq_node.buildReply('error'))
                    self.log.error('Method not in whitelist')
            except AttributeError:
                traceback.print_exc()
                conn.send(iq_node.buildReply('error'))
        raise NodeProcessed

    def result_handler(self, conn, iq_node):
        # Check if the response is managed by the scheduler
        if self.sched.is_managed(int(iq_node.getAttr('id'))):
            self.sched.received_response(iq_node)
        raise NodeProcessed
#   END MESSAGE HANDLERS

#
# BEGIN SCHEDULER RESPONSE HANDLERS
#
    def disco_handler(self, sender, query_node):
        if query_node.getNamespace() == NS_DISCO_INFO:
            entity_type = query_node.getTagAttr('identity', 'type')
            if entity_type == 'aggregator' or entity_type == 'poller':
                adjusted_jid = JID(sender.getResource() + '@quae.co.uk/skynet')
                category = query_node.getTagAttr('identity', 'category')
                self.log.info('Registering node %s' % adjusted_jid)
                self.add_entity(entity_type, category, adjusted_jid)
        else:
            self.log.error('Received iq message with incorrect namespace')

    def assign_job(self, sender, query_node):
        if query_node.getNamespace() == NS_RPC:
            poller, job_id = self.parser.get_args_no_sender(query_node.getTag('methodResponse').getTag('params').getChildren())
            poller_jid = JID(poller)
            job_id = int(job_id)
            job = None
            for i in range(len(self.job_pool)):
                if self.job_pool[i]['id'] == job_id:
                    job = self.job_pool.pop(i)
                    self.log.info('Removing job %s from the job pool' % job_id)
                    break
            if job != None:
                self.job_map[poller_jid].append(job)
                self.log.info('Job %s successfully assigned to %s' % (job_id, poller_jid))
        else:
            self.log.error('Received iq message with incorrect namespace')
            
    def poller_removed(self, sender, query_node):
        if query_node.getNamespace() == NS_RPC:
            args = self.parser.get_args_no_sender(query_node.getTag('methodResponse').getTag('params').getChildren())
            adjusted_jid = JID(args[0])
            unassigned_jobs = self.job_map.pop(adjusted_jid)
            for job in unassigned_jobs:
                self.log.info('Adding job %s to the job pool' % job['id'])
                self.job_pool.append(job)
            parent_aggregator = None
            for aggregator, pollers in self.poller_map.items():
                for poller, segment in pollers:
                    if poller == adjusted_jid:
                        parent_aggregator = aggregator
                        pollers.remove((poller, segment))
                        break
            self.log.info('Removed %s from %s' % (adjusted_jid, parent_aggregator))
            self.assign_pooled_jobs()            
        else:
            self.log.error('Received iq message with incorrect namespace')
# END SCHEDULER HANDLERS

#
#   BEGIN RPC METHODS
#
# Requested by an aggregator when an assigned poller has failed/disconnected.
    def poller_failure(self, sender, previous_poller):
        pollers = self.poller_map[JID(sender)]
        poller_jid = JID('[email protected]/' + JID(previous_poller).getNode())
        try:
            pollers.remove(poller_jid)
            message = 'Removed failed poller'
            try:
                self.rebalance_pollers()
            except:
                print sys.exc_info()
            return 'success', [message]
        except:
            return 'failure', ['Failed to remove poller']

# Group operations
    def get_group(self, sender, name):
        group = self.db.get_group_by_name(name)
        if group != None:
            return 'success', [group]
        return 'failure', ['No such group exists']
    
    def get_groups(self, sender):
        groups = self.db.get_groups()
        if groups != None:
            return 'success', [groups]
        return 'failure', ['Failed to retrieve groups']

    def create_group(self, sender, name, desc):
        existing = self.db.get_group_by_name(name)
        if existing == None:
            self.db.create_group(name, desc)
            return 'success', ['Successfully created group %s' % name]
        return 'failure', ['failure']
    
    def update_group(self, sender, id, name, desc):
        group = self.db.get_group_by_id(id)
        if group != None:
            self.db.update_group(id, name, desc)
            return 'success', ['Successfully updated group %s' % name]
        return 'failure', ['Failed to update group']
        
    def remove_group(self, sender, name):
        self.db.remove_group_by_name(name)
        if self.db.get_group_by_name(name) == None:
            return 'success', ['Successfully removed group %s' % name]
        else:
            return 'failure', ['Failed to remove group %s' % name]
# Monitor operations

    def get_monitor(self, sender, name):
        try:
            monitor = self.db.get_monitor(name)
            if monitor != False:
                return 'success', [monitor]
        except TypeError:
            return 'failure', ['No such monitor exists']
        return 'failure', ['No such monitor exists']
        
    def get_monitors(self, sender, group=None):
        try:
            if group != None:
                monitors = self.db.get_monitors(group)
            else:
                monitors = self.db.get_monitors()
            return 'success', [monitors]
        except AttributeError:
            return 'failure', ['Failed to retrieve monitors']
            
    def get_monitors_by_gid(self, sender, group_id):
        try:
            if group_id != None:
                monitors = self.db.get_monitors_by_gid(group_id)
                return 'success', [monitors]
        except AttributeError:
            pass
        return 'failure', ['Failed to retrieve monitors']
    
    def create_monitor(self, sender, name, description, group):
        if self.db.create_monitor(name, description, group) == True:
            return 'success', ['Successfully created monitor %s' % name]
        return 'failure', ['Failed to create monitor']
        
    def update_monitor(self, sender, name, description, group):
        monitor = self.db.get_monitor(name)
        if monitor is not None:
            # assumes Database.update_monitor mirrors create_monitor's signature
            self.db.update_monitor(name, description, group)
            return 'success', ['Successfully updated monitor %s' % name]
        return 'failure', ['Failed to update monitor']
        
    def remove_monitor(self, sender, name):
        self.db.remove_monitor_by_name(name)
        if self.db.get_monitor_by_name(name) == None:
            return 'success', ['Successfully removed monitor %s' % name]
        else:
            return 'failure', ['Failed to remove monitor %s' % name]
        
# job operations

    def get_job(self, sender, mon, id):
        try:
            job = self.db.get_job(id, mon)
            return 'success', [job]
        except TypeError:
            return 'failure', ['No such job exists']
            
    def get_jobs(self, sender, mon):
        try:
            jobs = self.db.get_jobs(mon)
            return 'success', [jobs]
        except AttributeError:
            return 'failure', ['Failed to retrieve jobs']

    def create_job(self, sender, mon, address, protocol, frequency, interface, resource):
        if self.db.get_monitor(mon) != None:
            if self.db.create_job(address, protocol, frequency, interface, resource, mon) == True:
                return 'success', ['Successfully created a job for %s' % mon]
        return 'failure', ['Failed to create job']

    def update_job(self, sender, mon, id, address, protocol, frequency, interface, resource):
        existing = self.db.get_job(id, mon)
        if existing is not None:
            self.db.update_job(id, address, protocol, frequency, interface, resource)
            return 'success', ['Successfully updated job']
        else:
            return 'failure', ['Failed to update job']
            
    def remove_job(self, sender, mon, id):
        self.db.remove_job(id)
        if self.db.get_job(id, mon) is None:
            return 'success', ['Successfully removed job']
        else:
            return 'failure', ['Failed to remove job']
        
    # Result read operations
    
    def get_results(self, sender, monitor, job, start_datetime, end_datetime):
        results = self.db.get_results(monitor, job, start_datetime, end_datetime)
        if results is None:
            return 'failure', ['No such results exist']
        elif results is not False:
            return 'success', results
        return 'failure', ['Failed to retrieve specified results']
        
    def get_results_day(self, sender, monitor, job, start_datetime):
        results = self.db.get_results_day(monitor, job, start_datetime)
        if results == []:
            return 'failure', ['No such results exist']
        elif results is not False:
            job_details = self.db.get_job(job, monitor)
            return 'success', [job_details, results]
        return 'failure', ['Failed to retrieve specified results']
        
    def get_results_week(self, sender, monitor, job, start_datetime):
        pass
    
    def get_results_hour(self, sender, monitor, job, start_datetime):
        results = self.db.get_results_hour(monitor, job, start_datetime)
        if results == []:
            return 'failure', ['No such results exist']
        elif results is not False:
            job_details = self.db.get_job(job, monitor)
            return 'success', [job_details, results]
        return 'failure', ['Failed to retrieve specified results']
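    """ Editor's sketch (not in the original source): a convenience wrapper
    showing how the hour window above might be requested relative to "now";
    assumes the db layer accepts datetime objects, as the methods above imply. """
    def get_results_last_hour(self, sender, monitor, job):
        from datetime import datetime, timedelta
        start = datetime.now() - timedelta(hours=1)
        return self.get_results_hour(sender, monitor, job, start)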
#   END RPC METHODS

#
#   BEGIN PRIVATE METHODS
#
    """ Called on successful DISCO request.
    Will register Poller or Aggregator, send jobs or balance jobs. """
    def add_entity(self, entity_type, segment, entity):
        if entity_type == 'aggregator':
            self.poller_map[entity] = []
            # If pollers have been added, but there were no aggregators running
            self.assign_pooled_pollers()
            if len(self.poller_map) > 1:
                self.rebalance_pollers()
            self.assign_pooled_jobs()
                
        elif entity_type == 'poller':
            self.job_map[entity] = []
            
            self.poller_pool[entity] = segment
            self.assign_pooled_pollers()
            
            if len(self.poller_map) > 1:
                self.rebalance_pollers()
            
            self.assign_pooled_jobs()
            
            if len(self.job_map) > 1:
                self.rebalance_jobs()
            # Give poller to appropriate aggregator
            print('Added %s to %ss' % (entity, entity_type))
        self.log.info('Node %s was successfully registered with the controller' % entity)

    """ Removing Poller or Aggregator """
    def remove_entity(self, entity_type, entity):
        try:
            if entity_type == 'aggregators':
                adjusted_jid = JID(JID(entity).getResource() + '@quae.co.uk/skynet')
                unassigned_pollers = self.poller_map.pop(adjusted_jid)
                for poller, segment in unassigned_pollers:
                    self.poller_pool[poller] = segment
                self.log.info('Removed %s' % adjusted_jid)
                if len(self.poller_pool) > 0:
                    # Try and assign pooled pollers
                    if not self.assign_pooled_pollers():
                        for poller, segment in unassigned_pollers:
                            message = self.parser.rpc_call(poller, 'aggregator_failure', [])
                            self.sched.add_message(message)
                    
            elif entity_type == 'pollers':
                adjusted_jid = JID(JID(entity).getResource() + '@quae.co.uk/skynet')
                if len(self.poller_map) > 0:
                    parent_aggregator = None
                    for aggregator, pollers in self.poller_map.items():
                        for poller, segment in pollers:
                            if adjusted_jid == poller:
                                parent_aggregator = aggregator
                                break
                    if parent_aggregator is not None:
                        remove_call = self.parser.rpc_call(parent_aggregator, 'remove_poller', [str(adjusted_jid)])
                        self.sched.add_message(remove_call, self.poller_removed)
                else:
                    try:
                        self.job_map.pop(adjusted_jid)
                        self.poller_pool.pop(adjusted_jid)
                        self.log.info('Poller not assigned, successfully removed')
                    except Exception:
                        self.log.error('Failed to remove poller')
                        traceback.print_exc()
        except (ValueError, KeyError):
            # KeyError covers entities that were never registered
            self.log.error('Failed to remove %s' % entity)

    """ Assign unassigned pollers """
    def assign_pooled_pollers(self):
        if len(self.poller_map) > 0:
            while len(self.poller_pool) > 0:
                unassigned_poller, segment = self.poller_pool.popitem()
                chosen_aggregator = None
                poller_comp = None
                for aggregator, pollers in self.poller_map.items():
                    # Track the aggregator with the fewest assigned pollers so far
                    if poller_comp is None or len(pollers) < poller_comp:
                        chosen_aggregator = aggregator
                        poller_comp = len(pollers)
                if chosen_aggregator is not None:
                    # Assign Poller to the Aggregtor with least assigned Pollers
                    
                    message = self.parser.rpc_call(chosen_aggregator, 'add_poller', [str(unassigned_poller)])
                    self.sched.add_message(message)
                    
                    for job in self.job_map[unassigned_poller]:
                        message = self.parser.rpc_call(chosen_aggregator, 'move_job', [str(unassigned_poller), job['id'], job['address'], job['protocol'], job['frequency'], job['interface'], job['resource'], job['segment']])
                        self.sched.add_message(message)
                    self.poller_map[chosen_aggregator].append((unassigned_poller, segment))
            return True
        else:
            self.log.info('No aggregators available for poller assignment')
            return False
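    """ Editor's sketch (not in the original source): the least-loaded scan in
    assign_pooled_pollers can be expressed with min(); equivalent selection. """
    def least_loaded_aggregator(self):
        if not self.poller_map:
            return None
        return min(self.poller_map, key=lambda agg: len(self.poller_map[agg]))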
    
    """ Get Pollers for a given network segment """
    def get_segment_pollers(self, segment):
        segment_pollers = {}
        for aggregator, pollers in self.poller_map.items():
            for poller, poller_segment in pollers:
                if poller_segment == segment:
                    segment_pollers[poller] = self.job_map[poller]
        return segment_pollers
        
    """ Called to allocate unassigned jobs """
    def assign_pooled_jobs(self):
        if len(self.job_map) > 0 and len(self.poller_map) > 0:
            for job in self.job_pool:
                unassigned_job = job
                least_loaded = None
                job_comp = None
                pollers = self.get_segment_pollers(unassigned_job['segment'])
                for poller, jobs in pollers.items():
#                for poller, jobs in self.job_map.items():
                    if job_comp is None or len(jobs) < job_comp:
                        least_loaded = poller
                        job_comp = len(jobs)
                if least_loaded is not None:
                    chosen_aggregator = None
                    for aggregator, pollers in self.poller_map.items():
                        for poller, segment in pollers:
                            if poller == least_loaded:
                                chosen_aggregator = aggregator
                                break
                    if chosen_aggregator is not None:
                        self.send_job(unassigned_job, least_loaded, chosen_aggregator)
        else:
            self.log.info('No assigned pollers available for job assignment')
        #print 'Job map %s' % self.job_map
        #print 'Job pool %s' % self.job_pool
    
    """ Rebalance pollers, compares amount assigned to each Aggregator, and moves across to
    another Poller if there's at least 2 more than another Aggregator """
    def rebalance_pollers(self):
        self.log.info('Attempting to rebalance pollers')

        least_count = None
        most_count = None
        least_pollers = None
        most_pollers = None
        # Retrieve the aggregators with the fewest and most pollers. The
        # original compared each aggregator only against the previous one's
        # count, which could pick the wrong pair; track a running min/max.
        for aggregator, pollers in self.poller_map.items():
            if least_count is None or len(pollers) < least_count:
                least_pollers = aggregator
                least_count = len(pollers)
            if most_count is None or len(pollers) > most_count:
                most_pollers = aggregator
                most_count = len(pollers)

        if least_pollers is not None and most_pollers is not None:
            # If the difference between the two pollers is worth balancing
            if (len(self.poller_map[most_pollers]) - len(self.poller_map[least_pollers])) > 1:
                poller, segment = self.poller_map[most_pollers].pop()
                self.poller_map[least_pollers].append((poller, segment))
                self.sched.add_message(self.parser.rpc_call(least_pollers, 'add_poller', [str(poller)]))
                self.sched.add_message(self.parser.rpc_call(most_pollers, 'remove_poller', [str(poller)]))
                self.rebalance_pollers()
            else:
                self.log.info('Pollers balanced')
                return True
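    """ Editor's sketch (not in the original source): the same rebalancing
    invariant without recursion - repeatedly move one item from the fullest
    bucket to the emptiest until the spread is at most one. """
    def _rebalance_buckets(self, buckets):
        while buckets:
            least = min(buckets, key=lambda k: len(buckets[k]))
            most = max(buckets, key=lambda k: len(buckets[k]))
            if len(buckets[most]) - len(buckets[least]) <= 1:
                break
            buckets[least].append(buckets[most].pop())
        return buckets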
               
    """ Similar to above, checks number of assigned jobs through the system, and will level them across all Pollers """ 
    def rebalance_jobs(self):
        self.log.info('Attempting to rebalance jobs')
        least_count = None
        most_count = None
        least_jobs = None
        most_jobs = None

        network_segment = 'skynet'
        pollers = self.get_segment_pollers(network_segment)

#        for poller, jobs in self.job_map.items():
        for poller, jobs in pollers.items():
            print(poller)
            # Same running min/max fix as in rebalance_pollers: the original
            # compared only against the previous poller's job count.
            if least_count is None or len(jobs) < least_count:
                least_jobs = poller
                least_count = len(jobs)
            if most_count is None or len(jobs) > most_count:
                most_jobs = poller
                most_count = len(jobs)

        if least_jobs is not None and most_jobs is not None:
            if (len(self.job_map[most_jobs]) - len(self.job_map[least_jobs])) > 1:
                job = self.job_map[most_jobs].pop()
                self.job_map[least_jobs].append(job)
                least_parent = None
                most_parent = None
                for aggregator, pollers in self.poller_map.items():
                    for poller, node_segment in pollers:
                        if network_segment == node_segment:
                            if poller == least_jobs:
                                least_parent = aggregator
                            if poller == most_jobs:
                                most_parent = aggregator
                            if least_parent is not None and most_parent is not None:
                                break
                    
                if least_parent is not None and most_parent is not None:
                    self.log.info('Moving job %s to %s' % (job['id'], least_jobs))
                    self.sched.add_message(self.parser.rpc_call(most_parent, 'remove_job', [job['id']]))
                    self.sched.add_message(self.parser.rpc_call(least_parent, 'run_job', [str(least_jobs), job['id'], job['address'], job['protocol'], job['frequency'], job['interface'], job['resource']]), offset=True)
                self.rebalance_jobs()
            else:
                self.log.info('Jobs balanced')
                return True
                
    """ Retrieve jobs on startup """    
    def establish_jobs(self):
        monitors = self.db.get_monitors()
        self.log.info('Retrieving jobs')
        for monitor in monitors:
            jobs = self.db.get_jobs(monitor.name)
            for job in jobs:
                job = dict(job)
                segment_name = self.db.get_segment_name(job['segment'])
                job['segment'] = segment_name
                # Frequency is stored in minutes; convert to seconds (one-minute minimum)
                job['frequency'] = job['frequency'] * 60
                self.job_pool.append(job)
        self.log.info('%s jobs added to the pool' % len(self.job_pool))
        
    """ Send job to Aggregator to forward to Poller """
    def send_job(self, job, poller, aggregator):
        message = self.parser.rpc_call(aggregator, 'run_job', [str(poller), job['id'], job['address'], job['protocol'], job['frequency'], job['interface'], job['resource']])
        self.log.info('Sending job %s to %s' % (job['id'], aggregator))
        self.sched.add_message(message, self.assign_job, offset=True)
        
    def step_on(self):
        try:
            self.conn.Process(1)
        except KeyboardInterrupt: return 0
        return 1

    def go_on(self):
        while self.step_on(): pass
Example #29
0
def before_send(event, hint):
    if event['level'] == "error" and 'logger' in event and event['logger'] == 'gearbot':
        return None  # we send errors manually, in a much cleaner way
    if 'exc_info' in hint:
        exc_type, exc_value, tb = hint['exc_info']
        if isinstance(exc_value, (ConnectionClosed, ClientOSError, ServerDisconnectedError)):
            return None
    return event
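
A quick illustrative check of the filter above; the event dicts below carry only the keys before_send actually inspects, so they are stand-ins rather than real Sentry payloads:

# illustrative smoke test, not part of the original source
assert before_send({'level': 'error', 'logger': 'gearbot'}, {}) is None
assert before_send({'level': 'info'}, {}) == {'level': 'info'}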


if __name__ == '__main__':
    Logging.init()
    Logging.info("Launching journeybot!")

    dsn = Configuration.get_var('SENTRY_DSN', '')
    env = Configuration.get_var('SENTRY_ENV', '')
    if dsn != '' and env != '':
        sentry_sdk.init(dsn, before_send=before_send, environment=env)

    Database.init()

    loop = asyncio.get_event_loop()

    journeybot = Journeybot(command_prefix=Configuration.get_var("bot_prefix"),
                            case_insensitive=True,
                            loop=loop)
    journeybot.remove_command("help")
Example #30
0
    async def on_message(self, message: discord.Message):
        if message.author.bot or not hasattr(message.author, "guild"):
            return

        welcome_channel = self.bot.get_config_channel(message.guild.id,
                                                      Utils.welcome_channel)
        rules_channel = self.bot.get_config_channel(message.guild.id,
                                                    Utils.rules_channel)
        log_channel = self.bot.get_config_channel(message.guild.id,
                                                  Utils.log_channel)
        member_role = message.guild.get_role(
            Configuration.get_var("member_role"))
        nonmember_role = message.guild.get_role(
            Configuration.get_var("nonmember_role"))

        if message.author.id == 349977940198555660:  # is gearbot
            pattern = re.compile(
                r'\(``(\d+)``\) has re-joined the server before their mute expired'
            )
            match = re.search(pattern, message.content)
            if match:
                user_id = int(match[1])
                # gearbot is handling it. never unmute this user
                self.remove_member_from_cooldown(message.guild.id, user_id)
                muted_member = message.guild.get_member(user_id)
                muted_member_name = Utils.get_member_log_name(muted_member)
                await log_channel.send(f'''
                    Gearbot re-applied mute when member re-joined: {muted_member_name}
                    I won't try to unmute them later.
                    ''')
                return

        if message.author.guild_permissions.mute_members or \
                await self.member_verify_action(message) or \
                member_role in message.author.roles:
            # is a mod or
            # verification flow triggered. no further processing or
            # message from regular member. no action for welcomer to take.
            return

        # ignore when channels not configured
        if not welcome_channel or not rules_channel or message.channel.id != welcome_channel.id:
            if member_role not in message.author.roles:
                # nonmember speaking somewhere other than welcome channel? Maybe we're not using the
                # welcome channel anymore? or something else went wrong... give them member role.
                try:
                    await message.author.add_roles(member_role)
                    if nonmember_role in message.author.roles:
                        Logging.info(
                            f"{Utils.get_member_log_name(message.author)} - had shadow role when speaking. removing it!"
                        )
                        await message.author.remove_roles(nonmember_role)
                except Exception as e:
                    try:
                        Logging.info(f"message: {message.content}")
                        Logging.info(f"author id: {message.author.id}")
                    except Exception as ee:
                        pass
                    await Utils.handle_exception("member join exception",
                                                 self.bot, e)
            return

        # Only act on messages in welcome channel from here on
        # Nonmember will only be warned once every 10 minutes that they are speaking in welcome channel
        now = datetime.now().timestamp()
        then = 0
        grace_period = 10 * 60  # 10 minutes

        try:
            was_welcomed = self.welcome_talkers[message.guild.id][
                message.author.id]
            then = was_welcomed + grace_period
        except KeyError:
            # member hasn't spoken in the welcome channel recently
            pass

        if then > now:
            # grace period has not expired. Do not warn member again yet.
            # print("it hasn't been 10 minutes...")
            return

        # record the time so member won't be pinged again too soon if they keep talking
        self.welcome_talkers[message.guild.id][message.author.id] = now
        await welcome_channel.send(
            Lang.get_string("welcome/welcome_help",
                            author=message.author.mention,
                            rules_channel=rules_channel.mention))
        # ping log channel with detail
        if log_channel:
            await log_channel.send(
                f"{Utils.get_member_log_name(message.author)} "
                f"spoke in {welcome_channel.mention} ```{message.content}```")
Example #31
0
    return event


def can_help(ctx):
    return ctx.author.guild_permissions.mute_members


def can_admin():
    async def predicate(ctx):
        return await ctx.bot.permission_manage_bot(ctx)
    return commands.check(predicate)


if __name__ == '__main__':
    Logging.init()
    Logging.info("Launching Skybot!")

    dsn = Configuration.get_var('SENTRY_DSN', '')
    dsn_env = Configuration.get_var('SENTRY_ENV', 'Dev')
    Logging.info(f"DSN info - dsn:{dsn} env:{dsn_env}")
    if dsn != '':
        sentry_sdk.init(dsn, before_send=before_send, environment=dsn_env, integrations=[AioHttpIntegration()])

    # TODO: exception handling for db migration error
    run_db_migrations()
    Logging.info('db migrations go')
    Database.init()
    Logging.info('db init go')

    intents = Intents(members=True, messages=True, guilds=True, bans=True, emojis=True, presences=True, reactions=True)
    loop = asyncio.get_event_loop()
Example #32
0
        dest="size",
        type=size_limit,
        default=20000,
        help=
        "enter a value from 10 to 1000 (default = 20, less than 20kB will not be downloaded)",
    )
    parser.add_argument(
        "-e",
        dest="ext",
        metavar="exclude",
        default=False,
        help="exclude image type/extension, i.e., exclude gif, jpg, webp, etc.",
    )
    parser.add_argument("-j",
                        dest="hash",
                        action="store_true",
                        help="create json record of hashed image files")

    args = parser.parse_args()

    # remove dot from extension if present
    if args.ext:
        args.ext = args.ext.replace(".", "")

        # account for variation in jpg extension format
        if args.ext in ("jpg", ".jpg"):
            args.ext = "jpeg"

    log.info(f"{'Initiating connection':>15}")
    main(args.url, args.size, args.ext, args.hash)
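
size_limit is used as the argparse type above but is not defined in this snippet. A plausible reconstruction, offered only as a guess at the intent implied by the help text (a 10-1000 kB bound, stored in bytes):

import argparse

def size_limit(value):
    # hypothetical reconstruction: accept 10..1000 (kB) and return bytes,
    # which would make the snippet's default of 20000 equal to 20 kB
    size = int(value)
    if not 10 <= size <= 1000:
        raise argparse.ArgumentTypeError("size must be between 10 and 1000 (kB)")
    return size * 1000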
Example #33
0
class Poller:
    """ Poller object, establish connection, MUCs and handlers """
    def __init__(self, segment='skynet'):
        config = Configuration()

        self.jobs = {}

        self.sched = MessageScheduler(self.message_handler)
        self.parser = Parser()
        
        self.aggregator = None
        self.failed_aggregator = False
        
        self.query_queue = []
        conn = Connection('poller', 'roflcake')        
        self.entity_prefix, entity_suffix = conn.get_entity_name()
        
#        self.entity_name = entity_prefix + entity_suffix
        conn.join_muc('pollers')
        
        self.segment = segment
    
        self.conn = conn.get_conn()
        
        self.log = Logging(conn)
        self.roster = self.conn.getRoster()
        
        self.conn.RegisterHandler('presence', self.presence_handler)
        
        self.conn.RegisterHandler('iq', self.result_handler, 'result')
        self.conn.RegisterHandler('iq', self.error_handler, 'error')
        self.conn.RegisterHandler('iq', self.get_handler, 'get')
        self.conn.RegisterHandler('iq', self.set_handler, 'set')
        
        self.go_on()
        
    """ Message scheduling handler """
    def message_handler(self, message, retry=False):
        if retry:
            self.log.error('Timed out, attempting to resend.')
        self.conn.send(message)
#
#  Handlers for node communication
# 
    """ Presence stanza handler """
    def presence_handler(self, conn, presence_node):
        sender = presence_node.getFrom()
        if presence_node.getAttr('type') == 'subscribe':
            self.roster.Authorize(sender)
#        if sender != self.entity_name:
#            print presence_node.getRole()
#            conn.send(Iq('get', NS_DISCO_INFO, to=presence_node.getFrom()))            
        raise NodeProcessed
    
    """ IQ result handler, acknowledges messages with scheduler """
    def result_handler(self, conn, iq_node):
        iq_id = iq_node.getAttr('id')
        sender = iq_node.getFrom()
        
        if sender.getNode() != self.entity_prefix:
#            pass
            self.sched.received_response(iq_node)
        raise NodeProcessed  # This stanza is fully processed
    
    """ IQ error handler """
    def error_handler(self, conn, iq_node):
        if iq_node.getFrom() == self.aggregator:
            pass
#            print 'Erk!'
        raise NodeProcessed
    
    """ IQ get handler """
    def get_handler(self, conn, iq_node):
        if iq_node.getQueryNS() == NS_DISCO_INFO:
            reply = iq_node.buildReply('result')
            if self.segment is not None:
                category = self.segment
            else:
                category = 'skynet'
            identity = Node('identity', {'category':category, 'type':'poller'})
            reply.setQueryPayload([identity])
            conn.send(reply)
        else:
            conn.send(iq_node.buildReply('error'))
        raise NodeProcessed
        
    """ IQ set handler, used for RPC calls, permits methods in whitelist """
    def set_handler(self, conn, iq_node):
        sender = iq_node.getFrom()
        if sender.getNode() != self.entity_prefix:
            query_node = iq_node.getQueryChildren()
            for node in query_node:
                try:
                    method = node.getTagData('methodName')
                    method_whitelist = ['run_job', 'set_aggregator', 'remove_job', 'aggregator_failure']
                    if method in method_whitelist:
                        method = getattr(self, method)
                        try:
                            try:
                                params = node.getTag('params').getChildren()
                                args = self.parser.get_args(params)
                            except AttributeError:
                                args = []
                            # apply() was Python 2 only; * unpacking is equivalent
                            status, parameters = method(*args)
                            message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                            conn.send(message)
                        except TypeError:
#                            print(sys.exc_info())
                            conn.send(iq_node.buildReply('error'))
                    else:
                        #print('Method not in whitelist')
                        #print(sys.exc_info())
                        conn.send(iq_node.buildReply('error'))
                except AttributeError:
                    #print(sys.exc_info())
                    conn.send(iq_node.buildReply('error'))
        raise NodeProcessed
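    """ Editor's sketch (not in the original source): the whitelist-dispatch
    core of set_handler, isolated for clarity. """
    def _dispatch_whitelisted(self, method_name, args, whitelist):
        if method_name not in whitelist:
            return 'failure', ['Method not permitted']
        return getattr(self, method_name)(*args)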
    
    #
    # RPC METHODS (job setup and scheduling)
    #
    """ Called by Aggregator to establish job """
    def run_job(self, sender, aggregator, id, addr, proto, freq, dom, resource):
        try:
            job = Job(self, id, addr, proto, freq, dom, resource, self.conn, self.sched)
            self.jobs[id] = job
            job.start()
            return 'success', [int(id)]
        except Exception:
            return 'failure', ['Failed to schedule job']
            
    """ Controller calls to notify of parent Aggregator failure """
    def aggregator_failure(self, sender):
        self.aggregator = None
        self.failed_aggregator = True
        return 'success', ['Removed parent aggregator']
            
    """ Called when job moved off this Poller """
    def remove_job(self, sender, job_id):
        try:
            job = self.jobs[int(job_id)]
            job.end()
            return 'success', ['Stopped job %s' % job_id]
        except Exception:
            return 'failure', ['Failed to stop job %s' % job_id]
    
    """ Called by parent Aggregator, sets where add_results will be sent """
    def set_aggregator(self, sender, aggregator):
        self.log.info('Setting aggregator %s' % aggregator)
        self.aggregator = aggregator
        if self.failed_aggregator:
            self.failed_aggregator = False
            for job in self.jobs.values():
                job.send_cached_results()
        return 'success', ['Successfully set aggregator']
            
    #
    # PRIVATE METHODS
    #
    """ Provides parent Aggregator, used by Job instances """
    def get_aggregator(self):
        return self.aggregator

    """ Setup listener """
    def step_on(self):
        try:
            self.conn.Process(1)
        except KeyboardInterrupt:
            server = 'quae.co.uk'
            features.unregister(self.conn, server)
            # Stop all running jobs
            for job in self.jobs.values():
                job.end()
                
            self.sched.end()
            
            print('Unregistered from %s' % server)
            return 0
        return 1
    
    def go_on(self):
        while self.step_on(): pass