Example #1
    def __init__(self, segment='skynet'):
        config = Configuration()

        self.jobs = {}

        self.sched = MessageScheduler(self.message_handler)
        self.parser = Parser()
        
        self.aggregator = None
        self.failed_aggregator = False
        
        self.query_queue = []
        conn = Connection('poller', 'roflcake')        
        self.entity_prefix, entity_suffix = conn.get_entity_name()
        
#        self.entity_name = entity_prefix + entity_suffix
        conn.join_muc('pollers')
        
        self.segment = segment
    
        self.conn = conn.get_conn()
        
        self.log = Logging(conn)
        self.roster = self.conn.getRoster()
        
        self.conn.RegisterHandler('presence',self.presence_handler)
        
        self.conn.RegisterHandler('iq', self.result_handler, 'result')
        self.conn.RegisterHandler('iq', self.error_handler, 'error')
        self.conn.RegisterHandler('iq', self.get_handler, 'get')
        self.conn.RegisterHandler('iq', self.set_handler, 'set')
        
        self.go_on()
Example #2
    def __init__(self):

        self.db = Database()

        entity_prefix = 'controller'

        conn = Connection(entity_prefix, static=True)
        self.entity_name, self.entity_suffix = conn.get_entity_name()
        
        # List of nodes known to the controller
        self.poller_map = {}
        self.poller_pool = {}
        
        self.job_map = {}
        self.job_pool = []

        # Message scheduler
        self.sched = MessageScheduler(self.message_handler)
        
        self.log = Logging(conn)
        conn.join_muc('pollers')
        conn.join_muc('aggregators')

        self.parser = Parser()
        
        self.establish_jobs()
        
        self.conn = conn.get_conn()

        self.conn.RegisterHandler('iq',self.result_handler,'result')
        self.conn.RegisterHandler('iq',self.set_handler,'set')
        self.conn.RegisterHandler('presence',self.presence_handler)

        self.go_on()
Example #3
    def __init__(self):

        global log
        log = Logging(self.__class__.__name__).log

        clientInfo = clientinfo.ClientInfo()
        self.addonName = clientInfo.getAddonName()

        self.kodiversion = int(xbmc.getInfoLabel('System.BuildVersion')[:2])
Example #4
    def __init__(self, embycursor):

        global log
        log = Logging(self.__class__.__name__).log

        self.embycursor = embycursor

        self.clientInfo = clientinfo.ClientInfo()
        self.addonName = self.clientInfo.getAddonName()
Example #5
    def __init__(self):

        global log
        log = Logging(self.__class__.__name__).log

        self.clientInfo = clientinfo.ClientInfo()
        self.addonName = self.clientInfo.getAddonName()
        self.doUtils = downloadutils.DownloadUtils()

        log("Kodi monitor started.", 1)
Example #6
    def __init__(self, item):

        global log
        log = Logging(self.__class__.__name__).log

        # item is the api response
        self.item = item

        self.clientinfo = clientinfo.ClientInfo()
        self.addonName = self.clientinfo.getAddonName()
Example #7
    def __init__(self, cursor):

        global log
        log = Logging(self.__class__.__name__).log
        
        self.cursor = cursor
        
        self.clientInfo = clientinfo.ClientInfo()
        self.addonName = self.clientInfo.getAddonName()
        self.artwork = artwork.Artwork()
Example #8
    def __init__(self, item):

        global log
        log = Logging(self.__class__.__name__).log

        self.item = item
        self.clientInfo = clientinfo.ClientInfo()
        self.addonName = self.clientInfo.getAddonName()

        self.userid = window('emby_currUser')
        self.server = window('emby_server%s' % self.userid)
Example #9
    async def on_ready(self):
        if self.loaded:
            Logging.info(f"{self.my_name} reconnect")
            return

        Logging.BOT_LOG_CHANNEL = self.get_channel(
            Configuration.get_var("log_channel"))
        Emoji.initialize(self)

        for cog in Configuration.get_var("cogs"):
            try:
                self.load_extension("cogs." + cog)
            except Exception as e:
                await Utils.handle_exception(f"Failed to load cog {cog}", self,
                                             e)
        Logging.info("Cogs loaded")
        self.db_keepalive = self.loop.create_task(self.keepDBalive())
        self.loaded = True

        await Logging.bot_log(f"{self.my_name} has started. Time to bot!")
Example #10
    def __init__(self):

        global log
        log = Logging(self.__class__.__name__).log

        self.clientInfo = clientinfo.ClientInfo()
        self.addonName = self.clientInfo.getAddonName()
        self.doUtils = downloadutils.DownloadUtils().downloadUrl

        self.userId = window('emby_currUser')
        self.server = window('emby_server%s' % self.userId)
Example #11
class BestEffortBroadcast(Broadcast):
    """This class implements a best effort broadcast abstraction.

    Uses:
        - PerfectLink

    """
    def __init__(self, link):
        super().__init__(link)
        self.logger = Logging(self.process_number, "BEB")

    def broadcast(self, callback_id, args=(), kwargs={}):
        self.logger.log_debug(f"Broadcasting {(args, kwargs)}")
        for peer in self.peers:
            self.send(peer, self.receive, args=(callback_id, args, kwargs))

    def receive(self, source_number, callback_id, args=(), kwargs={}):
        self.logger.log_debug(
            f"Receiving {(args, kwargs)} from {source_number}")
        self.callback(callback_id, args=args, kwargs=kwargs)
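
The docstring above captures the best-effort contract: every peer receives the message provided the sender does not crash, and receivers reach no agreement among themselves. A minimal self-contained sketch of those semantics, using a toy in-memory link since the full PerfectLink API is not shown here:

# Toy stand-in for a perfect link: every send is delivered exactly once.
class ToyLink:
    def __init__(self):
        self.inboxes = {}

    def register(self, pid):
        self.inboxes[pid] = []

    def send(self, dest, msg):
        self.inboxes[dest].append(msg)

def best_effort_broadcast(link, sender, peers, msg):
    # One point-to-point send per peer; if the sender crashed midway,
    # only a prefix of the peers would receive msg -- hence "best effort".
    for peer in peers:
        link.send(peer, (sender, msg))

link = ToyLink()
for pid in (0, 1, 2):
    link.register(pid)
best_effort_broadcast(link, sender=0, peers=(0, 1, 2), msg="hello")
assert all(link.inboxes[pid] == [(0, "hello")] for pid in (0, 1, 2))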
Example #12
    async def send_bug_info(self, *args):
        for channel_id in args:
            channel = self.bot.get_channel(channel_id)
            if channel is None:
                await Logging.bot_log(f"can't send bug info to nonexistent channel {channel_id}")
                continue

            bug_info_id = Configuration.get_persistent_var(f"{channel.guild.id}_{channel_id}_bug_message")

            ctx = None
            tries = 0
            while not ctx and tries < 5:
                tries += 1
                # this API call fails on startup because connection is not made yet.
                # TODO: properly wait for connection to be initialized
                try:
                    last_message = await channel.send('preparing bug reporting...')
                    ctx = await self.bot.get_context(last_message)

                    if bug_info_id is not None:
                        try:
                            message = await channel.fetch_message(bug_info_id)
                        except (NotFound, HTTPException):
                            pass
                        else:
                            await message.delete()
                            if message.id in self.bug_messages:
                                self.bug_messages.remove(message.id)

                    bugemoji = Emoji.get_emoji('BUG')
                    message = await channel.send(Lang.get_locale_string("bugs/bug_info", ctx, bug_emoji=bugemoji))
                    self.bug_messages.add(message.id)
                    await message.add_reaction(bugemoji)
                    Configuration.set_persistent_var(f"{channel.guild.id}_{channel_id}_bug_message", message.id)
                    Logging.info(f"Bug report message sent in channel #{channel.name} ({channel.id})")
                    await last_message.delete()
                except Exception as e:
                    await self.bot.guild_log(channel.guild.id, f'Having trouble sending bug message in {channel.mention}')
                    await Utils.handle_exception(
                        f"Bug report message failed to send in channel #{channel.name} ({channel.id})", self.bot, e)
                    await asyncio.sleep(0.5)
Example #13
    def __init__(self):

        global log
        log = Logging(self.__class__.__name__).log

        self.clientInfo = clientinfo.ClientInfo()
        self.addonName = self.clientInfo.getAddonName()

        self.userid = window('emby_currUser')
        self.server = window('emby_server%s' % self.userid)

        self.emby = embyserver.Read_EmbyServer()
Example #14
    def __init__(self):

        global log
        log = Logging(self.__class__.__name__).log

        self.__dict__ = self._shared_state
        self.addon = xbmcaddon.Addon()

        self.addonName = clientinfo.ClientInfo().getAddonName()
        self.doUtils = downloadutils.DownloadUtils()

        threading.Thread.__init__(self)
Example #15
def run_db_migrations():
    dbv = int(Configuration.get_persistent_var('db_version', 0))
    Logging.info(f"db version is {dbv}")
    dbv_list = glob.glob("db_migrations/db_migrate_*.py")
    dbv_pattern = re.compile(r'db_migrations/db_migrate_(\d+)\.py',
                             re.IGNORECASE)
    migration_count = 0
    for filename in sorted(dbv_list):
        # get the int version number from filename
        version = int(re.match(dbv_pattern, filename)[1])
        if version > dbv:
            try:
                Logging.info(
                    f"--- running db migration version number {version}")
                spec = importlib.util.spec_from_file_location(
                    f"migrator_{version}", filename)
                dbm = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(dbm)
                Configuration.set_persistent_var('db_version', version)
                migration_count = migration_count + 1
            except Exception as e:
                # throw a fit if it doesn't work
                raise e
    Logging.info(
        f"--- {migration_count if migration_count else 'no'} db migration{'' if migration_count == 1 else 's'} run"
    )
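
run_db_migrations treats each matching file as a module and executes it, so a migration is just a script whose side effects run at exec_module time. A hedged sketch of what a db_migrations/db_migrate_1.py might contain; the sqlite3 usage, path, and table are illustrative assumptions, since the project's real database layer is not shown:

# db_migrations/db_migrate_1.py -- hypothetical migration script.
# run_db_migrations executes this module once when the persisted
# db_version is below 1, then records 1 as the new version.
import sqlite3

conn = sqlite3.connect("bot.db")  # placeholder path
conn.execute(
    "CREATE TABLE IF NOT EXISTS localization ("
    "channelid INTEGER PRIMARY KEY, locale TEXT)")
conn.commit()
conn.close()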
Example #16
def get_locale_string(key, ctx='', **arg_dict):
    global LANG, locales_loaded
    locale = get_defaulted_locale(ctx)

    if not locale:
        return L_ERR

    if not locales_loaded:
        load_locales()

    output = []
    # locale is a list or tuple. may be a single item or multiple
    for item in locale:
        locale_lang = LANG[item]
        key_list = key.split("/")

        # Check that keys point to a valid path in base keys
        obj = LANG['keys']

        if get_by_path(
                obj, key_list[:-1]) is None or key_list[-1] not in get_by_path(
                    obj, key_list[:-1]):
            raise KeyError(f"Lang key is not in lang_keys: {key}")
        if get_by_path(obj, key_list) is not None:
            raise KeyError(f"Lang key is not terminal: {key}")

        obj = get_by_path(locale_lang, key_list)

        # keys were found. Now check locale for value:
        if isinstance(obj, str):
            try:
                output.append(obj.format(**arg_dict))
            except KeyError as e:
                output.append(obj)
        else:
            # Maybe string is not defined in lang file.
            Logging.info(
                f"localized lang string failed for key {key} in locale {item}")
            output.append(L_ERR)
    return '\n'.join(output)
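
get_by_path is referenced above but not defined in this snippet; a common implementation walks nested dicts by a list of keys and returns None when a step is missing (a sketch, assuming that behavior):

from functools import reduce

def get_by_path(obj, key_list):
    # Walk nested dicts by key_list; None if any step is missing.
    try:
        return reduce(lambda d, k: d[k], key_list, obj)
    except (KeyError, TypeError):
        return None

assert get_by_path({"bugs": {"bug_info": ""}}, ["bugs", "bug_info"]) == ""
assert get_by_path({"bugs": {}}, ["bugs", "missing"]) is None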
Example #17
class LeaderElection(Subscriptable):
    """ This class implement a hierarchical leader election abstraction.

    Uses:
        - PerfectFailureDetector
        - HierarchicalConsensus

    A peer's rank is greater than another's iff its process number is strictly
    lower than the other's. All subscribed methods are called whenever a new
    leader is elected.

    """
    def __init__(self, pfd, hco):
        super().__init__()
        self.process_number = hco.process_number
        
        self.pfd = pfd
        self.pfd.subscribe_abstraction(self, self.peer_failure)
        self.hco = hco
        self.hco.subscribe_abstraction(self, self.decided)

        self.peers = {self.process_number}
        self.detected = set()
        self.leader = None
        
        self.in_election = False

        self.logger = Logging(self.process_number, "LEL")

    def start(self):
        super().start()
        self.election()

    def add_peers(self, *peers):
        self.peers.update(peers)
        self.pfd.add_peers(*peers)
        self.hco.add_peers(*peers)

    def peer_failure(self, process_number):
        self.logger.log_debug(f"Peer {process_number} crashed")
        self.detected.add(process_number)
        self.election()

    def election(self):
        if not self.in_election:
            self.logger.log_debug(f"New election")
            self.in_election = True
            self.leader = None
            leader = min(self.peers - self.detected)
            self.hco.trigger_event(self.hco.propose, kwargs={"value": leader})
    
    def decided(self, value):
        self.in_election = False
        if value in (self.peers - self.detected):
            self.logger.log_debug(f"New leader {value}")
            self.leader = value
            self.call_callbacks(self.leader)
        else:
            self.election()
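
The rank rule in the docstring (a lower process number means a higher rank) is what lets election() pick a candidate in one step: the best-ranked correct peer is simply the minimum surviving process number. For instance:

peers = {1, 2, 5}
detected = {1}                       # peer 1 reported crashed by the PFD
assert min(peers - detected) == 2    # highest-ranked surviving peer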
Example #18
    async def handle_reaction_change(self, t, reaction, user_id):
        roles = Configuration.get_var("roles")
        if reaction in roles:
            guild = self.bot.get_guild(Configuration.get_var("guild_id"))
            role = guild.get_role(roles[reaction])
            member_role = guild.get_role(Configuration.get_var("member_role"))
            nonmember_role = guild.get_role(
                Configuration.get_var("nonmember_role"))
            member = guild.get_member(user_id)
            action = getattr(member, f"{t}_roles")
            try:
                await action(role)
                # if acting on member role, toggle corresponding nonmember role
                if role is member_role:
                    if t == 'add':
                        await member.remove_roles(nonmember_role)
                    else:
                        await member.add_roles(nonmember_role)
            except Exception as ex:
                Logging.info("failed")
                Logging.error(ex)
                raise ex
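
The getattr dispatch above maps t='add' or t='remove' onto member.add_roles or member.remove_roles. The same pattern in isolation, with a stand-in class instead of a discord.py Member:

class FakeMember:
    def add_roles(self, role):
        return 'added %s' % role

    def remove_roles(self, role):
        return 'removed %s' % role

t = 'add'
action = getattr(FakeMember(), f"{t}_roles")  # resolves to add_roles
assert action('member') == 'added member'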
Example #19
    def __init__(self):

        global log
        log = Logging(self.__class__.__name__).log

        self.__dict__ = self._shared_state

        self.clientInfo = clientinfo.ClientInfo()
        self.addonName = self.clientInfo.getAddonName()
        self.doUtils = downloadutils.DownloadUtils().downloadUrl
        self.ws = wsc.WebSocket_Client()
        self.xbmcplayer = xbmc.Player()

        log("Starting playback monitor.", 2)
Example #20
def add(path, name):
    ''' Create a repository. '''
    os.chdir(path)
    name = wrap_repo_name(name)
    repo_name = os.path.join(path, name)
    if os.path.exists(repo_name):
        Logging.error('the repository already exists')
        return False

    run_command('git init --bare %s' % name)
    run_command('chown -R git:git %s' % name)

    repo_url = wrap_repo_url(path, name)
    Logging.info("repository url: %s" % (repo_url))
    Logging.info("git clone:      git clone %s" % (repo_url))
    Logging.info("git remote:     git remote add origin %s" % (repo_url))
Example #21
def test(args):
    # np.random.seed(1)
    # torch.manual_seed(1)
    # torch.cuda.manual_seed_all(1)
    # random.seed(1)
    # np.random.seed(1)
    item_num, test_support_data, test_negative_dict, test_negative, test_mat, sup_max = data_prepare.load_all(
        args)
    if args.dataset == 'amazon_small':
        item_num = 9449

    elif args.dataset == 'amazon_big':
        item_num = 57790
    else:
        item_num = 3952

    test_data = data_prepare.Test_data(test_support_data, item_num, test_mat,
                                       sup_max, args)
    test_data.ng_test_sample()
    log_str_path = './test_log/hr' + str(args.topK) + '/' + args.model + str(
        args.number) + 'test' + args.dataset + 'GL' + str(
            args.global_lr) + 'LE' + str(args.local_epoch) + 'LL' + str(
                args.local_lr)
    mod_str_path = './saved_models/' + args.model + str(
        args.number) + 'train' + args.dataset + 'GL' + str(
            args.global_lr) + 'LE' + str(args.local_epoch) + 'LL' + str(
                args.local_lr)
    log = Logging(log_str_path + '.log')
    eval_ = args.model + "(item_num,args).cuda()"
    model = eval(eval_)
    mod = torch.load(mod_str_path + '.mod')
    model.load_state_dict(mod)
    hrs, ndcgs = model.evaluate_test(test_data, test_negative_dict,
                                     test_negative, test_data.sup_dict)

    log.record('------hr:{}-------------ndcg{}'.format(np.mean(hrs),
                                                       np.mean(ndcgs)))
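
The eval() call above instantiates the model class named by args.model; an equivalent without eval, assuming the model classes live in an importable module (the module name models is a placeholder):

import importlib

def build_model(model_name, item_num, args):
    models = importlib.import_module('models')  # placeholder module name
    model_cls = getattr(models, model_name)     # e.g. model_name == args.model
    return model_cls(item_num, args).cuda()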
Example #22
    def __init__(self, process_number, decide_callback, deliver_callback):
        super().__init__()
        self.process_number = process_number
        self.decide_callback = decide_callback
        self.deliver_callback = deliver_callback

        self.link = PerfectLink(self.process_number)

        self.pfd = PerfectFailureDetector(self.link)
        self.pfd.subscribe_abstraction(self, self.peer_failure)

        self.erb = EagerReliableBroadcast(self.link)
        self.broadcast = self.erb.register_abstraction(self)

        self.beb = BestEffortBroadcast(self.link)
        self.hco = HierarchicalConsensus(self.link, self.pfd, self.beb)
        self.hco.subscribe_abstraction(self, self.consensus_decided)

        self.lel_hco = HierarchicalConsensus(self.link, self.pfd, self.beb)
        self.lel = LeaderElection(self.pfd, self.lel_hco)
        self.lel.subscribe_abstraction(self, self.new_leader)
        self.leader = None

        self.peers = {self.process_number}
        self.detected = set()
        self.erb.add_peers(self.process_number)

        self.votes = {}
        self.voted = {peer: False for peer in self.peers}

        self.finished_election = Event()
        self.finished_consensus = Event()
        self.finished_consensus.set()
        self.consensus_result = None
        self.proposition = None

        self.logger = Logging(self.process_number, "VOT")
Example #23
    async def send_welcome(self, member):
        guild = self.bot.get_guild(Configuration.get_var("guild_id"))
        if member.guild.id != guild.id or self.is_member_verified(member):
            return False

        try:
            welcome_channel = self.bot.get_config_channel(
                guild.id, Utils.welcome_channel)
            rules_channel = self.bot.get_config_channel(
                guild.id, Utils.rules_channel)

            if welcome_channel and rules_channel:
                txt = Lang.get_string(
                    "welcome/welcome_msg",
                    user=member.mention,
                    rules_channel=rules_channel.mention,
                    accept_emoji=Emoji.get_chat_emoji('CANDLE'))
                await welcome_channel.send(txt)
                return True
        except Exception as ex:
            Logging.info(f"failed to welcome {member.id}")
            Logging.error(ex)
            raise ex
        return False
Example #24
    def __init__(self, item):

        global log
        log = Logging(self.__class__.__name__).log

        self.item = item
        self.API = api.API(self.item)

        self.clientInfo = clientinfo.ClientInfo()
        self.addonName = self.clientInfo.getAddonName()
        self.doUtils = downloadutils.DownloadUtils().downloadUrl

        self.userid = window('emby_currUser')
        self.server = window('emby_server%s' % self.userid)

        self.artwork = artwork.Artwork()
        self.emby = embyserver.Read_EmbyServer()
        self.pl = playlist.Playlist()
Example #25
    def __init__(self):

        global log
        log = Logging(self.__class__.__name__).log

        self.clientinfo = clientinfo.ClientInfo()
        self.addonName = self.clientinfo.getAddonName()

        self.enableTextureCache = settings('enableTextureCache') == "true"
        self.imageCacheLimitThreads = int(settings('imageCacheLimit'))
        self.imageCacheLimitThreads = int(self.imageCacheLimitThreads * 5)
        log("Using Image Cache Thread Count: %s" % self.imageCacheLimitThreads,
            1)

        if not self.xbmc_port and self.enableTextureCache:
            self.setKodiWebServerDetails()

        self.userId = window('emby_currUser')
        self.server = window('emby_server%s' % self.userId)
Example #26
    async def hotreload(self, ctx):
        message = await ctx.send("Hot reloading...")
        importlib.reload(Reloader)
        for c in Reloader.components:
            importlib.reload(c)
        Emoji.initialize(self.bot)
        Logging.info("Reloading all cogs...")
        temp = []
        for cog in self.bot.cogs:
            temp.append(cog)
        for cog in temp:
            self.bot.unload_extension(f"cogs.{cog}")
            Logging.info(f'{cog} has been unloaded.')
            self.bot.load_extension(f"cogs.{cog}")
            Logging.info(f'{cog} has been loaded.')
        await message.edit(content="Hot reload complete")
Example #27
    async def close(self):
        Logging.info("Shutting down?")
        if not self.shutting_down:
            Logging.info("Shutting down...")
            self.shutting_down = True
            self.db_keepalive.cancel()
            temp = []
            for cog in self.cogs:
                temp.append(cog)
            for cog in temp:
                Logging.info(f"unloading cog {cog}")
                c = self.get_cog(cog)
                if hasattr(c, "shutdown"):
                    await c.shutdown()
                self.unload_extension(f"cogs.{cog}")
        return await super().close()
Example #28
    def __init__(self):

        global log
        log = Logging(self.__class__.__name__).log

        self.clientInfo = clientinfo.ClientInfo()
        self.addonName = self.clientInfo.getAddonName()
        logLevel = userclient.UserClient().getLogLevel()
        self.monitor = xbmc.Monitor()

        window('emby_logLevel', value=str(logLevel))
        window('emby_kodiProfile',
               value=xbmc.translatePath('special://profile'))

        # Initial logging
        log("======== START %s ========" % self.addonName, 0)
        log("Platform: %s" % (self.clientInfo.getPlatform()), 0)
        log("KODI Version: %s" % xbmc.getInfoLabel('System.BuildVersion'), 0)
        log("%s Version: %s" % (self.addonName, self.clientInfo.getVersion()),
            0)
        log("Using plugin paths: %s" % (settings('useDirectPaths') == "0"), 0)
        log("Log Level: %s" % logLevel, 0)

        # Reset window props for profile switch
        properties = [
            "emby_online", "emby_serverStatus", "emby_onWake",
            "emby_syncRunning", "emby_dbCheck", "emby_kodiScan",
            "emby_shouldStop", "emby_currUser", "emby_dbScan",
            "emby_sessionId", "emby_initialScan", "emby_customplaylist",
            "emby_playbackProps"
        ]
        for prop in properties:
            window(prop, clear=True)

        # Clear video nodes properties
        videonodes.VideoNodes().clearProperties()

        # Set the minimum database version
        window('emby_minDBVersion', value="1.1.63")
Example #29
    def load_client(self, server_address, cam_address, detection_result_pool):
        print(server_address, cam_address)
        self.detection_result_pool = detection_result_pool
        host, port = server_address.split(":")
        if host is "":
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(('8.8.8.8', 0))
            self.host = str(s.getsockname()[0])
        else:
            self.host = host
        self.port = int(port)

        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client_socket.connect((self.host, self.port))
        self.client_socket.sendall(
            str({
                "message_type": "init",
                "edge_type": "single",
                "cam_address": cam_address.split(",")
            }).encode())
        return True, Logging.i("Server is successfully loaded - {}:{}".format(
            self.host, self.port))
Example #30
    def __init__(self):
        config = Configuration()

        self.db = Database()
        self.results = self.db.get_table('results')
        
        self.job_map = {}
        self.job_pool = []
        
        self.failed_jobs = []
        
        self.evals = {}
        
        self.notifier = Notifier()
        
        self.sched = MessageScheduler(self.message_handler)

        conn = Connection('aggregator', 'roflcake')
        entity_prefix, entity_suffix = conn.get_entity_name()
        self.entity_name = entity_prefix + entity_suffix

        self.log = Logging(conn)
        conn.join_muc('aggregators')
        
        self.conn = conn.get_conn()
        
        self.roster = self.conn.getRoster()
        
        self.conn.RegisterHandler('iq',self.set_handler,'set')
        self.conn.RegisterHandler('iq',self.get_handler,'get')
        self.conn.RegisterHandler('iq',self.result_handler,'result')
        self.conn.RegisterHandler('presence',self.presence_handler)
        
        self.temp_messages = []
        
        self.parser = Parser()

        self.go_on()
Example #31
    def __init__(
        self,
        results_dir,
        dataloaders,
        model,
        criterion,
        optimizer,
        use_gpu,
        test_dataset,
        test_mode,
        seed,
        data_parallel=False,
        sync_bn=False,
    ):

        # base settings
        self.results_dir = results_dir
        self.dataloaders = dataloaders
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.device = torch.device("cuda") if use_gpu else torch.device("cpu")
        self.test_dataset = test_dataset
        self.test_mode = test_mode
        self.seed = seed
        assert test_mode in ["all", "in_door"], "test_mode should be 'all' or 'in_door'"

        self.loss_meter = MultiItemAverageMeter()
        os.makedirs(self.results_dir, exist_ok=True)
        self.logging = Logging(os.path.join(self.results_dir, "logging.txt"))

        self.model = self.model.to(self.device)
        if data_parallel:
            if not sync_bn:
                self.model = nn.DataParallel(self.model)
            if sync_bn:
                # torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
                self.model = nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
Example #32
def get_defaulted_locale(ctx):
    locale = 'en_US'
    if isinstance(ctx, Context):
        # TODO: move guild/channel checks to LangConfig, store in dict, update there on guild events and config changes
        cid = ctx.channel.id

        if ctx.guild is None:
            # DM - default the language
            locale = Configuration.get_var('broadcast_locale', 'en_US')
            if locale == ALL_LOCALES:
                return locales
            return [locale]

        # TODO: create lookup table so we don't hit database every time
        #  github issue #91
        gid = ctx.guild.id
        guild_row = Guild.get_or_none(serverid=gid)
        chan_locale = Localization.get_or_none(channelid=cid)

        # Bot default is English
        if guild_row is not None and guild_row.defaultlocale in locales:
            # server locale overrides bot default
            locale = guild_row.defaultlocale
        if chan_locale is not None and chan_locale.locale in locales:
            # channel locale overrides server
            locale = chan_locale.locale
    elif isinstance(ctx, str):
        # String assumes caller knows better and is overriding all else
        if ctx == ALL_LOCALES:
            return locales
        if ctx not in locales:
            if ctx != '':
                Logging.info(
                    f"Locale string override '{ctx}' not found. Defaulting.")
        else:
            locale = ctx
    else:
        Logging.info(f"Cannot derive locale from context: {ctx}")
        locale = False

    if locale not in locales:
        Logging.info(f"Missing locale {locale} - defaulting to English")
        locale = 'en_US'
    return [locale]
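
The precedence implemented above is: bot default, overridden by a valid server locale, overridden in turn by a valid channel locale. Reduced to its core:

locale = 'en_US'            # bot default
guild_locale = 'pt_BR'      # from the Guild row, when set and valid
channel_locale = 'ja_JP'    # from the Localization row, when set and valid

if guild_locale:
    locale = guild_locale   # server locale overrides the bot default
if channel_locale:
    locale = channel_locale # channel locale overrides the server
assert locale == 'ja_JP'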
Example #33
    async def on_ready(self):
        Logging.info(f"Skybot... {'RECONNECT!' if self.loaded else 'STARTUP!'}")
        if self.loaded:
            Logging.info("Skybot reconnect")
            return

        Logging.BOT_LOG_CHANNEL = self.get_channel(Configuration.get_var("log_channel"))
        Emoji.initialize(self)

        for cog in Configuration.get_var("cogs"):
            try:
                self.load_extension("cogs." + cog)
            except Exception as e:
                await Utils.handle_exception(f"Failed to load cog {cog}", self, e)
        Logging.info("Cogs loaded")
        self.db_keepalive = self.loop.create_task(self.keepDBalive())
        self.loaded = True

        await Logging.bot_log("Skybot soaring through the skies!")
Example #34
class Controller:
    
    """ Controller object. Initializes various data structures used by object. Establishes connection with XMPP server,
    connects to poller, aggregator and logging Multi-User Chats and registers stanza handlers. """
    def __init__(self):

        self.db = Database()

        entity_prefix = 'controller'

        conn = Connection(entity_prefix, static=True)
        self.entity_name, self.entity_suffix = conn.get_entity_name()
        
        # List of nodes known to the controller
        self.poller_map = {}
        self.poller_pool = {}
        
        self.job_map = {}
        self.job_pool = []

        # Message scheduler
        self.sched = MessageScheduler(self.message_handler)
        
        self.log = Logging(conn)
        conn.join_muc('pollers')
        conn.join_muc('aggregators')

        self.parser = Parser()
        
        self.establish_jobs()
        
        self.conn = conn.get_conn()

        self.conn.RegisterHandler('iq',self.result_handler,'result')
        self.conn.RegisterHandler('iq',self.set_handler,'set')
        self.conn.RegisterHandler('presence',self.presence_handler)

        self.go_on()
        
    """ Called by the presence handler when an entity connects to the aggregator or poller MUCs.
    Used to inspect retreive service information from an entity, required in the XEP Jabber-RPC standard. """
    def disco_lookup(self, recipient):
        self.log.info('Performing discovery lookup.')
        message = Iq('get', queryNS=NS_DISCO_INFO, to=recipient)
        self.sched.add_message(message, self.disco_handler)
        
    """ Method passed and used as handler for messages by MessageScheduler.
    Sends messages and logs an error when a send is a retry """
    # Handler used by message scheduling class
    def message_handler(self, message, retry=False):
#        print 'Sending message.'
        if retry == True:
            self.log.error('Message timed out, resending.')
        self.conn.send(message)
#
#   MESSAGE HANDLERS
#
    """ Handler for presence stanzas recieved by the XMPP listener.
    If 
    """
    def presence_handler(self, conn, presence_node):
        sender = presence_node.getFrom()
        presence_type = presence_node.getAttr('type')
        # Ignore self and presence announcements from logging MUC
        if sender.getResource() != 'controller':
            if presence_type == 'unavailable':
                if sender.getNode() == 'aggregators' or sender.getNode() == 'pollers':
                    self.remove_entity(sender.getNode(), sender)
            elif sender.getNode() == 'aggregators' or sender.getNode() == 'pollers':
                # Check the service discovery details for a connecting node.
                self.disco_lookup(sender)
        raise NodeProcessed
        
    """ IQ set handler, runs RPC methods in whitelist """
    def set_handler(self, conn, iq_node):
        query_node = iq_node.getQueryChildren()
        for node in query_node:
            try:
                method = node.getTagData('methodName')
                method_whitelist = ['get_group', 'get_groups', 'create_group', 'update_group', 'remove_group',
                'get_monitor', 'get_monitors', 'create_monitor', 'update_monitor', 'remove_monitor', 'get_monitors_by_gid',
                'get_job', 'get_jobs', 'create_job', 'update_job', 'remove_job',
                'get_evaluation', 'get_evaluations', 'create_evaluation', 'update_evaluation', 'remove_evaluation',
                'get_results', 'get_results_day', 'get_results_week', 'get_results_hour',
                'poller_failure',
                'get_aggregator']
                if method in method_whitelist:
                    method = getattr(self, method)
                    try:
                        try:
                            params = node.getTag('params').getChildren()
                            args = self.parser.get_args(params, iq_node.getFrom())
                        except AttributeError:
                            args = []
                            
                        status, parameters = method(*args)
                        message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                        self.conn.send(message)
                    except TypeError:
#                        print sys.exc_info()
                        conn.send(iq_node.buildReply('error'))
                else:
                    conn.send(iq_node.buildReply('error'))
                    self.log.error('Method not in whitelist')
            except AttributeError:
                traceback.print_exc()
                conn.send(iq_node.buildReply('error'))
        raise NodeProcessed

    def result_handler(self, conn, iq_node):
        # Check if the response is managed by the scheduler
        if self.sched.is_managed(int(iq_node.getAttr('id'))):
            self.sched.received_response(iq_node)
        raise NodeProcessed
#   END MESSAGE HANDLERS

#
# BEGIN SCHEDULER RESPONSE HANDLERS
#
    def disco_handler(self, sender, query_node):
        if query_node.getNamespace() == NS_DISCO_INFO:
            entity_type = query_node.getTagAttr('identity', 'type')
            if entity_type == 'aggregator' or entity_type == 'poller':
                adjusted_jid = JID(sender.getResource() + '@quae.co.uk/skynet')
                category = query_node.getTagAttr('identity', 'category')
                self.log.info('Registering node %s' % adjusted_jid)
                self.add_entity(entity_type, category, adjusted_jid)
        else:
            self.log.error('Received iq message with incorrect namespace')

    def assign_job(self, sender, query_node):
        if query_node.getNamespace() == NS_RPC:
            poller, job_id = self.parser.get_args_no_sender(query_node.getTag('methodResponse').getTag('params').getChildren())
            poller_jid = JID(poller)
            job_id = int(job_id)
            job = None
            for i in range(len(self.job_pool)):
                if self.job_pool[i]['id'] == job_id:
                    job = self.job_pool.pop(i)
                    self.log.info('Removing job %s from the job pool' % job_id)
                    break
            if job != None:
                self.job_map[poller_jid].append(job)
                self.log.info('Job %s successfully assigned to %s' % (job_id, poller_jid))
        else:
            self.log.error('Received iq message with incorrect namespace')
            
    def poller_removed(self, sender, query_node):
        if query_node.getNamespace() == NS_RPC:
            args = self.parser.get_args_no_sender(query_node.getTag('methodResponse').getTag('params').getChildren())
            adjusted_jid = JID(args[0])
            unassigned_jobs = self.job_map.pop(adjusted_jid)
            for job in unassigned_jobs:
                self.log.info('Adding job %s to the job pool' % job['id'])
                self.job_pool.append(job)
            parent_aggregator = None
            for aggregator, pollers in self.poller_map.items():
                for poller, segment in pollers:
                    if poller == adjusted_jid:
                        parent_aggregator = aggregator
                        pollers.remove((poller, segment))
                        break
            self.log.info('Removed %s from %s' % (adjusted_jid, parent_aggregator))
            self.assign_pooled_jobs()            
        else:
            self.log.error('Received iq message with incorrect namespace')
# END SCHEDULER HANDLERS

#
#   BEGIN RPC METHODS
#
# Requested by an aggregator when an assigned poller has failed/disconnected.
    def poller_failure(self, sender, previous_poller):
        pollers = self.poller_map[JID(sender)]
        poller_jid = JID('pollers@quae.co.uk/' + JID(previous_poller).getNode())
        try:
            pollers.remove(poller_jid)
            message = 'Removed failed poller'
            try:
                self.rebalance_pollers()
            except:
                print sys.exc_info()
            return 'success', [message]
        except:
            return 'failure', ['Failed to remove poller']

# Group operations
    def get_group(self, sender, name):
        group = self.db.get_group_by_name(name)
        if group != None:
            return 'success', [group]
        return 'failure', ['No such group exists']
    
    def get_groups(self, sender):
        groups = self.db.get_groups()
        if groups != None:
            return 'success', [groups]
        return 'failure', ['Failed to retrieve groups']

    def create_group(self, sender, name, desc):
        existing = self.db.get_group_by_name(name)
        if existing == None:
            self.db.create_group(name, desc)
            return 'success', ['Successfully created group %s' % name]
        return 'failure', ['failure']
    
    def update_group(self, sender, id, name, desc):
        group = self.db.get_group_by_id(id)
        if group != None:
            self.db.update_group(id, name, desc)
            return 'success', ['Successfully updated group %s' % name]
        return 'failure', ['Failed to update group']
        
    def remove_group(self, sender, name):
        self.db.remove_group_by_name(name)
        if self.db.get_group_by_name(name) == None:
            return 'success', ['Successfully removed group %s' % name]
        else:
            return 'failure', ['Failed to remove group %s' % name]
# Monitor operations

    def get_monitor(self, sender, name):
        try:
            monitor = self.db.get_monitor(name)
            if monitor != False:
                return 'success', [monitor]
        except TypeError:
            return 'failure', ['No such monitor exists']
        return 'failure', ['No such monitor exists']
        
    def get_monitors(self, sender, group=None):
        try:
            if group != None:
                monitors = self.db.get_monitors(group)
            else:
                monitors = self.db.get_monitors()
            return 'success', [monitors]
        except AttributeError:
            return 'failure', ['Failed to retrieve monitors']
            
    def get_monitors_by_gid(self, sender, group_id):
        try:
            if group_id != None:
                monitors = self.db.get_monitors_by_gid(group_id)
                return 'success', [monitors]
        except AttributeError:
            pass
        return 'failure', ['Failed to retrieve monitors']
    
    def create_monitor(self, sender, name, description, group):
        if self.db.create_monitor(name, description, group) == True:
            return 'success', ['Successfully created monitor %s' % name]
        return 'failure', ['Failed to create monitor']
        
    def update_monitor(self, sender, name, description, group):
        monitor = self.db.get_monitor(name)
        if monitor != None:
            # Assumes the Database layer provides an update_monitor method
            # analogous to update_group (not shown in this example).
            self.db.update_monitor(name, description, group)
            return 'success', ['Successfully updated monitor %s' % name]
        return 'failure', ['Failed to update monitor']
        
    def remove_monitor(self, sender, name):
        self.db.remove_monitor_by_name(name)
        if self.db.get_monitor_by_name(name) == None:
            return 'success', ['Successfully removed monitor %s' % name]
        else:
            return 'failure', ['Failed to remove monitor %s' % name]
        
# job operations

    def get_job(self, sender, mon, id):
        try:
            job = self.db.get_job(id, mon)
            return 'success', [job]
        except TypeError:
            return 'failure', ['No such job exists']
            
    def get_jobs(self, sender, mon):
        try:
            jobs = self.db.get_jobs(mon)
            return 'success', [jobs]
        except AttributeError:
            return 'failure', ['Failed to retrieve jobs']

    def create_job(self, sender, mon, address, protocol, frequency, interface, resource):
        if self.db.get_monitor(mon) != None:
            if self.db.create_job(address, protocol, frequency, interface, resource, mon) == True:
                return 'success', ['Successfully created a job for %s' % mon]
        return 'failure', ['Failed to create job']

    def update_job(self, sender, mon, id, address, protocol, frequency, interface, resource):
        existing = self.db.get_job(id, mon)
        if existing != None:
            self.db.update_job(id, address, protocol, frequency, interface, resource)
            return 'success', ['Successfully updated job']
        else:
            return 'failure', ['failure']
            
    def remove_job(self, sender, mon, id):
        self.db.remove_job(id)
        if self.db.get_job(id, mon) == None:
            return 'success', ['Successfully removed job']
        else:
            return 'failure', ['failure']
        
    # Result read operations
    
    def get_results(self, sender, monitor, job, start_datetime, end_datetime):
        results = self.db.get_results(monitor, job, start_datetime, end_datetime)
        if results == None:
            return 'failure', ['No such results exist']
        elif results != False:
            return 'success', results
        return 'failure', ['Failed to retrieve the specified results']
        
    def get_results_day(self, sender, monitor, job, start_datetime):
        results = self.db.get_results_day(monitor, job, start_datetime)
        if results == []:
            return 'failure', ['No such results exist']
        elif results != False:
            job_details = self.db.get_job(job, monitor)
            return 'success', [job_details, results]
        return 'failure', ['Failed to retrieve the specified results']
        
    def get_results_week(self, sender, monitor, job, start_datetime):
        pass
    
    def get_results_hour(self, sender, monitor, job, start_datetime):
        results = self.db.get_results_hour(monitor, job, start_datetime)
        if results == []:
            return 'failure', ['No such results exist']
        elif results != False:
            job_details = self.db.get_job(job, monitor)
            return 'success', [job_details, results]
        return 'failure', ['Failed to retrieve the specified results']
#   END RPC METHODS

#
#   BEGIN PRIVATE METHODS
#
    """ Called on successful DISCO request.
    Will register Poller or Aggregator, send jobs or balance jobs. """
    def add_entity(self, entity_type, segment, entity):
        if entity_type == 'aggregator':
            self.poller_map[entity] = []
            # If pollers have been added, but there were no aggregators running
            self.assign_pooled_pollers()
            if len(self.poller_map) > 1:
                self.rebalance_pollers()
            self.assign_pooled_jobs()
                
        elif entity_type == 'poller':
            self.job_map[entity] = []
            
            self.poller_pool[entity] = segment
            self.assign_pooled_pollers()
            
            if len(self.poller_map) > 1:
                self.rebalance_pollers()
            
            self.assign_pooled_jobs()
            
            if len(self.job_map) > 1:
                self.rebalance_jobs()
            # Give poller to appropriate aggregator
            print 'Added %s to %ss' % (entity, entity_type)
        self.log.info('Node %s was successfully registered with the controller' % entity)

    """ Removing Poller or Aggregator """
    def remove_entity(self, entity_type, entity):
        try:
            if entity_type == 'aggregators':
                adjusted_jid = JID(JID(entity).getResource() + '@quae.co.uk/skynet')
                unassigned_pollers = self.poller_map.pop(adjusted_jid)
                for poller, segment in unassigned_pollers:
                    self.poller_pool[poller] = segment
                self.log.info('Removed %s' % adjusted_jid)
                if len(self.poller_pool) > 0:
                    # Try and assign pooled pollers
                    if not self.assign_pooled_pollers():
                        for poller, segment in unassigned_pollers:
                            message = self.parser.rpc_call(poller, 'aggregator_failure', [])
                            self.sched.add_message(message)
                    
            elif entity_type == 'pollers':
                adjusted_jid = JID(JID(entity).getResource() + '@quae.co.uk/skynet')
                if len(self.poller_map) > 0:
                    parent_aggregator = None
                    for aggregator, pollers in self.poller_map.items():
                        for poller, segment in pollers:
                            if adjusted_jid == poller:
                                parent_aggregator = aggregator
                                break
                    if parent_aggregator != None:
                        remove_call = self.parser.rpc_call(parent_aggregator, 'remove_poller', [str(adjusted_jid)])
                        self.sched.add_message(remove_call, self.poller_removed)
                else:
                    try:
                        self.job_map.pop(adjusted_jid)
                        self.poller_pool.pop(adjusted_jid)
                        self.log.info('Poller not assigned, successfully removed')
                    except:
                        self.log.error('Failed to remove poller')
                        traceback.print_exc()
        except ValueError:
            self.log.error('Failed to remove %s' % entity)

    """ Assign unassigned pollers """
    def assign_pooled_pollers(self):
        if len(self.poller_map) > 0:
            while len(self.poller_pool) > 0:
                unassigned_poller, segment = self.poller_pool.popitem()
                chosen_aggregator = None
                poller_comp = None
                for aggregator, pollers in self.poller_map.items():
                    # If first loop or number of pollers assigned to aggregator is less than comp, make this agg the comp
                    if (chosen_aggregator == None and poller_comp == None) or len(pollers) < poller_comp:
                        chosen_aggregator = aggregator
                        poller_comp = len(pollers)
                if chosen_aggregator != None:
                    # Assign Poller to the Aggregator with the fewest assigned Pollers
                    
                    message = self.parser.rpc_call(chosen_aggregator, 'add_poller', [str(unassigned_poller)])
                    self.sched.add_message(message)
                    
                    for job in self.job_map[unassigned_poller]:
                        message = self.parser.rpc_call(chosen_aggregator, 'move_job', [str(unassigned_poller), job['id'], job['address'], job['protocol'], job['frequency'], job['interface'], job['resource'], job['segment']])
                        self.sched.add_message(message)
                    self.poller_map[chosen_aggregator].append((unassigned_poller, segment))
            return True
        else:
            self.log.info('No aggregators available for poller assignment')
            return False
    
    """ Get Pollers for a given network segment """
    def get_segment_pollers(self, segment):
        segment_pollers = {}
        for aggregator, pollers in self.poller_map.items():
            for poller, poller_segment in pollers:
                if poller_segment == segment:
                    segment_pollers[poller] = self.job_map[poller]
        return segment_pollers
        
    """ Called to allocate unassigned jobs """
    def assign_pooled_jobs(self):
        if len(self.job_map) > 0 and len(self.poller_map) > 0:
            for job in self.job_pool:
                unassigned_job = job
                least_loaded = None
                job_comp = None
                pollers = self.get_segment_pollers(unassigned_job['segment'])
                for poller, jobs in pollers.items():
#                for poller, jobs in self.job_map.items():
                    if (least_loaded == None and job_comp == None) or len(jobs) < job_comp:
                        least_loaded = poller
                        job_comp = len(jobs)
                if least_loaded != None:
                    chosen_aggregator = None
                    for aggregator, pollers in self.poller_map.items():
                        for poller, segment in pollers:
                            if poller == least_loaded:
                                chosen_aggregator = aggregator
                                break
                    if chosen_aggregator != None:
                        self.send_job(unassigned_job, least_loaded, chosen_aggregator)
        else:
            self.log.info('No assigned pollers available for job assignment')
        #print 'Job map %s' % self.job_map
        #print 'Job pool %s' % self.job_pool
    
    """ Rebalance pollers, compares amount assigned to each Aggregator, and moves across to
    another Poller if there's at least 2 more than another Aggregator """
    def rebalance_pollers(self):
        self.log.info('Attempting to rebalance pollers')

        least_pollers = None
        most_pollers = None
        # Retrieve the aggregators with the fewest and the most pollers
        for aggregator, pollers in self.poller_map.items():
            if least_pollers == None or len(pollers) < len(self.poller_map[least_pollers]):
                least_pollers = aggregator
            if most_pollers == None or len(pollers) > len(self.poller_map[most_pollers]):
                most_pollers = aggregator
        
        if least_pollers != None and most_pollers != None:
            # If the difference between the two pollers is worth balancing
            if (len(self.poller_map[most_pollers]) - len(self.poller_map[least_pollers])) > 1:
                poller, segment = self.poller_map[most_pollers].pop()
                self.poller_map[least_pollers].append((poller, segment))
                self.sched.add_message(self.parser.rpc_call(least_pollers, 'add_poller', [str(poller)]))
                self.sched.add_message(self.parser.rpc_call(most_pollers, 'remove_poller', [str(poller)]))
                self.rebalance_pollers()
            else:
                self.log.info('Pollers balanced')
                return True
               
    """ Similar to above, checks number of assigned jobs through the system, and will level them across all Pollers """ 
    def rebalance_jobs(self):
        self.log.info('Attempting to rebalance jobs')
        job_comp = None
        least_jobs = None
        most_jobs = None
        
        network_segment = 'skynet'
        pollers = self.get_segment_pollers(network_segment)
        
#        for poller, jobs in self.job_map.items():
        for poller, jobs in pollers.items():
            print poller
            if job_comp == None:
                least_jobs = poller
                most_jobs = poller
            elif len(jobs) < job_comp:
                least_jobs = poller
            elif len(jobs) > job_comp:
                most_jobs = poller
            job_comp = len(jobs)
            
        if least_jobs != None and most_jobs != None:
            if (len(self.job_map[most_jobs]) - len(self.job_map[least_jobs])) > 1:
                job = self.job_map[most_jobs].pop()
                self.job_map[least_jobs].append(job)
                least_parent = None
                most_parent = None
                for aggregator, pollers in self.poller_map.items():
                    for poller, node_segment in pollers:
                        if network_segment == node_segment:
                            if poller == least_jobs:
                                least_parent = aggregator
                            if poller == most_jobs:
                                most_parent = aggregator
                            if least_parent != None and most_parent != None:
                                break
                    
                if least_parent != None and most_parent != None:
                    self.log.info('Moving job %s to %s' % (job['id'], least_jobs))
                    self.sched.add_message(self.parser.rpc_call(most_parent, 'remove_job', [job['id']]))
                    self.sched.add_message(self.parser.rpc_call(least_parent, 'run_job', [str(least_jobs), job['id'], job['address'], job['protocol'], job['frequency'], job['interface'], job['resource']]), offset=True)
                self.rebalance_jobs()
            else:
                self.log.info('Jobs balanced')
                return True
                
    """ Retrieve jobs on startup """    
    def establish_jobs(self):
        monitors = self.db.get_monitors()
        self.log.info('Retrieving jobs')
        for monitor in monitors:
            jobs = self.db.get_jobs(monitor.name)
            for job in jobs:
                job = dict(job)
                segment_name = self.db.get_segment_name(job['segment'])
                job['segment'] = segment_name
                # Frequency is stored in minutes; convert to seconds for polling
                job['frequency'] = job['frequency'] * 60
                self.job_pool.append(job)
        self.log.info('%s jobs added to the pool' % len(self.job_pool))
        
    """ Send job to Aggregator to forward to Poller """
    def send_job(self, job, poller, aggregator):
        message = self.parser.rpc_call(aggregator, 'run_job', [str(poller), job['id'], job['address'], job['protocol'], job['frequency'], job['interface'], job['resource']])
        self.log.info('Sending job %s to %s' % (job['id'], aggregator))
        self.sched.add_message(message, self.assign_job, offset=True)
        
    def step_on(self):
        try:
            self.conn.Process(1)
        except KeyboardInterrupt: return 0
        return 1

    def go_on(self):
        while self.step_on(): pass
Example #35
class Poller:
    """ Poller object, establish connection, MUCs and handlers """
    def __init__(self, segment='skynet'):
        config = Configuration()

        self.jobs = {}

        self.sched = MessageScheduler(self.message_handler)
        self.parser = Parser()
        
        self.aggregator = None
        self.failed_aggregator = False
        
        self.query_queue = []
        conn = Connection('poller', 'roflcake')        
        self.entity_prefix, entity_suffix = conn.get_entity_name()
        
#        self.entity_name = entity_prefix + entity_suffix
        conn.join_muc('pollers')
        
        self.segment = segment
    
        self.conn = conn.get_conn()
        
        self.log = Logging(conn)
        self.roster = self.conn.getRoster()
        
        self.conn.RegisterHandler('presence',self.presence_handler)
        
        self.conn.RegisterHandler('iq', self.result_handler, 'result')
        self.conn.RegisterHandler('iq', self.error_handler, 'error')
        self.conn.RegisterHandler('iq', self.get_handler, 'get')
        self.conn.RegisterHandler('iq', self.set_handler, 'set')
        
        self.go_on()
        
    """ Message scheduling handler """
    def message_handler(self, message, retry=False):
        if retry == True:
            self.log.error('Timed out, attempting to resend.')
        self.conn.send(message)
#
#  Handlers for node communication
# 
    """ Presence stanza handler """
    def presence_handler(self, conn, presence_node):
        sender = presence_node.getFrom()
        if presence_node.getAttr('type') == 'subscribe':
            self.roster.Authorize(sender)
#        if sender != self.entity_name:
#            print presence_node.getRole()
#            conn.send(Iq('get', NS_DISCO_INFO, to=presence_node.getFrom()))            
        raise NodeProcessed
    
    """ IQ result handler, acknowledges messages with scheduler """
    def result_handler(self, conn, iq_node):
        iq_id = iq_node.getAttr('id')
        sender = iq_node.getFrom()
        
        if sender.getNode() != self.entity_prefix:
            self.sched.received_response(iq_node)
        raise NodeProcessed  # This stanza is fully processed
    
    """ IQ error handler """
    def error_handler(self, conn, iq_node):
        if iq_node.getFrom() == self.aggregator:
            pass
#            print 'Erk!'
        raise NodeProcessed
    
    """ IQ get handler """
    def get_handler(self, conn, iq_node):
        if iq_node.getQueryNS() == NS_DISCO_INFO:
            reply = iq_node.buildReply('result')
            if self.segment != None:
                category = self.segment
            else:
                category = 'skynet'
            identity = Node('identity', {'category':category, 'type':'poller'})
            reply.setQueryPayload([identity])
            conn.send(reply)
        else:
            conn.send(iq_node.buildReply('error'))
        raise NodeProcessed
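
    # For illustration (not in the original source): the disco#info reply
    # built above serializes to roughly the following stanza, assuming
    # xmpppy's standard namespace handling:
    #
    #   <iq type='result' to='requester@...'>
    #     <query xmlns='http://jabber.org/protocol/disco#info'>
    #       <identity category='skynet' type='poller'/>
    #     </query>
    #   </iq>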
        
    """ IQ set handler, used for RPC calls, permits methods in whitelist """
    def set_handler(self, conn, iq_node):
        sender = iq_node.getFrom()
        if sender.getNode() != self.entity_prefix:
            query_node = iq_node.getQueryChildren()
            for node in query_node:
                try:
                    method = node.getTagData('methodName')
                    method_whitelist = ['run_job', 'set_aggregator', 'remove_job', 'aggregator_failure']
                    if method in method_whitelist:
                        method = getattr(self, method)
                        try:
                            try:
                                params = node.getTag('params').getChildren()
                                args = self.parser.get_args(params)
                            except AttributeError:
                                args = []
                            status, parameters = apply(method, args)
                            message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                            conn.send(message)
                        except TypeError:
                            # print sys.exc_info()
                            conn.send(iq_node.buildReply('error'))
                    else:
                        # Method not in whitelist
                        conn.send(iq_node.buildReply('error'))
                except AttributeError:
                    # print sys.exc_info()
                    conn.send(iq_node.buildReply('error'))
        raise NodeProcessed
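
    # For illustration (not in the original source): set_handler expects
    # XEP-0009 Jabber-RPC payloads, so an accepted stanza looks roughly like
    # this (sender and values made up):
    #
    #   <iq type='set' from='controller@.../skynet' id='42'>
    #     <query xmlns='jabber:iq:rpc'>
    #       <methodCall>
    #         <methodName>remove_job</methodName>
    #         <params><param><value><int>7</int></value></param></params>
    #       </methodCall>
    #     </query>
    #   </iq>
    #
    # Anything whose methodName is not whitelisted gets an error reply.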
    
    #
    # RPC METHODS (job setup and scheduling)
    #
    """ Called by Aggregator to establish job """
    def run_job(self, sender, aggregator, id, addr, proto, freq, dom, resource):
        try:
            job = Job(self, id, addr, proto, freq, dom, resource, self.conn, self.sched)
            self.jobs[id] = job
            job.start()
            return 'success', [int(id)]
        except:
            return 'failure', ['Failed to schedule job']
            
    """ Controller calls to notify of parent Aggregator failure """
    def aggregator_failure(self, sender):
        self.aggregator = None
        self.failed_aggregator = True
        return 'success', ['Removed parent aggregator']
            
    """ Called when job moved off this Poller """
    def remove_job(self, sender, job_id):
        try:
            job = self.jobs[int(job_id)]
            job.end()
            return 'success', ['Stopped job %s' % job_id]
        except:
            return 'failure', ['Failed to stop job %s' % job_id]
    
    """ Called by parent Aggregator, sets where add_results will be sent """
    def set_aggregator(self, sender, aggregator):
        self.log.info('Setting aggregator %s' % aggregator)
        self.aggregator = aggregator
        if self.failed_aggregator == True:
            self.failed_aggregator = False
            for job in self.jobs.values():
                job.send_cached_results()
        return 'success', ['Successfully set aggregator']
            
    #
    # PRIVATE METHODS
    #
    """ Provides parent Aggregator, used by Job instances """
    def get_aggregator(self):
        return self.aggregator

    """ Setup listener """
    def step_on(self):
        try:
            self.conn.Process(1)
        except KeyboardInterrupt:
            server = 'quae.co.uk'
            features.unregister(self.conn, server)
            # Stop all running jobs
            for job in self.jobs.values():
                job.end()
                
            self.sched.end()
            
            print 'Unregistered from %s' % server
            return 0
        return 1
    
    def go_on(self):
        while self.step_on(): pass
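
The aggregator_failure / set_aggregator pair above gives the Poller a simple store-and-forward failover: while no parent Aggregator is set, each Job caches its results locally, and set_aggregator flushes those caches via send_cached_results once a new parent arrives. The Job class itself is not shown in this listing, so the sketch below is a hypothetical reconstruction of just that caching behaviour:

class CachingJob(object):
    """Hypothetical stand-in for the Job class's result caching."""

    def __init__(self, poller):
        self.poller = poller   # exposes get_aggregator(), as defined above
        self.cache = []

    def deliver(self, result):
        aggregator = self.poller.get_aggregator()
        if aggregator is None:
            self.cache.append(result)   # no parent: hold the result locally
        else:
            self.send(aggregator, result)

    def send_cached_results(self):
        # Called from set_aggregator once a new parent is assigned
        aggregator = self.poller.get_aggregator()
        for result in self.cache:
            self.send(aggregator, result)
        self.cache = []

    def send(self, aggregator, result):
        # In the real Job this would build an add_result RPC call and hand
        # it to the MessageScheduler
        pass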
Example No. 36
class Aggregator:
    """ Aggregator object 
    Establishes connection and joins MUCs. Registers handlers
    """
    def __init__(self):
        config = Configuration()

        self.db = Database()
        self.results = self.db.get_table('results')
        
        self.job_map = {}
        self.job_pool = []
        
        self.failed_jobs = []
        
        self.evals = {}
        
        self.notifier = Notifier()
        
        self.sched = MessageScheduler(self.message_handler)

        conn = Connection('aggregator', 'roflcake')
        entity_prefix, entity_suffix = conn.get_entity_name()
        self.entity_name = entity_prefix + entity_suffix

        self.log = Logging(conn)
        conn.join_muc('aggregators')
        
        self.conn = conn.get_conn()
        
        self.roster = self.conn.getRoster()
        
        self.conn.RegisterHandler('iq',self.set_handler,'set')
        self.conn.RegisterHandler('iq',self.get_handler,'get')
        self.conn.RegisterHandler('iq',self.result_handler,'result')
        self.conn.RegisterHandler('presence',self.presence_handler)
        
        self.temp_messages = []
        
        self.parser = Parser()

        self.go_on()

    """ Handler for scheduler """
    def message_handler(self, message, retry=False):
        #print 'Sending message.'
        if retry == True:
            self.log.error('Timed out, attempting to resend.')
        self.conn.send(message)
    
# HANDLERS
    """ IQ result handler """
    def result_handler(self, conn, iq_node):
        if self.sched.is_managed(int(iq_node.getID())):
            self.sched.received_response(iq_node)
        raise NodeProcessed
            
    """ Presence handler """
    def presence_handler(self, conn, presence_node):
        if len(self.job_map) > 0:
            sender = presence_node.getFrom()
            if presence_node.getAttr('type') == 'unavailable':
                failed_poller = None
                for poller in self.job_map:
                    if poller == sender:
                        failed_poller = poller
                        break
                        
                if failed_poller != None:
                    # Only used if Controller has gone offline
                    self.log.info('Poller %s has gone offline.' % failed_poller)
        raise NodeProcessed
                
    """ IQ get handler """
    def get_handler(self, conn, iq_node):
        if iq_node.getQueryNS() == NS_DISCO_INFO:
            reply = iq_node.buildReply('result')
            identity = Node('identity', {'category':'skynet', 'type':'aggregator'})
            reply.setQueryPayload([identity])
            conn.send(reply)
        raise NodeProcessed

    """ IQ set handler. Permits RPC calls in the whitelist, else returns error message """
    def set_handler(self, conn, iq_node):
        sender = iq_node.getFrom()
        iq_id = iq_node.getAttr('id')
        
        if sender == '[email protected]/skynet':
            query_node = iq_node.getQueryChildren()
            for node in query_node:
                try:
                    method = node.getTagData('methodName')
                    method_whitelist = ['run_job', 'add_poller', 'remove_poller', 'remove_job', 'move_job']
                    if method in method_whitelist:
                        method = getattr(self, method)
                        try:
                            try:
                                params = node.getTag('params').getChildren()
                                args = self.parser.get_args(params)
                            except AttributeError:
                                args = []
                            status, parameters = apply(method, args)
                            message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                            conn.send(message)
                        except TypeError:
                            #print sys.exc_info()
                            conn.send(iq_node.buildReply('error'))
                    else:
                        conn.send(iq_node.buildReply('error'))
                        self.log.error('Method called not in whitelist')
                except AttributeError:
                    #print sys.exc_info()
                    conn.send(iq_node.buildReply('error'))

        if len(self.job_map) > 0:
            if sender in self.job_map:
                query_node = iq_node.getQueryChildren()
                for node in query_node:
                    try:
                        method = node.getTagData('methodName')
                        method_whitelist = ['add_result']
                        if method in method_whitelist:
                            method = getattr(self, method)
                            try:
                                try:
                                    params = node.getTag('params').getChildren()
                                    args = self.parser.get_args(params)
                                except AttributeError:
                                    args = []

                                status, parameters = apply(method, args)
                                message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                                conn.send(message)
                            except TypeError:
                                # print sys.exc_info()
                                conn.send(iq_node.buildReply('error'))
                        else:
                            conn.send(iq_node.buildReply('error'))
                    except AttributeError:
                        # print sys.exc_info()
                        conn.send(iq_node.buildReply('error'))
        
        raise NodeProcessed  # This stanza is fully processed
        
    """ Establish evaluations """
    def set_evals(self, job, evaluations):
        for evaluation in evaluations:
            if evaluation.string != None:
                self.evals[job].append((evaluation.comparison, str(evaluation.string)))
            elif evaluation.float != None:
                self.evals[job].append((evaluation.comparison, float(evaluation.float)))
            elif evaluation.int != None:
                self.evals[job].append((evaluation.comparison, int(evaluation.int)))
            else:
                self.log.info('Evaluation contains no comparison value.')
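
    # Illustrative only (not in the original source): a job with one int
    # evaluation row, say comparison '<' and int value 100, ends up stored as
    #
    #   self.evals[job_id] == [('<', 100)]
    #
    # which insert_result later turns into a string such as '95<100' and
    # passes to eval().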
                
    """ Callback handler used by scheduler.add_message when poller replies on successful job assignment """
    def assign_job(self, sender, query_node):
        if query_node.getNamespace() == NS_RPC:
            params = query_node.getTag('methodResponse').getTag('params').getChildren()
            job_id = self.parser.get_args_no_sender(params)[0]
            job_id = int(job_id)
            self.job_map[JID(sender)].append(job_id)
            self.evals[job_id] = []
            
            evaluations = self.db.get_evaluations(job_id)
            self.set_evals(job_id, evaluations)
        else:
            pass
#            print 'Received iq message with incorrect namespace'
# END HANDLERS

# RPC METHODS
    """ Retains details of a job, allocates to Poller with least currently assinged jobs """
    def run_job(self, sender, poller, job, addr, proto, freq, dom, resource):
        # Checks a poller is assigned
        if len(self.job_map) > 0:
            # Determines which poller has least assigned jobs
#            job_comp = None
#            least_loaded = None
#            for poller, jobs in self.job_map.items():
#                num_jobs = len(jobs)
#                if job_comp != None:
#                    if len(jobs) < job_comp:
#                        least_loaded = poller
#                else:
#                    least_loaded = poller
#                job_comp = num_jobs
            
            message = self.parser.rpc_call(poller, 'run_job', [self.entity_name, job, addr, proto, freq, dom, resource])
            self.sched.add_message(message, self.assign_job)
            return 'success', [str(poller), int(job)]
        else:
            return 'failure', ['There are no pollers connected']
            
    """ Called when a job is moved to this Aggregator, sets up evals and details """
    def move_job(self, sender, poller, job, addr, proto, freq, dom, resource, segment):
        # Checks a poller is assigned
        if len(self.job_map) > 0:
            job_id = int(job)
            self.job_map[JID(poller)].append(job_id)
            self.evals[job_id] = []

            evaluations = self.db.get_evaluations(job_id)
            self.set_evals(job_id, evaluations)
            return 'success', ['Successfully moved job']
        else:
            return 'failure', ['There are no pollers connected']
    
    """ Removes job from the Aggregator, cleans up state """
    def remove_job(self, sender, job_id):
        job_id = int(job_id)
        parent_poller = None
        for poller, jobs in self.job_map.items():
            for i in range(len(jobs)):
                if jobs[i] == job_id:
                    jobs.pop(i)
                    parent_poller = poller
                    break
        try:
            self.evals.pop(job_id)
        except KeyError:
            pass
            
        if parent_poller != None:
            message = self.parser.rpc_call(parent_poller, 'remove_job', [job_id])
            self.sched.add_message(message)
            return 'success', ['Successfully removed job']
        else:
            return 'failure', ['Failed to remove job']
    
    """ Called by child Poller to deliver result """
    def add_result(self, sender, id, recorded, val):
        status = self.insert_result(id, recorded, val)
        if status != 'failure':
            return 'success', ['Successfully added result']
        else:
            messages = self.temp_messages
            self.temp_messages = []
            return 'failure', messages
        
    """ Called by Controller when Poller is assigned """
    def add_poller(self, sender, poller):
        # Subscribe to poller for presence updates
        poller_jid = JID(poller)
        self.roster.Subscribe(poller_jid)
        self.job_map[poller_jid] = []
        self.sched.add_message(self.parser.rpc_call(poller_jid, 'set_aggregator', [self.entity_name]))
        return 'success', ['Successfully added %s' % poller_jid]

    """ Called by Controller to remove references to Poller """
    def remove_poller(self, sender, poller):
        poller_jid = JID(poller)
        try:
            unassigned_jobs = self.job_map.pop(poller_jid)
            # If controller has also failed
#            for job in unassigned_jobs:
#                self.job_pool.append(job)
            self.roster.Unsubscribe(poller_jid)
            return 'success', [poller]
        except KeyError:
            return 'failure', ['Failed to remove poller %s' % poller]

# END RPC METHODS

    """ Used when inserting results supplied by add_result.
    Peforms evaluations, casts type, makes notifications and then stores into the database. """
    def insert_result(self, id, recorded, val, list_id=None):

        val_type = type(val).__name__
        
        try:
            evals = self.evals.get(int(id))
            if evals:
                result = True
                for comparison, comp_val in evals:
                    if val_type == 'int' or val_type == 'float':
                        eval_statement = str(val) + str(comparison) + str(comp_val)
                    elif val_type == 'str':
                        eval_statement = "'%s'%s'%s'" % (val, comparison, comp_val)
                    result = eval(eval_statement)
                    # Stop at the first failed evaluation so it is the one reported
                    if result != True:
                        break
                if result != True:
                    message = 'Job %s has caused an error! The value %s failed an evaluation.' % (id, comp_val)
                    self.log.error(message)
                    if id not in self.failed_jobs:
                        self.log.info('Sending notifications')
                        self.notifier.send_email(message)
                        self.notifier.send_sms(message)
                        self.failed_jobs.append(id)
                else:
                    if id in self.failed_jobs:
                        message = 'Job %s is back within normal parameters' % id
                        self.notifier.send_email(message)
                        self.log.info(message)
                        self.failed_jobs.remove(id)
                        
        except:
#            traceback.print_exc()
            self.temp_messages = ['Failed to evaluate returned result']
            return 'failure'
            
        if val_type == 'int':        
            if list_id != None:
                self.results.insert().execute(job=id, int=val, recorded=recorded, list=list_id)
            else:
                self.results.insert().execute(job=id, int=val, recorded=recorded)
        elif val_type == 'str':
            if list_id != None:
                self.results.insert().execute(job=id, string=val, recorded=recorded, list=list_id)
            else:
                self.results.insert().execute(job=id, string=val, recorded=recorded)
        elif val_type == 'float':
            if list_id != None:
                self.results.insert().execute(job=id, float=val, recorded=recorded, list=list_id)
            else:
                self.results.insert().execute(job=id, float=val, recorded=recorded)
        elif val_type == 'list':
            self.results.insert().execute(job=id, recorded=recorded, list=0)
            where = and_(self.results.c.recorded == recorded, self.results.c.list == 0)
            list_id = self.results.select(where).execute().fetchone().id
            #print "Retrieved list id %s" % list_id
            for element in val:
                self.insert_result(id, recorded, element, list_id)
        else:
            self.temp_messages = ['Unexpected data type received']
            return 'failure'
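
        # Note on the 'list' branch above (comment added for clarity, not in
        # the original source): list values are stored as one sentinel row
        # with list=0, whose generated id is read back and used as the
        # list_id for a recursive insert of each element. A successful insert
        # falls through and returns None, which add_result treats as success
        # because it only checks for the literal string 'failure'.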
        
    """ Setup listener """
    def step_on(self):
        try:
            self.conn.Process(1)
        except KeyboardInterrupt:
            server = 'quae.co.uk'
            features.unregister(self.conn, server)
            #print 'Unregistered from %s' % server
            return 0
        return 1

    def go_on(self):
        while self.step_on(): pass
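
insert_result builds each comparison as a plain Python source string and runs it through eval(), so a numeric threshold check reduces to evaluating a string such as '95<100'. A minimal standalone illustration of that mechanism (the values are made up):

# Evaluation tuples as produced by set_evals, and a freshly delivered result
checks = [('<', 100), ('>', 10)]
val = 95

for comparison, comp_val in checks:
    eval_statement = str(val) + str(comparison) + str(comp_val)
    print '%s -> %s' % (eval_statement, eval(eval_statement))

# Prints:
#   95<100 -> True
#   95>10 -> True

Since eval() executes arbitrary Python, this only stays reasonable because the comparison operator and value come from the Aggregator's own database rather than off the wire.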