Пример #1
0
    async def ping_unverified(self, ctx):
        """Mention the unverified (nonmember) role in the welcome channel.

        Temporarily makes the nonmember role mentionable so the welcome
        message can ping it, then restores the role to unmentionable.

        :param ctx: command invocation context (not used in the body)
        :return: True if the welcome message was sent, False otherwise
        :raises Exception: re-raises any failure after logging it
        """
        guild = self.bot.get_guild(Configuration.get_var("guild_id"))
        try:
            nonmember_role = guild.get_role(
                Configuration.get_var("nonmember_role"))
            welcome_channel = self.bot.get_config_channel(
                guild.id, Utils.welcome_channel)
            rules_channel = self.bot.get_config_channel(
                guild.id, Utils.rules_channel)

            # Both channels must be configured before anything is announced.
            if welcome_channel and rules_channel:
                txt = Lang.get_string(
                    "welcome/welcome_msg",
                    user=nonmember_role.mention,
                    rules_channel=rules_channel.mention,
                    accept_emoji=Emoji.get_chat_emoji('CANDLE'))

                # The role must be mentionable for the ping to reach members.
                await nonmember_role.edit(mentionable=True)
                await welcome_channel.send(txt)
                await nonmember_role.edit(mentionable=False)
                return True
        except Exception as ex:
            # Plain string: the original f-string had no placeholders.
            Logging.info("failed to welcome unverified role.")
            Logging.error(ex)
            raise  # bare raise preserves the original traceback
        return False
Пример #2
0
    async def handle_reaction_change(self, t, reaction, user_id):
        """Add or remove a configured role in response to a reaction change.

        :param t: "add" or "remove" -- selects member.add_roles/remove_roles
        :param reaction: emoji key looked up in the configured roles map
        :param user_id: id of the member whose roles change
        :raises Exception: re-raises any role-edit failure after logging it
        """
        roles = Configuration.get_var("roles")
        if reaction in roles:
            guild = self.bot.get_guild(Configuration.get_var("guild_id"))
            role = guild.get_role(roles[reaction])
            member_role = guild.get_role(Configuration.get_var("member_role"))
            nonmember_role = guild.get_role(
                Configuration.get_var("nonmember_role"))
            member = guild.get_member(user_id)

            # Member may have left (or never been cached); nothing to do.
            if member is None:
                return

            # Resolves to member.add_roles or member.remove_roles.
            action = getattr(member, f"{t}_roles")
            try:
                await action(role)
                # if acting on member role, toggle corresponding nonmember role
                if role is member_role:
                    if t == 'add':
                        await member.remove_roles(nonmember_role)
                    else:
                        await member.add_roles(nonmember_role)
            except Exception as ex:
                Logging.info("failed")
                Logging.error(ex)
                raise  # bare raise preserves the original traceback
Пример #3
0
    async def send_welcome(self, member):
        """Send the welcome message to a new, unverified member.

        :param member: the member who joined
        :return: True if a welcome message was sent, False otherwise
        :raises Exception: re-raises any send failure after logging it
        """
        guild = self.bot.get_guild(Configuration.get_var("guild_id"))
        # Only welcome members of the configured guild who aren't verified yet.
        if member.guild.id != guild.id or self.is_member_verified(member):
            return False

        try:
            welcome_channel = self.bot.get_config_channel(
                guild.id, Utils.welcome_channel)
            rules_channel = self.bot.get_config_channel(
                guild.id, Utils.rules_channel)

            # Send welcome message in configured language. default to english
            if welcome_channel and rules_channel:
                txt = Lang.get_string(
                    "welcome/welcome_msg",
                    user=member.mention,
                    rules_channel=rules_channel.mention,
                    accept_emoji=Emoji.get_chat_emoji('PAINTBRUSH'))
                if self.mute_new_members:
                    # add mute notification if mute for new members is on
                    mute_txt = Lang.get_string("welcome/welcome_mute_msg")
                    txt = f"{txt}\n{mute_txt}"
                await welcome_channel.send(txt)
                return True
        except Exception as ex:
            Logging.info(f"failed to welcome {member.id}")
            Logging.error(ex)
            raise  # bare raise preserves the original traceback
        return False
Пример #4
0
 def wrapper(*args, **kwargs):
     """Run the wrapped function only when args[0] is a grm remote root."""
     root = args[0]
     if os.path.exists(os.path.join(root, BASE_GRM_DIR)):
         return func(*args, **kwargs)
     # Not a managed remote root: report and bail out (returns None).
     Logging.error("fatal: not a remote root directory(%s)" %
                   BASE_GRM_DIR)
     return
Пример #5
0
async def handle_exception(exception_type, bot, exception, event=None, message=None, ctx=None, *args, **kwargs):
    """Build an exception report embed and post it to the bot log channel.

    If posting to Discord itself fails, fall back to plain local logging so
    the error is never lost entirely.
    """
    embed = get_embed_and_log_exception(
        exception_type, bot, exception, event, message, ctx, *args, **kwargs)
    try:
        await Logging.bot_log(embed=embed)
    except Exception as ex:
        # Discord logging failed; record everything locally instead.
        Logging.error(
            f"Failed to log to botlog, either Discord broke or something is seriously wrong!\n{ex}")
        Logging.error(traceback.format_exc())
Пример #6
0
 def load_guild(self, guild):
     """Cache this guild's configured channels onto the bot, keyed by config name."""
     channels = {}
     rows = ConfigChannel.select().where(ConfigChannel.serverid == guild.id)
     for row in rows:
         if not validate_channel_name(row.configname):
             Logging.error(f"Misconfiguration in config channel: {row.configname}")
             continue
         channels[row.configname] = row.channelid
     self.bot.config_channels[guild.id] = channels
Пример #7
0
def load():
    """Load config.json into MASTER_CONFIG.

    Sets MASTER_LOADED only when the file was read and parsed successfully.
    A missing file is tolerated (defaults are used); a malformed file is
    logged and re-raised.
    """
    global MASTER_CONFIG, MASTER_LOADED
    try:
        # JSON is UTF-8 by convention; be explicit instead of relying on
        # the platform default encoding.
        with open('config.json', 'r', encoding='utf-8') as json_file:
            MASTER_CONFIG = json.load(json_file)
            MASTER_LOADED = True
    except FileNotFoundError:
        Logging.error("Unable to load config, running with defaults.")
    except Exception as e:
        Logging.error("Failed to parse configuration.")
        print(e)
        raise  # bare raise preserves the original traceback
Пример #8
0
 async def startup_cleanup(self):
     """Populate bot.config_channels with each guild's validated config channels."""
     self.bot.config_channels = dict()
     for guild in self.bot.guilds:
         channel_map = dict()
         rows = ConfigChannel.select().where(
             ConfigChannel.serverid == guild.id)
         for row in rows:
             if not validate_channel_name(row.configname):
                 Logging.error(
                     f"Misconfiguration in config channel: {row.configname}"
                 )
                 continue
             channel_map[row.configname] = row.channelid
         self.bot.config_channels[guild.id] = channel_map
Пример #9
0
def add(path, name):
    '''Create a bare git repository under ``path``.

    :param path: remote root directory that will hold the repository
    :param name: repository name (wrapped, e.g. given a conventional suffix)
    :return: True when the repository was created, False if it already exists
    '''
    os.chdir(path)
    name = wrap_repo_name(name)
    repo_name = os.path.join(path, name)
    if os.path.exists(repo_name):
        Logging.error('the repository is existent')
        return False

    # SECURITY NOTE(review): `name` is interpolated into shell commands;
    # callers must guarantee it contains no shell metacharacters.
    run_command('git init --bare %s' % name)
    run_command('chown -R git:git %s' % name)

    repo_url = wrap_repo_url(path, name)
    Logging.info("repository url: %s" % (repo_url))
    Logging.info("git clone:      git clone %s" % (repo_url))
    Logging.info("git remote:     git remote add origin %s" % (repo_url))
    # Explicit success result: previously the function fell off the end and
    # returned None (falsy), indistinguishable from the False above.
    return True
Пример #10
0
def init_root_repos(path):
    """Initialize a new grm remote root directory at ``path``.

    Creates the path if needed, marks it with the BASE_GRM_DIR directory,
    writes the initial config, and resolves the public IP in the background.

    :param path: directory that will become the remote root
    :return: True on success, False if the root already exists
    """
    grm_path = os.path.join(path, BASE_GRM_DIR)
    if os.path.exists(grm_path):
        Logging.error('Reinitialized existing remote root directory in %s' %
                      os.path.abspath(path))
        return False

    if not os.path.exists(path):
        os.mkdir(path)
    Logging.info('Initialized remote root directory in %s' %
                 os.path.abspath(path))
    os.chdir(path)
    os.mkdir(BASE_GRM_DIR)

    init_config('.')
    # The public-IP lookup may block on the network; run it in a subprocess.
    p = Process(target=init_public_ip)
    p.start()
    # Explicit success result: previously the function fell off the end and
    # returned None (falsy), indistinguishable from the False above.
    return True
Пример #11
0
    async def send_welcome(self, member):
        """Send the welcome message to a new, unverified member.

        :param member: the member who joined
        :return: True if a welcome message was sent, False otherwise
        :raises Exception: re-raises any send failure after logging it
        """
        guild = self.bot.get_guild(Configuration.get_var("guild_id"))
        # Only welcome members of the configured guild who aren't verified yet.
        if member.guild.id != guild.id or self.is_member_verified(member):
            return False

        try:
            welcome_channel = self.bot.get_config_channel(
                guild.id, Utils.welcome_channel)
            rules_channel = self.bot.get_config_channel(
                guild.id, Utils.rules_channel)

            # Both channels must be configured before anything is announced.
            if welcome_channel and rules_channel:
                txt = Lang.get_string(
                    "welcome/welcome_msg",
                    user=member.mention,
                    rules_channel=rules_channel.mention,
                    accept_emoji=Emoji.get_chat_emoji('CANDLE'))
                await welcome_channel.send(txt)
                return True
        except Exception as ex:
            Logging.info(f"failed to welcome {member.id}")
            Logging.error(ex)
            raise  # bare raise preserves the original traceback
        return False
Пример #12
0
class Poller:
    """ Poller object, establish connection, MUCs and handlers """
    # NOTE(review): legacy Python 2 code -- uses the removed builtin `apply`
    # and a `print` statement. Do not run under Python 3 without porting.
    def __init__(self, segment='skynet'):
        config = Configuration()

        # job id -> running Job instance
        self.jobs = {}

        self.sched = MessageScheduler(self.message_handler)
        self.parser = Parser()

        # Parent aggregator JID; None until set_aggregator() is called.
        self.aggregator = None
        self.failed_aggregator = False

        self.query_queue = []
        conn = Connection('poller', 'roflcake')
        self.entity_prefix, entity_suffix = conn.get_entity_name()

#        self.entity_name = entity_prefix + entity_suffix
        conn.join_muc('pollers')

        self.segment = segment

        self.conn = conn.get_conn()

        self.log = Logging(conn)
        self.roster = self.conn.getRoster()

        self.conn.RegisterHandler('presence',self.presence_handler)

        self.conn.RegisterHandler('iq', self.result_handler, 'result')
        self.conn.RegisterHandler('iq', self.error_handler, 'error')
        self.conn.RegisterHandler('iq', self.get_handler, 'get')
        self.conn.RegisterHandler('iq', self.set_handler, 'set')

        # Blocks here: processes stanzas until interrupted (see go_on).
        self.go_on()

    """ Message scheduling handler """
    def message_handler(self, message, retry=False):
        if retry == True:
            self.log.error('Timed out, attempting to resend.')
        self.conn.send(message)
#
#  Handlers for node communication
#
    """ Presence stanza handler """
    def presence_handler(self, conn, presence_node):
        sender = presence_node.getFrom()
        # Auto-authorize subscription requests so peers receive our presence.
        if presence_node.getAttr('type') == 'subscribe':
            self.roster.Authorize(sender)
#        if sender != self.entity_name:
#            print presence_node.getRole()
#            conn.send(Iq('get', NS_DISCO_INFO, to=presence_node.getFrom()))
        raise NodeProcessed

    """ IQ result handler, acknowledges messages with scheduler """
    def result_handler(self, conn, iq_node):
        iq_id = iq_node.getAttr('id')
        sender = iq_node.getFrom()

        # Ignore results that originate from ourselves.
        if sender.getNode() != self.entity_prefix:
#            pass
            self.sched.received_response(iq_node)
        raise NodeProcessed  # This stanza is fully processed

    """ IQ error handler """
    def error_handler(self, conn, iq_node):
        # Errors from the parent aggregator are currently ignored.
        if iq_node.getFrom() == self.aggregator:
            pass
#            print 'Erk!'
        raise NodeProcessed

    """ IQ get handler """
    def get_handler(self, conn, iq_node):
        if iq_node.getQueryNS() == NS_DISCO_INFO:
            reply = iq_node.buildReply('result')
            # Advertise our segment as the disco identity category.
            if self.segment != None:
                category = self.segment
            else:
                category = 'skynet'
            identity = Node('identity', {'category':category, 'type':'poller'})
            reply.setQueryPayload([identity])
            conn.send(reply)
        else:
            conn.send(iq_node.buildReply('error'))
        raise NodeProcessed

    """ IQ set handler, used for RPC calls, permits methods in whitelist """
    def set_handler(self, conn, iq_node):
            # NOTE(review): body is indented one level deeper than the other
            # methods but is syntactically consistent; left untouched.
            sender = iq_node.getFrom()
            # Ignore RPC calls we issued ourselves.
            if sender.getNode() != self.entity_prefix:
                query_node = iq_node.getQueryChildren()
                for node in query_node:
                    try:
                        method = node.getTagData('methodName')
                        method_whitelist = ['run_job', 'set_aggregator', 'remove_job', 'aggregator_failure']
                        if method in method_whitelist:
                            method = getattr(self, method)
                            try:
                                try:
                                    params = node.getTag('params').getChildren()
                                    args = self.parser.get_args(params)
                                except AttributeError:
                                    # Call carried no <params> element.
                                    args = []
                                # Python 2 builtin: apply(f, args) == f(*args).
                                status, parameters = apply(method, args)
                                message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                                conn.send(message)
                            except TypeError:
#                                print sys.exc_info()
                                conn.send(iq_node.buildReply('error'))
                        else:
                            #print 'Method not in whitelist'
                            #print sys.exc_info()
                            conn.send(iq_node.buildReply('error'))
                    except AttributeError:
                        #print sys.exc_info()
                        conn.send(iq_node.buildReply('error'))
            raise NodeProcessed

    #
    # RPC METHODS (job setup and scheduling)
    #
    """ Called by Aggregator to establish job """
    def run_job(self, sender, aggregator, id, addr, proto, freq, dom, resource):
        try:
            job = Job(self, id, addr, proto, freq, dom, resource, self.conn, self.sched)
            self.jobs[id] = job
            job.start()
            return 'success', [int(id)]
        except:
            return 'failure', ['Failed to schedule job']

    """ Controller calls to notify of parent Aggregator failure """
    def aggregator_failure(self, sender):
        self.aggregator = None
        self.failed_aggregator = True
        return 'success', ['Removed parent aggregator']

    """ Called when job moved off this Poller """
    def remove_job(self, sender, job_id):
        try:
            job = self.jobs[int(job_id)]
            job.end()
            return 'success', ['Stopped job %s' % job_id]
        except:
            return 'failure', ['Failed to stop job %s' % job_id]

    """ Called by parent Aggregator, sets where add_results will be sent """
    def set_aggregator(self, sender, aggregator):
        self.log.info('Setting aggregator %s' % aggregator)
        self.aggregator = aggregator
        # If the previous aggregator failed, flush results cached meanwhile.
        if self.failed_aggregator == True:
            self.failed_aggregator = False
            for job in self.jobs.values():
                job.send_cached_results()
        return 'success', ['Successfully set aggregator']

    #
    # PRIVATE METHODS
    #
    """ Provides parent Aggregator, used by Job instances """
    def get_aggregator(self):
        return self.aggregator

    """ Setup listener """
    def step_on(self):
        try:
            self.conn.Process(1)
        except KeyboardInterrupt:
            # Graceful shutdown: unregister, stop all jobs and the scheduler.
            server = 'quae.co.uk'
            features.unregister(self.conn, server)
            # Stop all running jobs
            for job in self.jobs.values():
                job.end()

            self.sched.end()

            print 'Unregistered from %s' % server
            return 0
        return 1

    def go_on(self):
        while self.step_on(): pass
Пример #13
0
def get_embed_and_log_exception(exception_type, bot, exception, event=None, message=None, ctx=None, *args, **kwargs):
    """Dump all available exception context to the log and Sentry, and build an embed.

    :param exception_type: short label describing where the exception surfaced
    :param bot: bot instance (kept for signature parity; not used in the body)
    :param exception: the exception object that was caught
    :param event: optional event; its ``message`` attribute is used as fallback
    :param message: optional Discord message that triggered the error
    :param ctx: optional command context (command, channel, author)
    :return: an Embed summarizing the error, or None for connection-type errors
    """
    with sentry_sdk.push_scope() as scope:
        embed = Embed(colour=Colour(0xff0000), timestamp=datetime.utcfromtimestamp(time.time()))

        # something went wrong and it might have been in on_command_error, make sure we log to the log file first
        lines = [
            "\n===========================================EXCEPTION CAUGHT, DUMPING ALL AVAILABLE INFO===========================================",
            f"Type: {exception_type}"
        ]

        arg_info = ""
        for arg in list(args):
            arg_info += extract_info(arg) + "\n"
        if arg_info == "":
            arg_info = "No arguments"

        kwarg_info = ""
        for name, arg in kwargs.items():
            kwarg_info += "{}: {}\n".format(name, extract_info(arg))
        if kwarg_info == "":
            kwarg_info = "No keyword arguments"

        lines.append("======================Exception======================")
        lines.append(f"{str(exception)} ({type(exception)})")

        lines.append("======================ARG INFO======================")
        lines.append(arg_info)
        sentry_sdk.add_breadcrumb(category='arg info', message=arg_info, level='info')

        lines.append("======================KWARG INFO======================")
        lines.append(kwarg_info)
        sentry_sdk.add_breadcrumb(category='kwarg info', message=kwarg_info, level='info')

        lines.append("======================STACKTRACE======================")
        tb = "".join(traceback.format_tb(exception.__traceback__))
        lines.append(tb)

        # Fall back through event and ctx to find the originating message.
        if message is None and event is not None and hasattr(event, "message"):
            message = event.message

        if message is None and ctx is not None:
            message = ctx.message

        if message is not None and hasattr(message, "content"):
            lines.append("======================ORIGINAL MESSAGE======================")
            lines.append(message.content)
            if message.content is None or message.content == "":
                content = "<no content>"
            else:
                content = message.content
            scope.set_tag('message content', content)
            embed.add_field(name="Original message", value=trim_message(content, 1000), inline=False)

            lines.append("======================ORIGINAL MESSAGE (DETAILED)======================")
            lines.append(extract_info(message))

        if event is not None:
            # NOTE(review): event is appended into `lines` and later joined
            # with "\n" -- assumes event is a string; confirm with callers.
            lines.append("======================EVENT NAME======================")
            lines.append(event)
            scope.set_tag('event name', event)
            embed.add_field(name="Event", value=event)

        if ctx is not None:
            lines.append("======================COMMAND INFO======================")

            lines.append(f"Command: {ctx.command.name}")
            embed.add_field(name="Command", value=ctx.command.name)
            scope.set_tag('command', ctx.command.name)

            channel_name = 'Private Message' if isinstance(ctx.channel,
                                                           PrivateChannel) else f"{ctx.channel.name} (`{ctx.channel.id}`)"
            lines.append(f"Channel: {channel_name}")
            embed.add_field(name="Channel", value=channel_name, inline=False)
            scope.set_tag('channel', channel_name)

            sender = f"{str(ctx.author)} (`{ctx.author.id}`)"
            scope.set_user({"id": ctx.author.id, "username": str(ctx.author)})

            lines.append(f"Sender: {sender}")
            embed.add_field(name="Sender", value=sender, inline=False)

        lines.append(
            "===========================================DATA DUMP COMPLETE===========================================")
        Logging.error("\n".join(lines))

        # Transient connection errors are logged above but deliberately get
        # no embed (returns None) -- they are not reported to Sentry either.
        for t in [ConnectionClosed, ClientOSError, ServerDisconnectedError]:
            if isinstance(exception, t):
                return
        # nice embed for info on discord

        embed.set_author(name=exception_type)
        embed.add_field(name="Exception", value=f"{str(exception)} (`{type(exception)}`)", inline=False)
        # Discord embed field values are capped at 1024 characters.
        if len(tb) < 1024:
            embed.add_field(name="Traceback", value=tb)
        else:
            embed.add_field(name="Traceback", value="stacktrace too long, see logs")
        sentry_sdk.capture_exception(exception)
        return embed
Пример #14
0
def compare_time(time1, time2):
    """Return the signed difference in seconds between two '%Y-%m-%d' dates.

    Positive when time1 is later than time2, negative when earlier, zero
    when equal. Both dates are interpreted in local time via mktime.
    """
    fmt = '%Y-%m-%d'
    first = time.mktime(time.strptime(time1, fmt))
    second = time.mktime(time.strptime(time2, fmt))
    return int(first) - int(second)


def compare_time_test():
    """Smoke test: log the difference between two sample dates."""
    diff = compare_time('2020-04-17', '2020-04-19')
    logger.info("result: {}".format(diff))


if __name__ == '__main__':
    # Entry point: expects a single start-date argument (YYYY-MM-DD) and
    # runs the block checker from that date.
    logger.info('args: {}'.format(sys.argv))
    if len(sys.argv) < 2:
        logger.error('Usage: python3 check.py start_date[2020-07-01]')
        sys.exit(1)
    start_date = sys.argv[1]
    check_block(start_date)
'''
1. 功能
统计每天的链上区块、交易和operation的总数。

输入一个开始日期,统计该日期之后N天的数据,N由AFTER_DAYS全局变量控制,默认是7,可以根据统计需求任意修改。
如果开始日期和最新区块的间隔小于N天,统计最新区块之前的N天数据。

2. 使用
依赖: python-sdk

python3 check_count.py YYYY-MM-DD 
Пример #15
0
class Aggregator:
    """Aggregator object.

    Establishes the XMPP connection, joins MUCs, registers stanza handlers
    and dispatches whitelisted RPC calls from the controller and from child
    pollers. Results delivered by pollers are evaluated and stored.
    """
    def __init__(self):
        config = Configuration()

        self.db = Database()
        self.results = self.db.get_table('results')

        # poller JID -> list of job ids assigned to that poller
        self.job_map = {}
        self.job_pool = []

        # ids of jobs currently outside normal parameters (notified once)
        self.failed_jobs = []

        # job id -> list of (comparison, typed value) evaluation tuples
        self.evals = {}

        self.notifier = Notifier()

        self.sched = MessageScheduler(self.message_handler)

        conn = Connection('aggregator', 'roflcake')
        entity_prefix, entity_suffix = conn.get_entity_name()
        self.entity_name = entity_prefix + entity_suffix

        self.log = Logging(conn)
        conn.join_muc('aggregators')

        self.conn = conn.get_conn()

        self.roster = self.conn.getRoster()

        self.conn.RegisterHandler('iq', self.set_handler, 'set')
        self.conn.RegisterHandler('iq', self.get_handler, 'get')
        self.conn.RegisterHandler('iq', self.result_handler, 'result')
        self.conn.RegisterHandler('presence', self.presence_handler)

        self.temp_messages = []

        self.parser = Parser()

        # Blocks here: processes stanzas until interrupted.
        self.go_on()

    def message_handler(self, message, retry=False):
        """Handler for the scheduler: send (or resend) a message."""
        if retry:
            self.log.error('Timed out, attempting to resend.')
        self.conn.send(message)

# HANDLERS
    def result_handler(self, conn, iq_node):
        """IQ result handler: acknowledge scheduler-managed messages."""
        if self.sched.is_managed(int(iq_node.getID())):
            self.sched.received_response(iq_node)
        raise NodeProcessed

    def presence_handler(self, conn, presence_node):
        """Presence handler: log pollers that go offline."""
        if len(self.job_map) > 0:
            sender = presence_node.getFrom()
            if presence_node.getAttr('type') == 'unavailable':
                failed_poller = None
                for poller in self.job_map:
                    if poller == sender:
                        failed_poller = poller
                        break

                if failed_poller is not None:
                    # Only used if Controller has gone offline
                    self.log.info('Poller %s has gone offline.' % failed_poller)
        raise NodeProcessed

    def get_handler(self, conn, iq_node):
        """IQ get handler: answer service-discovery (disco#info) queries."""
        if iq_node.getQueryNS() == NS_DISCO_INFO:
            reply = iq_node.buildReply('result')
            identity = Node('identity', {'category': 'skynet', 'type': 'aggregator'})
            reply.setQueryPayload([identity])
            conn.send(reply)
        raise NodeProcessed

    def set_handler(self, conn, iq_node):
        """IQ set handler. Permits RPC calls in the whitelist, else returns error message."""
        sender = iq_node.getFrom()
        iq_id = iq_node.getAttr('id')

        # Controller-originated management RPC calls.
        if sender == '[email protected]/skynet':
            query_node = iq_node.getQueryChildren()
            for node in query_node:
                try:
                    method = node.getTagData('methodName')
                    method_whitelist = ['run_job', 'add_poller', 'remove_poller', 'remove_job', 'move_job']
                    if method in method_whitelist:
                        method = getattr(self, method)
                        try:
                            try:
                                params = node.getTag('params').getChildren()
                                args = self.parser.get_args(params)
                            except AttributeError:
                                # Call carried no <params> element.
                                args = []
                            # method(*args) replaces the Python-2-only
                            # builtin apply(method, args).
                            status, parameters = method(*args)
                            message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                            conn.send(message)
                        except TypeError:
                            conn.send(iq_node.buildReply('error'))
                    else:
                        conn.send(iq_node.buildReply('error'))
                        self.log.error('Method called not in whitelist')
                except AttributeError:
                    conn.send(iq_node.buildReply('error'))

        # Poller-originated result deliveries (add_result only).
        if len(self.job_map) > 0:
            if sender in self.job_map:
                query_node = iq_node.getQueryChildren()
                for node in query_node:
                    try:
                        method = node.getTagData('methodName')
                        method_whitelist = ['add_result']
                        if method in method_whitelist:
                            method = getattr(self, method)
                            try:
                                try:
                                    params = node.getTag('params').getChildren()
                                    args = self.parser.get_args(params)
                                except AttributeError:
                                    args = []

                                status, parameters = method(*args)
                                message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                                conn.send(message)
                            except TypeError:
                                conn.send(iq_node.buildReply('error'))
                        else:
                            conn.send(iq_node.buildReply('error'))
                    except AttributeError:
                        conn.send(iq_node.buildReply('error'))

        raise NodeProcessed  # This stanza is fully processed

    def set_evals(self, job, evaluations):
        """Establish evaluations: store (comparison, typed value) pairs for a job."""
        for evaluation in evaluations:
            if evaluation.string is not None:
                self.evals[job].append((evaluation.comparison, str(evaluation.string)))
            elif evaluation.float is not None:
                self.evals[job].append((evaluation.comparison, float(evaluation.float)))
            elif evaluation.int is not None:
                self.evals[job].append((evaluation.comparison, int(evaluation.int)))
            else:
                self.log.info('Evaluation contains no comparison value.')

    def assign_job(self, sender, query_node):
        """Callback used by scheduler.add_message when a poller confirms a job assignment."""
        if query_node.getNamespace() == NS_RPC:
            params = query_node.getTag('methodResponse').getTag('params').getChildren()
            job_id = self.parser.get_args_no_sender(params)[0]
            job_id = int(job_id)
            self.job_map[JID(sender)].append(job_id)
            self.evals[job_id] = []

            evaluations = self.db.get_evaluations(job_id)
            self.set_evals(job_id, evaluations)
        else:
            # Received an iq reply with an unexpected namespace; ignore it.
            pass
# END HANDLERS

# RPC METHODS
    def run_job(self, sender, poller, job, addr, proto, freq, dom, resource):
        """Retain details of a job and forward it to the named poller."""
        # Checks a poller is assigned
        if len(self.job_map) > 0:
            message = self.parser.rpc_call(poller, 'run_job', [self.entity_name, job, addr, proto, freq, dom, resource])
            self.sched.add_message(message, self.assign_job)
            return 'success', [str(poller), int(job)]
        else:
            return 'failure', ['There are no pollers connected']

    def move_job(self, sender, poller, job, addr, proto, freq, dom, resource, segment):
        """Called when a job is moved to this Aggregator; sets up evals and details."""
        # Checks a poller is assigned
        if len(self.job_map) > 0:
            job_id = int(job)
            self.job_map[JID(poller)].append(job_id)
            self.evals[job_id] = []

            evaluations = self.db.get_evaluations(job_id)
            self.set_evals(job_id, evaluations)
            return 'success', ['Successfully moved job']
        else:
            return 'failure', ['There are no pollers connected']

    def remove_job(self, sender, job_id):
        """Remove a job from the Aggregator and clean up its state."""
        job_id = int(job_id)
        parent_poller = None
        for poller, jobs in self.job_map.items():
            for i in range(len(jobs)):
                if jobs[i] == job_id:
                    jobs.pop(i)
                    parent_poller = poller
                    break
        try:
            self.evals.pop(job_id)
        except KeyError:
            pass

        if parent_poller is not None:
            message = self.parser.rpc_call(parent_poller, 'remove_job', [job_id])
            self.sched.add_message(message)
            return 'success', ['Successfully removed job']
        else:
            return 'failure', ['Failed to remove job']

    def add_result(self, sender, id, recorded, val):
        """Called by a child Poller to deliver a result value."""
        status = self.insert_result(id, recorded, val)
        if status != 'failure':
            return 'success', ['Sucessfully added result']
        else:
            # Hand back (and reset) the failure messages accumulated by
            # insert_result.
            messages = self.temp_messages
            self.temp_messages = []
            return 'failure', messages

    def add_poller(self, sender, poller):
        """Called by the Controller when a Poller is assigned to this Aggregator."""
        # Subscribe to poller for presence updates
        poller_jid = JID(poller)
        self.roster.Subscribe(poller_jid)
        self.job_map[poller_jid] = []
        self.sched.add_message(self.parser.rpc_call(poller_jid, 'set_aggregator', [self.entity_name]))
        return 'success', ['Successfully added %s' % poller_jid]

    def remove_poller(self, sender, poller):
        """Called by the Controller to remove all references to a Poller."""
        poller_jid = JID(poller)
        try:
            unassigned_jobs = self.job_map.pop(poller_jid)
            self.roster.Unsubscribe(poller_jid)
            return 'success', [poller]
        except KeyError:
            return 'failure', ['Failed to remove poller %s' % poller]

# END RPC METHODS

    def insert_result(self, id, recorded, val, list_id=None):
        """Insert a result supplied by add_result.

        Performs evaluations, dispatches on the value's type, sends
        notifications on state changes and stores the value into the results
        table. Returns 'failure' on error, None on success.
        """
        val_type = type(val).__name__

        try:
            evals = self.evals.get(int(id))
            if evals:
                # SECURITY NOTE(review): eval() on a string assembled from
                # evaluation rows -- the comparison operators/values stored in
                # the database must be trusted input.
                for comparison, comp_val in evals:
                    if val_type == 'int' or val_type == 'float':
                        eval_statement = str(val) + str(comparison) + str(comp_val)
                    elif val_type == 'str':
                        eval_statement = str('\'' + val + '\'') + str(comparison) + str('\'' + comp_val + '\'')
                    result = eval(eval_statement)
                # NOTE(review): only the last evaluation's result is checked
                # here -- confirm multiple evals per job is intended.
                if result != True:
                    message = 'Job %s has caused an error! The value %s failed an evaluation.' % (id, comp_val)
                    self.log.error(message)
                    # Notify only on the first failure for this job.
                    if id not in self.failed_jobs:
                        self.log.info('Sending notifications')
                        self.notifier.send_email(message)
                        self.notifier.send_sms(message)
                        self.failed_jobs.append(id)
                else:
                    # Job recovered: notify and clear the failed flag.
                    if id in self.failed_jobs:
                        message = 'Job %s is back within normal parameters' % id
                        self.notifier.send_email(message)
                        self.log.info(message)
                        self.failed_jobs.remove(id)

        except Exception:
            # Narrowed from a bare except so Ctrl-C is not swallowed here.
            self.temp_messages = ['Failed to evaluate returned result']
            return 'failure'

        if val_type == 'int':
            if list_id is not None:
                self.results.insert().execute(job=id, int=val, recorded=recorded, list=list_id)
            else:
                self.results.insert().execute(job=id, int=val, recorded=recorded)
        elif val_type == 'str':
            if list_id is not None:
                self.results.insert().execute(job=id, string=val, recorded=recorded, list=list_id)
            else:
                self.results.insert().execute(job=id, string=val, recorded=recorded)
        elif val_type == 'float':
            # BUG FIX: was `list_item` (an undefined name), which raised
            # NameError for every float result instead of checking list_id.
            if list_id is not None:
                self.results.insert().execute(job=id, float=val, recorded=recorded, list=list_id)
            else:
                self.results.insert().execute(job=id, float=val, recorded=recorded)
        elif val_type == 'list':
            # Store a sentinel row, recover its generated id, then insert
            # each element tagged with that list id.
            self.results.insert().execute(job=id, recorded=recorded, list=0)
            where = and_(self.results.c.recorded == recorded, self.results.c.list == 0)
            list_id = self.results.select(where).execute().fetchone().id
            for element in val:
                self.insert_result(id, recorded, element, list_id)
        else:
            self.temp_messages = ['Unexpected data type receieved']
            return 'failure'

    def step_on(self):
        """Process one second of XMPP traffic; return 0 to stop on Ctrl-C."""
        try:
            self.conn.Process(1)
        except KeyboardInterrupt:
            server = 'quae.co.uk'
            features.unregister(self.conn, server)
            return 0
        return 1

    def go_on(self):
        """Run the processing loop until step_on() signals a stop."""
        while self.step_on():
            pass
                            'mortgager':
                            mortgager_id,
                            'beneficiary':
                            beneficiary_id
                        })
                except Exception as e:
                    logger.error(
                        "block num: {}, op_id: {}, db except: '{}'".format(
                            operation["block_num"], op_id, repr(e)))
        conn.close()


if __name__ == '__main__':
    # Entry point: expects a block number range, then runs the block checker
    # and the database writer in separate threads.
    logger.info('args: {}'.format(sys.argv))
    if len(sys.argv) < 3:
        logger.error(
            'Usage: python3 check.py block_number_start, block_number_end')
        sys.exit(1)
    start = int(sys.argv[1])
    end = int(sys.argv[2])
    # Reject inverted or non-positive ranges.
    if start > end or start <= 0 or end <= 0:
        logger.error(
            'block_number_start: {} > block_number_end: {} or start <= 0 or end <= 0'
            .format(start, end))
        sys.exit(1)
    args = [start, end]
    t1 = Thread(target=check_block, args=(args, ))
    t1.start()
    t2 = Thread(target=data2db)
    t2.start()
Пример #17
0
class Controller:
    
    """ Controller object. Initializes various data structures used by object. Establishes connection with XMPP server,
    connects to poller, aggregator and logging Multi-User Chats and registers stanza handlers. """
    def __init__(self):
        """Connect to the XMPP server, join the MUCs, load persisted jobs and
        enter the blocking stanza loop.

        NOTE: this constructor never returns during normal operation because
        it ends by calling self.go_on().
        """
        self.db = Database()

        entity_prefix = 'controller'

        conn = Connection(entity_prefix, static=True)
        self.entity_name, self.entity_suffix = conn.get_entity_name()

        # List of nodes known to the controller
        # poller_map: aggregator JID -> list of (poller JID, segment) tuples
        # poller_pool: unassigned poller JID -> segment
        self.poller_map = {}
        self.poller_pool = {}

        # job_map: poller JID -> list of job dicts; job_pool: unassigned jobs
        self.job_map = {}
        self.job_pool = []

        # Message scheduler
        self.sched = MessageScheduler(self.message_handler)

        self.log = Logging(conn)
        conn.join_muc('pollers')
        conn.join_muc('aggregators')

        self.parser = Parser()

        # Populate job_pool from the database before handlers start firing.
        self.establish_jobs()

        self.conn = conn.get_conn()

        self.conn.RegisterHandler('iq',self.result_handler,'result')
        self.conn.RegisterHandler('iq',self.set_handler,'set')
        self.conn.RegisterHandler('presence',self.presence_handler)

        # Blocking processing loop; does not return until shutdown.
        self.go_on()
        
    """ Called by the presence handler when an entity connects to the aggregator or poller MUCs.
    Used to inspect retreive service information from an entity, required in the XEP Jabber-RPC standard. """
    def disco_lookup(self, recipient):
        """Issue a service-discovery IQ to *recipient*; the reply is routed
        to disco_handler by the message scheduler."""
        self.log.info('Performing discovery lookup.')
        query = Iq('get', queryNS=NS_DISCO_INFO, to=recipient)
        self.sched.add_message(query, self.disco_handler)
        
    """ Method passed and used as handler for messages by MessageScheduler.
    Sends messages and logs an error if message send is a retry """
    # Handler used by message scheduling class
    def message_handler(self, message, retry=False):
        """Send *message* over the XMPP connection.

        Used as the outbound callback of MessageScheduler (see __init__);
        the scheduler passes retry=True when re-sending a timed-out message,
        which is logged as an error first.
        """
        if retry:  # idiom fix: was the non-idiomatic `retry == True`
            self.log.error('Message timed out, resending.')
        self.conn.send(message)
#
#   MESSAGE HANDLERS
#
    """ Handler for presence stanzas recieved by the XMPP listener.
    If 
    """
    def presence_handler(self, conn, presence_node):
        """Handle presence stanzas from the MUCs.

        Departures ('unavailable') from the 'aggregators'/'pollers' rooms
        trigger entity removal; other presence from those rooms triggers a
        service-discovery lookup.  Always raises NodeProcessed so the stanza
        is not dispatched further.
        """
        sender = presence_node.getFrom()
        presence_type = presence_node.getAttr('type')
        # Ignore self and presence announcements from logging MUC
        if sender.getResource() != 'controller':
            if presence_type == 'unavailable':
                if sender.getNode() == 'aggregators' or sender.getNode() == 'pollers':
                    self.remove_entity(sender.getNode(), sender)
            elif sender.getNode() == 'aggregators' or sender.getNode() == 'pollers':
                # Check the service discovery details for a connecting node.
                self.disco_lookup(sender)
        raise NodeProcessed
        
    """ IQ set handler, runs RPC methods in whitelist """
    def set_handler(self, conn, iq_node):
        """IQ 'set' handler: dispatch whitelisted Jabber-RPC method calls.

        Each whitelisted methodName is invoked as self.<name>(*args) and
        answered with an RPC response; non-whitelisted or malformed calls
        get an error reply.  Raises NodeProcessed to stop further dispatch.
        """
        # Hoisted out of the loop (was rebuilt for every query child).
        method_whitelist = frozenset([
            'get_group', 'get_groups', 'create_group', 'update_group', 'remove_group',
            'get_monitor', 'get_monitors', 'create_monitor', 'update_monitor', 'remove_monitor',
            'get_monitors_by_gid',
            'get_job', 'get_jobs', 'create_job', 'update_job', 'remove_job',
            'get_evaluation', 'get_evaluations', 'create_evaluation', 'update_evaluation', 'remove_evaluation',
            'get_results', 'get_results_day', 'get_results_week', 'get_results_hour',
            'poller_failure',
            'get_aggregator'])
        query_node = iq_node.getQueryChildren()
        for node in query_node:
            try:
                method = node.getTagData('methodName')
                if method in method_whitelist:
                    # May raise AttributeError for whitelisted-but-undefined
                    # methods (e.g. get_evaluation) -> caught below.
                    method = getattr(self, method)
                    try:
                        try:
                            params = node.getTag('params').getChildren()
                            args = self.parser.get_args(params, iq_node.getFrom())
                        except AttributeError:
                            # The call carried no <params> element.
                            args = []

                        # BUG FIX: apply() is Python-2-only; argument
                        # unpacking behaves identically here.
                        status, parameters = method(*args)
                        message = self.parser.rpc_response(iq_node.getFrom(), iq_node.getID(), status, parameters)
                        self.conn.send(message)
                    except TypeError:
                        # Wrong arity (or non-tuple return): answer with error.
                        conn.send(iq_node.buildReply('error'))
                else:
                    conn.send(iq_node.buildReply('error'))
                    self.log.error('Method not in whitelist')
            except AttributeError:
                traceback.print_exc()
                conn.send(iq_node.buildReply('error'))
        raise NodeProcessed

    def result_handler(self, conn, iq_node):
        """IQ 'result' handler: forward scheduler-tracked replies."""
        stanza_id = int(iq_node.getAttr('id'))
        # Check if the reponse is managed by scheduler
        if self.sched.is_managed(stanza_id):
            self.sched.received_response(iq_node)
        raise NodeProcessed
#   END MESSAGE HANDLERS

#
# BEGIN SCHEDULER RESPONSE HANDLERS
#
    def disco_handler(self, sender, query_node):
        """Scheduler callback for disco#info replies: register the node."""
        if query_node.getNamespace() != NS_DISCO_INFO:
            self.log.error('Receieved iq message with incorrect namespace')
            return
        entity_type = query_node.getTagAttr('identity', 'type')
        if entity_type in ('aggregator', 'poller'):
            # Rewrite the MUC occupant address into the node's real JID.
            real_jid = JID(sender.getResource() + '@quae.co.uk/skynet')
            category = query_node.getTagAttr('identity', 'category')
            self.log.info('Registering node %s' % real_jid)
            self.add_entity(entity_type, category, real_jid)

    def assign_job(self, sender, query_node):
        """Scheduler callback for a confirmed run_job RPC: move the job from
        the pool into the poller's entry in job_map."""
        if query_node.getNamespace() != NS_RPC:
            self.log.error('Receieved iq message with incorrect namespace')
            return
        params = query_node.getTag('methodResponse').getTag('params').getChildren()
        poller, job_id = self.parser.get_args_no_sender(params)
        poller_jid = JID(poller)
        job_id = int(job_id)
        assigned = None
        for index, pooled in enumerate(self.job_pool):
            if pooled['id'] == job_id:
                assigned = self.job_pool.pop(index)
                self.log.info('Removing job %s from the job pool' % job_id)
                break
        if assigned is not None:
            self.job_map[poller_jid].append(assigned)
            self.log.info('Job %s successfully assigned to %s' % (job_id, poller_jid))
            
    def poller_removed(self, sender, query_node):
        """Scheduler callback after an aggregator confirms remove_poller.

        Returns the removed poller's jobs to the pool, drops the poller from
        its aggregator's entry in poller_map and re-assigns pooled jobs.
        """
        if query_node.getNamespace() != NS_RPC:
            self.log.error('Receieved iq message with incorrect namespace')
            return
        args = self.parser.get_args_no_sender(query_node.getTag('methodResponse').getTag('params').getChildren())
        adjusted_jid = JID(args[0])
        unassigned_jobs = self.job_map.pop(adjusted_jid)
        for job in unassigned_jobs:
            self.log.info('Adding job %s to the job pool' % job['id'])
            self.job_pool.append(job)
        # BUG FIX: the original initialised `parent_poller` but then used
        # `parent_aggregator`, raising NameError whenever the poller was not
        # found under any aggregator.
        parent_aggregator = None
        for aggregator, pollers in self.poller_map.items():
            for poller, segment in pollers:
                if poller == adjusted_jid:
                    parent_aggregator = aggregator
                    pollers.remove((poller, segment))
                    break
        if parent_aggregator is not None:
            self.log.info('Removed %s from %s' % (adjusted_jid, parent_aggregator))
        self.assign_pooled_jobs()
# END SCHEDULER HANDLERS

#
#   BEGIN RPC METHODS
#
# Requested by an aggregator when an assigned poller has failed/disconnected.
    def poller_failure(self, sender, previous_poller):
        """RPC method (called by an aggregator): an assigned poller has
        failed/disconnected; remove it and try to rebalance the rest.

        Returns the ('success'|'failure', [message]) pair set_handler expects.
        """
        pollers = self.poller_map[JID(sender)]
        poller_jid = JID('[email protected]/' + JID(previous_poller).getNode())
        # NOTE(review): poller_map values elsewhere hold (jid, segment)
        # tuples (see add_entity / assign_pooled_pollers); this removes a
        # bare JID — confirm what shape the aggregator registers here.
        try:
            pollers.remove(poller_jid)
        except ValueError:
            # Narrowed from a bare except: list.remove raises ValueError.
            return 'failure', ['Failed to remove poller']
        message = 'Removed failed poller'
        try:
            self.rebalance_pollers()
        except Exception:
            # BUG FIX: replaced the Python-2-only `print sys.exc_info()`
            # statement (a syntax error on Python 3) with a logged traceback.
            traceback.print_exc()
        return 'success', [message]

# Group operations
    def get_group(self, sender, name):
        """RPC: fetch one group by name."""
        found = self.db.get_group_by_name(name)
        if found is None:
            return 'failure', ['No such group exists']
        return 'success', [found]
    
    def get_groups(self, sender):
        """RPC: fetch all groups."""
        groups = self.db.get_groups()
        if groups is None:
            return 'failure', ['Failed to retreieve groups']
        return 'success', [groups]

    def create_group(self, sender, name, desc):
        """RPC: create a group unless one with the same name already exists."""
        if self.db.get_group_by_name(name) is not None:
            return 'failure', ['failure']
        self.db.create_group(name, desc)
        return 'success', ['Sucessfully created group %s' % name]
    
    def update_group(self, sender, id, name, desc):
        """RPC: update an existing group's name/description."""
        if self.db.get_group_by_id(id) is None:
            return 'failure', ['Failed to update group']
        self.db.update_group(id, name, desc)
        return 'success', ['Successfully update group %s' % name]
        
    def remove_group(self, sender, name):
        """RPC: remove a group by name, verifying it is gone afterwards."""
        self.db.remove_group_by_name(name)
        if self.db.get_group_by_name(name) is not None:
            return 'failure', ['Failed to remove group %s' % name]
        return 'success', ['Successfully remove group %s' % name]
# Monitor operations

    def get_monitor(self, sender, name):
        """RPC: fetch one monitor by name."""
        not_found = ('failure', ['No such monitor exists'])
        try:
            monitor = self.db.get_monitor(name)
        except TypeError:
            return not_found
        if monitor != False:
            return 'success', [monitor]
        return not_found
        
    def get_monitors(self, sender, group=None):
        """RPC: list monitors, optionally restricted to one group."""
        try:
            if group is None:
                monitors = self.db.get_monitors()
            else:
                monitors = self.db.get_monitors(group)
        except AttributeError:
            return 'failure', ['Failed to retrieve monitors']
        return 'success', [monitors]
            
    def get_monitors_by_gid(self, sender, group_id):
        """RPC: list the monitors belonging to group id *group_id*."""
        if group_id is not None:
            try:
                return 'success', [self.db.get_monitors_by_gid(group_id)]
            except AttributeError:
                pass
        return 'failure', ['Failed to retrieve monitors']
    
    def create_monitor(self, sender, name, description, group):
        """RPC: create a monitor; success only when the DB reports True."""
        created = self.db.create_monitor(name, description, group)
        if created == True:
            return 'success', ['Successfully create monitor %s' % name]
        return 'failure', ['Failed to create monitor']
        
    def update_monitor(self, sender, name, description, group):
        """RPC: update an existing monitor.

        BUG FIX: the original body was a copy-paste of update_group — it
        referenced the undefined name `desc` and the builtin `id`, and wrote
        to the group table; the success path could never execute correctly.
        It now checks and updates the monitor itself.
        """
        try:
            existing = self.db.get_monitor(name)
        except TypeError:
            existing = None
        if existing:
            # NOTE(review): assumes Database exposes update_monitor with the
            # same signature as create_monitor — confirm against Database.
            self.db.update_monitor(name, description, group)
            return 'success', ['Successfully update monitor %s' % name]
        return 'failure', ['Failed to update monitor']
        
    def remove_monitor(self, sender, name):
        """RPC: remove a monitor by name, verifying it is gone afterwards."""
        self.db.remove_monitor_by_name(name)
        if self.db.get_monitor_by_name(name) is None:
            return 'success', ['Successfully removed monitor %s' % name]
        return 'failure', ['Failed to remove monitor %s' % name]
        
# job operations

    def get_job(self, sender, mon, id):
        """RPC: fetch a single job by id within monitor *mon*."""
        try:
            return 'success', [self.db.get_job(id, mon)]
        except TypeError:
            return 'failure', ['No such job exists']
            
    def get_jobs(self, sender, mon):
        """RPC: list all jobs attached to monitor *mon*."""
        try:
            return 'success', [self.db.get_jobs(mon)]
        except AttributeError:
            return 'failure', ['Failed to retreieve jobs']

    def create_job(self, sender, mon, address, protocol, frequency, interface, resource):
        """RPC: create a job under monitor *mon* (the monitor must exist)."""
        monitor_exists = self.db.get_monitor(mon) is not None
        # Short-circuit keeps create_job uncalled when the monitor is absent.
        if monitor_exists and self.db.create_job(address, protocol, frequency, interface, resource, mon) == True:
            return 'success', ['Successfully created a job for %s' % mon]
        return 'failure', ['Failed to create job']

    def update_job(self, sender, mon, id, address, protocol, frequency, interface, resource):
        """RPC: update an existing job's parameters."""
        if self.db.get_job(id, mon) is None:
            return 'failure', ['failure']
        self.db.update_job(id, address, protocol, frequency, interface, resource)
        return 'success', ['Successfully updated job']
            
    def remove_job(self, sender, mon, id):
        """RPC: delete a job and confirm it no longer exists.

        BUG FIX: the existence re-check called self.db.get_job(id) without
        the monitor argument that every other caller supplies (see get_job /
        update_job, which call get_job(id, mon)) — raising TypeError instead
        of reporting the outcome.
        """
        self.db.remove_job(id)
        if self.db.get_job(id, mon) == None:
            return 'success', ['Successfully removed job']
        else:
            return 'failure', ['failure']
        
    # Result read operations
    
    def get_results(self, sender, monitor, job, start_datetime, end_datetime):
        """RPC: fetch raw results for a job within a datetime range."""
        results = self.db.get_results(monitor, job, start_datetime, end_datetime)
        if results is None:
            return 'failure', ['No such results exist']
        if results == False:
            return 'failure', ['Failed to retreive specificied results']
        return 'success', results
        
    def get_results_day(self, sender, monitor, job, start_datetime):
        """RPC: fetch one day's results together with the job's details."""
        results = self.db.get_results_day(monitor, job, start_datetime)
        if results == []:
            return 'failure', ['No such results exist']
        if results == False:
            return 'failure', ['Failed to retreive specificied results']
        return 'success', [self.db.get_job(job, monitor), results]
        
    def get_results_week(self, sender, monitor, job, start_datetime):
        """RPC: fetch a week of results — not implemented; returns None,
        which set_handler's TypeError guard turns into an error reply."""
        return None
    
    def get_results_hour(self, sender, monitor, job, start_datetime):
        """RPC: fetch one hour's results together with the job's details."""
        results = self.db.get_results_hour(monitor, job, start_datetime)
        if results == []:
            return 'failure', ['No such results exist']
        if results == False:
            return 'failure', ['Failed to retreive specificied results']
        return 'success', [self.db.get_job(job, monitor), results]
#   END RPC METHODS

#
#   BEGIN PRIVATE METHODS
#
    """ Called on successful DISCO request.
    Will register Poller or Aggregator, send jobs or balance jobs. """
    def add_entity(self, entity_type, segment, entity):
        """Register a newly-discovered aggregator or poller.

        Called on successful DISCO lookup.  Aggregators get an empty poller
        list; pollers are pooled first.  Both paths then try to (re)assign
        pooled pollers/jobs and rebalance when more than one peer exists.
        """
        if entity_type == 'aggregator':
            self.poller_map[entity] = []
            # If pollers have been added, but there were no aggregators running
            self.assign_pooled_pollers()
            if len(self.poller_map) > 1:
                self.rebalance_pollers()
            self.assign_pooled_jobs()

        elif entity_type == 'poller':
            self.job_map[entity] = []

            self.poller_pool[entity] = segment
            self.assign_pooled_pollers()

            if len(self.poller_map) > 1:
                self.rebalance_pollers()

            self.assign_pooled_jobs()

            if len(self.job_map) > 1:
                self.rebalance_jobs()
            # Give poller to appropriate aggregator
            # BUG FIX: was a Python-2 print statement (syntax error on 3.x).
            print('Added %s to %ss' % (entity, entity_type))
        self.log.info('Node %s was successfully registered with the controller' % entity)

    """ Removing Poller or Aggregator """
    def remove_entity(self, entity_type, entity):
        """Handle a MUC departure: unregister an aggregator or a poller.

        *entity_type* is the MUC node name ('aggregators' or 'pollers') and
        *entity* is the departing occupant's MUC JID.
        """
        try:
            if entity_type == 'aggregators':
                # Rebuild the node's real JID from its MUC nickname.
                adjusted_jid = JID(JID(entity).getResource() + '@quae.co.uk/skynet')
                # Return the dead aggregator's pollers to the unassigned pool.
                unassigned_pollers = self.poller_map.pop(adjusted_jid)
                for poller, segment in unassigned_pollers:
                    self.poller_pool[poller] = segment
                self.log.info('Removed %s' % adjusted_jid)
                if len(self.poller_pool) > 0:
                    # Try and assign pooled pollers
                    if not self.assign_pooled_pollers():
                        # No aggregator left: notify each orphaned poller.
                        for poller, segment in unassigned_pollers:
                            message = self.parser.rpc_call(poller, 'aggregator_failure', [])
                            self.sched.add_message(message)

            elif entity_type == 'pollers':
                adjusted_jid = JID(JID(entity).getResource() + '@quae.co.uk/skynet')
                if len(self.poller_map) > 0:
                    # Find which aggregator currently owns the poller.
                    # NOTE(review): this `break` only exits the inner loop, so
                    # later aggregators are still scanned — harmless only if a
                    # poller is never listed under two aggregators; confirm.
                    parent_aggregator = None
                    for aggregator, pollers in self.poller_map.items():
                        for poller, segment in pollers:
                            if adjusted_jid == poller:
                                parent_aggregator = aggregator
                                break
                    if parent_aggregator != None:
                        # Ask the owner to drop it; poller_removed finishes up.
                        remove_call = self.parser.rpc_call(parent_aggregator, 'remove_poller', [str(adjusted_jid)])
                        self.sched.add_message(remove_call, self.poller_removed)
                else:
                    # No aggregators at all: just forget the poller locally.
                    try:
                        self.job_map.pop(adjusted_jid)
                        self.poller_pool.pop(adjusted_jid)
                        self.log.info('Poller not assigned, sucessfully removed')
                    except:
                        # NOTE(review): bare except also swallows
                        # KeyboardInterrupt/SystemExit — consider KeyError.
                        self.log.error('Failed to remove poller')
                        traceback.print_exc()
        except ValueError:
            # NOTE(review): presumably guards the (poller, segment) tuple
            # unpacking above — confirm which operation this is meant for.
            self.log.error('Failed to remove %s' % entity)

    """ Assign unassigned pollers """
    def assign_pooled_pollers(self):
        """Drain the unassigned-poller pool, attaching each poller to the
        aggregator that currently has the fewest pollers.

        Returns True when aggregators exist (pool drained or already empty),
        False when there is no aggregator to assign to.
        """
        if len(self.poller_map) > 0:
            while len(self.poller_pool) > 0:
                # popitem both picks and removes a pooled poller.
                unassigned_poller, segment = self.poller_pool.popitem()
                chosen_aggregator = None
                poller_comp = None
                for aggregator, pollers in self.poller_map.items():
                    # If first loop or number of pollers assigned to aggregator is less than comp, make this agg the comp
                    if (chosen_aggregator == None and poller_comp == None) or len(pollers) < poller_comp:
                        chosen_aggregator = aggregator
                        poller_comp = len(pollers)
                if chosen_aggregator != None:
                    # Assign Poller to the Aggregtor with least assigned Pollers
                    
                    message = self.parser.rpc_call(chosen_aggregator, 'add_poller', [str(unassigned_poller)])
                    self.sched.add_message(message)
                    
                    # Re-issue any jobs this poller already held via move_job.
                    for job in self.job_map[unassigned_poller]:
                        message = self.parser.rpc_call(chosen_aggregator, 'move_job', [str(unassigned_poller), job['id'], job['address'], job['protocol'], job['frequency'], job['interface'], job['resource'], job['segment']])
                        self.sched.add_message(message)
                    self.poller_map[chosen_aggregator].append((unassigned_poller, segment))
            return True
        else:
            self.log.info('No aggregators available for poller assignment')
            return False
    
    """ Get Pollers for a given network segment """
    def get_segment_pollers(self, segment):
        """Return {poller JID: its job list} for every assigned poller whose
        network segment equals *segment*."""
        return {
            poller: self.job_map[poller]
            for pollers in self.poller_map.values()
            for poller, poller_segment in pollers
            if poller_segment == segment
        }
        
    """ Called to allocate unassigned jobs """
    def assign_pooled_jobs(self):
        """Send each pooled job to the least-loaded poller on the job's
        network segment.

        Jobs are NOT removed from job_pool here: send_job registers
        assign_job as the reply handler, and assign_job pops the job from
        the pool once the aggregator confirms the run_job RPC.
        """
        if len(self.job_map) > 0 and len(self.poller_map) > 0:
            for job in self.job_pool:
                unassigned_job = job
                least_loaded = None
                job_comp = None
                # Only pollers on the job's segment are candidates.
                pollers = self.get_segment_pollers(unassigned_job['segment'])
                for poller, jobs in pollers.items():
#                for poller, jobs in self.job_map.items():
                    if (least_loaded == None and job_comp == None) or len(jobs) < job_comp:
                        least_loaded = poller
                        job_comp = len(jobs)
                if least_loaded != None:
                    # Find the aggregator that owns the chosen poller.
                    # (NB: `pollers` is reused/shadowed here for poller_map values.)
                    chosen_aggregator = None
                    for aggregator, pollers in self.poller_map.items():
                        for poller, segment in pollers:
                            if poller == least_loaded:
                                chosen_aggregator = aggregator
                                break
                    if chosen_aggregator != None:
                        self.send_job(unassigned_job, least_loaded, chosen_aggregator)
        else:
            self.log.info('No assigned pollers available for job assignment')
        #print 'Job map %s' % self.job_map
        #print 'Job pool %s' % self.job_pool
    
    """ Rebalance pollers, compares amount assigned to each Aggregator, and moves across to
    another Poller if there's at least 2 more than another Aggregator """
    def rebalance_pollers(self):
        """Even out poller assignment across aggregators.

        Moves one poller from the most-loaded aggregator to the least-loaded
        one and recurses until the spread is at most 1 (then returns True).

        BUG FIX: the original least/most tracking compared each aggregator
        only against the *previous* aggregator's count, so it could pick the
        wrong endpoints; replaced with a true min/max over poller_map.
        """
        self.log.info('Attempting to rebalance pollers')

        if not self.poller_map:
            # Nothing to balance (the original also did nothing here).
            return None

        least_pollers = min(self.poller_map, key=lambda agg: len(self.poller_map[agg]))
        most_pollers = max(self.poller_map, key=lambda agg: len(self.poller_map[agg]))

        # If the difference between the two pollers is worth balancing
        if (len(self.poller_map[most_pollers]) - len(self.poller_map[least_pollers])) > 1:
            poller, segment = self.poller_map[most_pollers].pop()
            self.poller_map[least_pollers].append((poller, segment))
            self.sched.add_message(self.parser.rpc_call(least_pollers, 'add_poller', [str(poller)]))
            self.sched.add_message(self.parser.rpc_call(most_pollers, 'remove_poller', [str(poller)]))
            self.rebalance_pollers()
        else:
            self.log.info('Pollers balanced')
            return True
               
    """ Similar to above, checks number of assigned jobs through the system, and will level them across all Pollers """ 
    def rebalance_jobs(self):
        """Even out job assignment across the pollers of the 'skynet' segment.

        Moves one job from the busiest poller to the idlest one and recurses
        until the spread is at most 1 (then returns True).

        BUG FIXES: (1) least/most selection compared each poller only
        against the previous poller's count (same defect as the original
        rebalance_pollers) — replaced with a real min/max; (2) removed the
        Python-2-only `print poller` debug statement (syntax error on 3.x).
        """
        self.log.info('Attempting to rebalance jobs')

        # NOTE(review): segment is hard-coded, so only 'skynet' pollers are
        # ever rebalanced — confirm that is intentional.
        network_segment = 'skynet'
        pollers = self.get_segment_pollers(network_segment)
        if not pollers:
            # No candidate pollers (original also did nothing here).
            return None

        least_jobs = min(pollers, key=lambda p: len(pollers[p]))
        most_jobs = max(pollers, key=lambda p: len(pollers[p]))

        if (len(self.job_map[most_jobs]) - len(self.job_map[least_jobs])) > 1:
            job = self.job_map[most_jobs].pop()
            self.job_map[least_jobs].append(job)
            least_parent = None
            most_parent = None
            # Locate the aggregators owning the two pollers on this segment.
            for aggregator, agg_pollers in self.poller_map.items():
                for poller, node_segment in agg_pollers:
                    if network_segment == node_segment:
                        if poller == least_jobs:
                            least_parent = aggregator
                        if poller == most_jobs:
                            most_parent = aggregator
                        if least_parent != None and most_parent != None:
                            break

            if least_parent != None and most_parent != None:
                self.log.info('Moving job %s to %s' % (job['id'], least_jobs))
                self.sched.add_message(self.parser.rpc_call(most_parent, 'remove_job', [job['id']]))
                self.sched.add_message(self.parser.rpc_call(least_parent, 'run_job', [str(least_jobs), job['id'], job['address'], job['protocol'], job['frequency'], job['interface'], job['resource']]), offset=True)
            self.rebalance_jobs()
        else:
            self.log.info('Jobs balanced')
            return True
                
    """ Retrieve jobs on startup """    
    def establish_jobs(self):
        """Load every monitor's jobs from the database into job_pool,
        resolving segment ids to names and scaling stored frequencies by 60
        (per the original 'poll freq stored every minute' note)."""
        all_monitors = self.db.get_monitors()
        self.log.info('Retrieving jobs')
        for monitor in all_monitors:
            for row in self.db.get_jobs(monitor.name):
                job = dict(row)
                # Replace the numeric segment id with its name.
                job['segment'] = self.db.get_segment_name(job['segment'])
                # Make the poll freq stored every minute minimum
                job['frequency'] = job['frequency'] * 60
                self.job_pool.append(job)
        self.log.info('%s jobs added to the pool' % len(self.job_pool))
        
    """ Send job to Aggregator to forward to Poller """
    def send_job(self, job, poller, aggregator):
        """Ask *aggregator* to run *job* on *poller*; the scheduled reply is
        handled by assign_job, which finalises the assignment."""
        payload = [str(poller), job['id'], job['address'], job['protocol'], job['frequency'], job['interface'], job['resource']]
        rpc = self.parser.rpc_call(aggregator, 'run_job', payload)
        self.log.info('Sending job %s to %s' % (job['id'], aggregator))
        self.sched.add_message(rpc, self.assign_job, offset=True)
        
    def step_on(self):
        """Process one XMPP stanza cycle; return 1 to continue, 0 on Ctrl-C."""
        try:
            self.conn.Process(1)
            return 1
        except KeyboardInterrupt:
            return 0

    def go_on(self):
        """Loop over step_on until it returns a falsy value (shutdown)."""
        proceed = self.step_on()
        while proceed:
            proceed = self.step_on()